Posted to commits@impala.apache.org by kw...@apache.org on 2016/09/30 02:14:18 UTC

[01/61] [partial] incubator-impala git commit: IMPALA-3786: Replace "cloudera" with "apache" (part 1)

Repository: incubator-impala
Updated Branches:
  refs/heads/master a5e84ac01 -> 2a31fbdbf


http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/FunctionCallExpr.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/FunctionCallExpr.java b/fe/src/main/java/org/apache/impala/analysis/FunctionCallExpr.java
new file mode 100644
index 0000000..d53aa9e
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/FunctionCallExpr.java
@@ -0,0 +1,516 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import java.util.List;
+
+import com.cloudera.impala.authorization.Privilege;
+import com.cloudera.impala.catalog.AggregateFunction;
+import com.cloudera.impala.catalog.Catalog;
+import com.cloudera.impala.catalog.Db;
+import com.cloudera.impala.catalog.Function;
+import com.cloudera.impala.catalog.ScalarFunction;
+import com.cloudera.impala.catalog.ScalarType;
+import com.cloudera.impala.catalog.Type;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.common.TreeNode;
+import com.cloudera.impala.thrift.TAggregateExpr;
+import com.cloudera.impala.thrift.TExprNode;
+import com.cloudera.impala.thrift.TExprNodeType;
+import com.cloudera.impala.thrift.TFunctionBinaryType;
+import com.google.common.base.Joiner;
+import com.google.common.base.Objects;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+
+public class FunctionCallExpr extends Expr {
+  private final FunctionName fnName_;
+  private final FunctionParams params_;
+  private boolean isAnalyticFnCall_ = false;
+  private boolean isInternalFnCall_ = false;
+
+  // Indicates whether this is a merge aggregation function that should use the merge
+  // instead of the update symbol. This flag also affects the behavior of
+  // resetAnalysisState() which is used during expr substitution.
+  private final boolean isMergeAggFn_;
+
+  // Printed in toSqlImpl(), if set. Used for merge agg fns.
+  private String label_;
+
+  public FunctionCallExpr(String functionName, List<Expr> params) {
+    this(new FunctionName(functionName), new FunctionParams(false, params));
+  }
+
+  public FunctionCallExpr(FunctionName fnName, List<Expr> params) {
+    this(fnName, new FunctionParams(false, params));
+  }
+
+  public FunctionCallExpr(FunctionName fnName, FunctionParams params) {
+    this(fnName, params, false);
+  }
+
+  private FunctionCallExpr(
+      FunctionName fnName, FunctionParams params, boolean isMergeAggFn) {
+    super();
+    fnName_ = fnName;
+    params_ = params;
+    isMergeAggFn_ = isMergeAggFn;
+    if (params.exprs() != null) children_ = Lists.newArrayList(params_.exprs());
+  }
+
+  /**
+   * Returns an Expr that evaluates the function call <fnName>(<params>). The returned
+   * Expr is not necessarily a FunctionCallExpr (example: DECODE())
+   */
+  public static Expr createExpr(FunctionName fnName, FunctionParams params) {
+    FunctionCallExpr functionCallExpr = new FunctionCallExpr(fnName, params);
+    if (fnName.getFnNamePath().size() == 1
+            && fnName.getFnNamePath().get(0).equalsIgnoreCase("decode")
+        || fnName.getFnNamePath().size() == 2
+            && fnName.getFnNamePath().get(0).equalsIgnoreCase(Catalog.BUILTINS_DB)
+            && fnName.getFnNamePath().get(1).equalsIgnoreCase("decode")) {
+      return new CaseExpr(functionCallExpr);
+    }
+    return functionCallExpr;
+  }
+
+  /**
+   * Returns a new function call expr on the given params for performing the merge()
+   * step of the given aggregate function.
+   */
+  public static FunctionCallExpr createMergeAggCall(
+      FunctionCallExpr agg, List<Expr> params) {
+    Preconditions.checkState(agg.isAnalyzed_);
+    Preconditions.checkState(agg.isAggregateFunction());
+    FunctionCallExpr result = new FunctionCallExpr(
+        agg.fnName_, new FunctionParams(false, params), true);
+    // Inherit the function object from 'agg'.
+    result.fn_ = agg.fn_;
+    result.type_ = agg.type_;
+    // Set an explicit label based on the input agg.
+    if (agg.isMergeAggFn_) {
+      result.label_ = agg.label_;
+    } else {
+      // fn(input) becomes fn:merge(input).
+      result.label_ = agg.toSql().replaceFirst(agg.fnName_.toString(),
+          agg.fnName_.toString() + ":merge");
+    }
+    Preconditions.checkState(!result.type_.isWildcardDecimal());
+    return result;
+  }
+
+  /**
+   * Copy c'tor used in clone().
+   */
+  protected FunctionCallExpr(FunctionCallExpr other) {
+    super(other);
+    fnName_ = other.fnName_;
+    isAnalyticFnCall_ = other.isAnalyticFnCall_;
+    isInternalFnCall_ = other.isInternalFnCall_;
+    isMergeAggFn_ = other.isMergeAggFn_;
+    // Clone the params in a way that keeps the children_ and the params.exprs()
+    // in sync. The children have already been cloned in the super c'tor.
+    if (other.params_.isStar()) {
+      Preconditions.checkState(children_.isEmpty());
+      params_ = FunctionParams.createStarParam();
+    } else {
+      params_ = new FunctionParams(other.params_.isDistinct(), children_);
+    }
+    label_ = other.label_;
+  }
+
+  public boolean isMergeAggFn() { return isMergeAggFn_; }
+
+  @Override
+  public void resetAnalysisState() {
+    isAnalyzed_ = false;
+    // Resolving merge agg functions after substitution may fail, e.g., if the
+    // intermediate agg type is not the same as the output type. Preserve the original
+    // fn_ such that analyze() hits the special-case code for merge agg fns that
+    // handles this case.
+    if (!isMergeAggFn_) fn_ = null;
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (!super.equals(obj)) return false;
+    FunctionCallExpr o = (FunctionCallExpr)obj;
+    return fnName_.equals(o.fnName_) &&
+           params_.isDistinct() == o.params_.isDistinct() &&
+           params_.isStar() == o.params_.isStar();
+  }
+
+  @Override
+  public String toSqlImpl() {
+    if (label_ != null) return label_;
+    // Merge agg fns should have an explicit label.
+    Preconditions.checkState(!isMergeAggFn_);
+    StringBuilder sb = new StringBuilder();
+    sb.append(fnName_).append("(");
+    if (params_.isStar()) sb.append("*");
+    if (params_.isDistinct()) sb.append("DISTINCT ");
+    sb.append(Joiner.on(", ").join(childrenToSql())).append(")");
+    return sb.toString();
+  }
+
+  @Override
+  public String debugString() {
+    return Objects.toStringHelper(this)
+        .add("name", fnName_)
+        .add("isStar", params_.isStar())
+        .add("isDistinct", params_.isDistinct())
+        .addValue(super.debugString())
+        .toString();
+  }
+
+  public FunctionParams getParams() { return params_; }
+  public boolean isScalarFunction() {
+    Preconditions.checkNotNull(fn_);
+    return fn_ instanceof ScalarFunction;
+  }
+
+  public Type getReturnType() {
+    Preconditions.checkNotNull(fn_);
+    return fn_.getReturnType();
+  }
+
+  /**
+   * Returns true if this is a call to a non-analytic aggregate function.
+   */
+  public boolean isAggregateFunction() {
+    Preconditions.checkNotNull(fn_);
+    return fn_ instanceof AggregateFunction && !isAnalyticFnCall_;
+  }
+
+  /**
+   * Returns true if this is a call to an aggregate function that returns
+   * non-null on an empty input (e.g. count).
+   */
+  public boolean returnsNonNullOnEmpty() {
+    Preconditions.checkNotNull(fn_);
+    return fn_ instanceof AggregateFunction &&
+        ((AggregateFunction)fn_).returnsNonNullOnEmpty();
+  }
+
+  public boolean isDistinct() {
+    Preconditions.checkState(isAggregateFunction());
+    return params_.isDistinct();
+  }
+
+  public boolean ignoresDistinct() {
+    Preconditions.checkState(isAggregateFunction());
+    return ((AggregateFunction)fn_).ignoresDistinct();
+  }
+
+  public FunctionName getFnName() { return fnName_; }
+  public void setIsAnalyticFnCall(boolean v) { isAnalyticFnCall_ = v; }
+  public void setIsInternalFnCall(boolean v) { isInternalFnCall_ = v; }
+
+  @Override
+  protected void toThrift(TExprNode msg) {
+    if (isAggregateFunction() || isAnalyticFnCall_) {
+      msg.node_type = TExprNodeType.AGGREGATE_EXPR;
+      if (!isAnalyticFnCall_) msg.setAgg_expr(new TAggregateExpr(isMergeAggFn_));
+    } else {
+      msg.node_type = TExprNodeType.FUNCTION_CALL;
+    }
+  }
+
+  /**
+   * Aggregate functions are never constant.
+   */
+  @Override
+  public boolean isConstant() {
+    if (fn_ != null && fn_ instanceof AggregateFunction) return false;
+    return super.isConstant();
+  }
+
+  // Provide a better error message for some aggregate builtins. These can be
+  // a bit more user friendly than a generic 'function not found' error.
+  // TODO: should we bother to do this? We could also improve the general
+  // error messages. For example, listing the alternatives.
+  protected String getFunctionNotFoundError(Type[] argTypes) {
+    if (fnName_.isBuiltin()) {
+      // Some custom error message for builtins
+      if (params_.isStar()) {
+        return "'*' can only be used in conjunction with COUNT";
+      }
+      if (fnName_.getFunction().equalsIgnoreCase("count")) {
+        if (!params_.isDistinct() && argTypes.length > 1) {
+          return "COUNT must have DISTINCT for multiple arguments: " + toSql();
+        }
+      }
+      if (fnName_.getFunction().equalsIgnoreCase("sum")) {
+        return "SUM requires a numeric parameter: " + toSql();
+      }
+      if (fnName_.getFunction().equalsIgnoreCase("avg")) {
+        return "AVG requires a numeric or timestamp parameter: " + toSql();
+      }
+    }
+
+    String[] argTypesSql = new String[argTypes.length];
+    for (int i = 0; i < argTypes.length; ++i) {
+      argTypesSql[i] = argTypes[i].toSql();
+    }
+    return String.format(
+        "No matching function with signature: %s(%s).",
+        fnName_, params_.isStar() ? "*" : Joiner.on(", ").join(argTypesSql));
+  }
+
+  /**
+   * Builtins that return decimals are specified as the wildcard decimal (decimal(*,*))
+   * and the specific decimal can only be determined based on the inputs. We currently
+   * don't have a mechanism to specify this with the UDF interface. Until we add
+   * that (i.e. allowing UDFs to participate in the planning phase), we will
+   * manually resolve the wildcard types for the few functions that need it.
+   * This can only be called for functions that return wildcard decimals and the first
+   * argument is a wildcard decimal.
+   * TODO: this prevents UDFs from using wildcard decimals and is in general not scalable.
+   * We should add a prepare_fn() to UDFs for doing this.
+   */
+  private Type resolveDecimalReturnType(Analyzer analyzer) throws AnalysisException {
+    Preconditions.checkState(type_.isWildcardDecimal());
+    Preconditions.checkState(fn_.getBinaryType() == TFunctionBinaryType.BUILTIN);
+    Preconditions.checkState(children_.size() > 0);
+
+    // Find first decimal input (some functions, such as if(), begin with non-decimal
+    // arguments).
+    ScalarType childType = null;
+    for (Expr child : children_) {
+      if (child.type_.isDecimal()) {
+        childType = (ScalarType) child.type_;
+        break;
+      }
+    }
+    Preconditions.checkState(childType != null && !childType.isWildcardDecimal());
+    Type returnType = childType;
+
+    if (fnName_.getFunction().equalsIgnoreCase("sum")) {
+      return childType.getMaxResolutionType();
+    }
+
+    int digitsBefore = childType.decimalPrecision() - childType.decimalScale();
+    int digitsAfter = childType.decimalScale();
+    if (fnName_.getFunction().equalsIgnoreCase("ceil") ||
+               fnName_.getFunction().equalsIgnoreCase("ceiling") ||
+               fnName_.getFunction().equals("floor") ||
+               fnName_.getFunction().equals("dfloor")) {
+      // These functions just return with scale 0 but can trigger rounding. We need
+      // to increase the precision by 1 to handle that.
+      ++digitsBefore;
+      digitsAfter = 0;
+    } else if (fnName_.getFunction().equalsIgnoreCase("truncate") ||
+               fnName_.getFunction().equalsIgnoreCase("dtrunc") ||
+               fnName_.getFunction().equalsIgnoreCase("round") ||
+               fnName_.getFunction().equalsIgnoreCase("dround")) {
+      if (children_.size() > 1) {
+        // The second argument to these functions is the desired scale, otherwise
+        // the default is 0.
+        Preconditions.checkState(children_.size() == 2);
+        if (children_.get(1).isNullLiteral()) {
+          throw new AnalysisException(fnName_.getFunction() +
+              "() cannot be called with a NULL second argument.");
+        }
+
+        if (!children_.get(1).isConstant()) {
+          // We don't allow calling truncate or round with a non-constant second
+          // (desired scale) argument. e.g. select round(col1, col2). This would
+          // mean we don't know the scale of the resulting type and would need some
+          // kind of dynamic type handling which is not yet possible. This seems like
+          // a reasonable restriction.
+          throw new AnalysisException(fnName_.getFunction() +
+              "() must be called with a constant second argument.");
+        }
+        NumericLiteral scaleLiteral = (NumericLiteral) LiteralExpr.create(
+            children_.get(1), analyzer.getQueryCtx());
+        digitsAfter = (int)scaleLiteral.getLongValue();
+        if (Math.abs(digitsAfter) > ScalarType.MAX_SCALE) {
+          throw new AnalysisException("Cannot round/truncate to scales greater than " +
+              ScalarType.MAX_SCALE + ".");
+        }
+        // Round/Truncate to a negative scale means to round to the digit before
+        // the decimal e.g. round(1234.56, -2) would be 1200.
+        // The resulting scale is always 0.
+        digitsAfter = Math.max(digitsAfter, 0);
+      } else {
+        // Round()/Truncate() with no second argument.
+        digitsAfter = 0;
+      }
+
+      if ((fnName_.getFunction().equalsIgnoreCase("round") ||
+           fnName_.getFunction().equalsIgnoreCase("dround")) &&
+          digitsAfter < childType.decimalScale()) {
+        // If we are rounding to fewer decimal places, it's possible we need another
+        // digit before the decimal.
+        ++digitsBefore;
+      }
+    }
+    Preconditions.checkState(returnType.isDecimal() && !returnType.isWildcardDecimal());
+    return ScalarType.createDecimalTypeInternal(digitsBefore + digitsAfter, digitsAfter);
+  }
+
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    if (isAnalyzed_) return;
+    super.analyze(analyzer);
+    fnName_.analyze(analyzer);
+
+    if (isMergeAggFn_) {
+      // This is the function call expr resulting from splitting an aggregation into a
+      // merge aggregation. The function has already been analyzed, so just do a minimal
+      // sanity check here.
+      AggregateFunction aggFn = (AggregateFunction)fn_;
+      Preconditions.checkNotNull(aggFn);
+      Type intermediateType = aggFn.getIntermediateType();
+      if (intermediateType == null) intermediateType = type_;
+      Preconditions.checkState(!type_.isWildcardDecimal());
+      return;
+    }
+
+    Type[] argTypes = collectChildReturnTypes();
+
+    // User needs DB access.
+    Db db = analyzer.getDb(fnName_.getDb(), Privilege.VIEW_METADATA, true);
+    if (!db.containsFunction(fnName_.getFunction())) {
+      throw new AnalysisException(fnName_ + "() unknown");
+    }
+
+    if (fnName_.getFunction().equals("count") && params_.isDistinct()) {
+      // Treat COUNT(DISTINCT ...) specially because of how we do the rewrite.
+      // There is no version of COUNT() that takes more than 1 argument but after
+      // the rewrite, we only need count(*).
+      // TODO: fix how we rewrite count distinct.
+      argTypes = new Type[0];
+      Function searchDesc = new Function(fnName_, argTypes, Type.INVALID, false);
+      fn_ = db.getFunction(searchDesc, Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
+      type_ = fn_.getReturnType();
+      // Make sure BE doesn't see any TYPE_NULL exprs
+      for (int i = 0; i < children_.size(); ++i) {
+        if (getChild(i).getType().isNull()) {
+          uncheckedCastChild(ScalarType.BOOLEAN, i);
+        }
+      }
+      return;
+    }
+
+    // TODO: We allow an implicit cast from string->timestamp but only
+    // support avg(timestamp). This means avg(string_col) would work
+    // under our casting rules, which is not right.
+    // We need to revisit where implicit casts are allowed for string
+    // to timestamp.
+    if (fnName_.getFunction().equalsIgnoreCase("avg") &&
+      children_.size() == 1 && children_.get(0).getType().isStringType()) {
+      throw new AnalysisException(
+          "AVG requires a numeric or timestamp parameter: " + toSql());
+    }
+
+    Function searchDesc = new Function(fnName_, argTypes, Type.INVALID, false);
+    fn_ = db.getFunction(searchDesc, Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
+    if (fn_ == null || (!isInternalFnCall_ && !fn_.userVisible())) {
+      throw new AnalysisException(getFunctionNotFoundError(argTypes));
+    }
+
+    if (isAggregateFunction()) {
+      // subexprs must not contain aggregates
+      if (TreeNode.contains(children_, Expr.isAggregatePredicate())) {
+        throw new AnalysisException(
+            "aggregate function must not contain aggregate parameters: " + this.toSql());
+      }
+
+      // .. or analytic exprs
+      if (Expr.contains(children_, AnalyticExpr.class)) {
+        throw new AnalysisException(
+            "aggregate function must not contain analytic parameters: " + this.toSql());
+      }
+
+      // The catalog contains count() with no arguments to handle count(*), but we
+      // don't accept a bare count().
+      // TODO: Can this be handled more cleanly? It does seem like a special case since
+      // no other aggregate functions (currently) can accept '*'.
+      if (fnName_.getFunction().equalsIgnoreCase("count") &&
+          !params_.isStar() && children_.size() == 0) {
+        throw new AnalysisException("count() is not allowed.");
+      }
+
+      // TODO: the distinct rewrite does not handle this but why?
+      if (params_.isDistinct()) {
+        // The second argument in group_concat(distinct) must be a constant expr that
+        // returns a string.
+        if (fnName_.getFunction().equalsIgnoreCase("group_concat")
+            && getChildren().size() == 2
+            && !getChild(1).isConstant()) {
+            throw new AnalysisException("Second parameter in GROUP_CONCAT(DISTINCT)" +
+                " must be a constant expression that returns a string.");
+        }
+        if (fn_.getBinaryType() != TFunctionBinaryType.BUILTIN) {
+          throw new AnalysisException("User defined aggregates do not support DISTINCT.");
+        }
+      }
+
+      AggregateFunction aggFn = (AggregateFunction)fn_;
+      if (aggFn.ignoresDistinct()) params_.setIsDistinct(false);
+    }
+
+    if (params_.isIgnoreNulls() && !isAnalyticFnCall_) {
+      throw new AnalysisException("Function " + fnName_.getFunction().toUpperCase()
+          + " does not accept the keyword IGNORE NULLS.");
+    }
+
+    if (isScalarFunction()) validateScalarFnParams(params_);
+    if (fn_ instanceof AggregateFunction
+        && ((AggregateFunction) fn_).isAnalyticFn()
+        && !((AggregateFunction) fn_).isAggregateFn()
+        && !isAnalyticFnCall_) {
+      throw new AnalysisException(
+          "Analytic function requires an OVER clause: " + toSql());
+    }
+
+    castForFunctionCall(false);
+    type_ = fn_.getReturnType();
+    if (type_.isDecimal() && type_.isWildcardDecimal()) {
+      type_ = resolveDecimalReturnType(analyzer);
+    }
+
+    // We do not allow any function to return a type CHAR or VARCHAR.
+    // TODO: Add support for CHAR(N) and VARCHAR(N) return values post 2.0;
+    // support for this was not added to the backend in 2.0.
+    if (type_.isWildcardChar() || type_.isWildcardVarchar()) {
+      type_ = ScalarType.STRING;
+    }
+
+    // TODO(tmarshall): Differentiate based on the specific function.
+    if (hasChildCosts()) evalCost_ = getChildCosts() + FUNCTION_CALL_COST;
+  }
+
+  /**
+   * Checks that no special aggregate params are included in 'params' that would be
+   * invalid for a scalar function. Analysis of the param exprs is not done.
+   */
+  static void validateScalarFnParams(FunctionParams params)
+      throws AnalysisException {
+    if (params.isStar()) {
+      throw new AnalysisException("Cannot pass '*' to scalar function.");
+    }
+    if (params.isDistinct()) {
+      throw new AnalysisException("Cannot pass 'DISTINCT' to scalar function.");
+    }
+  }
+
+  @Override
+  public Expr clone() { return new FunctionCallExpr(this); }
+}
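
A worked example of the wildcard-decimal resolution in resolveDecimalReturnType() above, assuming a hypothetical input column c of type DECIMAL(9,2), so digitsBefore = 7 and digitsAfter = 2 (a sketch for illustration; not from the patch):

  sum(c)         -> max-resolution type with the same scale: DECIMAL(38,2)
  ceil(c)        -> scale 0, precision bumped by 1 for possible rounding: DECIMAL(8,0)
  round(c)       -> no second argument, so scale 0 and precision bumped: DECIMAL(8,0)
  round(c, 1)    -> scale 1; since 1 < 2 the precision is bumped: DECIMAL(9,1)
  truncate(c, 1) -> scale 1; truncate never bumps the precision: DECIMAL(8,1)
  round(c, -2)   -> negative scale clamps to 0, precision bumped: DECIMAL(8,0)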


[20/61] [partial] incubator-impala git commit: IMPALA-3786: Replace "cloudera" with "apache" (part 1)

Posted by kw...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/planner/HdfsTableSink.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/planner/HdfsTableSink.java b/fe/src/main/java/com/cloudera/impala/planner/HdfsTableSink.java
deleted file mode 100644
index 7b97773..0000000
--- a/fe/src/main/java/com/cloudera/impala/planner/HdfsTableSink.java
+++ /dev/null
@@ -1,157 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.planner;
-
-import java.util.List;
-
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.catalog.HdfsFileFormat;
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.common.PrintUtils;
-import com.cloudera.impala.thrift.TDataSink;
-import com.cloudera.impala.thrift.TDataSinkType;
-import com.cloudera.impala.thrift.TExplainLevel;
-import com.cloudera.impala.thrift.THdfsTableSink;
-import com.cloudera.impala.thrift.TTableSink;
-import com.cloudera.impala.thrift.TTableSinkType;
-import com.google.common.base.Preconditions;
-
-/**
- * Base class for Hdfs data sinks such as HdfsTextTableSink.
- *
- */
-public class HdfsTableSink extends TableSink {
-  // Default number of partitions used for computeCosts() in the absence of column stats.
-  protected final long DEFAULT_NUM_PARTITIONS = 10;
-
-  // Exprs for computing the output partition(s).
-  protected final List<Expr> partitionKeyExprs_;
-  // Whether to overwrite the existing partition(s).
-  protected final boolean overwrite_;
-
-  public HdfsTableSink(Table targetTable, List<Expr> partitionKeyExprs,
-      boolean overwrite) {
-    super(targetTable, Op.INSERT);
-    Preconditions.checkState(targetTable instanceof HdfsTable);
-    partitionKeyExprs_ = partitionKeyExprs;
-    overwrite_ = overwrite;
-  }
-
-  @Override
-  public void computeCosts() {
-    HdfsTable table = (HdfsTable) targetTable_;
-    // TODO: Estimate the memory requirements more accurately by partition type.
-    HdfsFileFormat format = table.getMajorityFormat();
-    PlanNode inputNode = fragment_.getPlanRoot();
-    int numNodes = fragment_.getNumNodes();
-    // Compute the per-host number of partitions, taking the number of nodes
-    // and the data partition of the fragment executing this sink into account.
-    long numPartitions = fragment_.getNumDistinctValues(partitionKeyExprs_);
-    if (numPartitions == -1) numPartitions = DEFAULT_NUM_PARTITIONS;
-    long perPartitionMemReq = getPerPartitionMemReq(format);
-
-    // The estimate is based purely on the per-partition mem req if the input cardinality_
-    // or the avg row size is unknown.
-    if (inputNode.getCardinality() == -1 || inputNode.getAvgRowSize() == -1) {
-      perHostMemCost_ = numPartitions * perPartitionMemReq;
-      return;
-    }
-
-    // The per-partition estimate may be higher than the memory required to buffer
-    // the entire input data.
-    long perHostInputCardinality = Math.max(1L, inputNode.getCardinality() / numNodes);
-    long perHostInputBytes =
-        (long) Math.ceil(perHostInputCardinality * inputNode.getAvgRowSize());
-    perHostMemCost_ = Math.min(perHostInputBytes, numPartitions * perPartitionMemReq);
-  }
-
-  /**
-   * Returns the per-partition memory requirement for inserting into the given
-   * file format.
-   */
-  private long getPerPartitionMemReq(HdfsFileFormat format) {
-    switch (format) {
-      // Writing to a Parquet table requires up to 1GB of buffer per partition.
-      // TODO: The per-partition memory requirement is configurable in the QueryOptions.
-      case PARQUET: return 1024L * 1024L * 1024L;
-      case TEXT: return 100L * 1024L;
-      default:
-        Preconditions.checkState(false, "Unsupported TableSink format " +
-            format.toString());
-    }
-    return 0;
-  }
-
-  @Override
-  public String getExplainString(String prefix, String detailPrefix,
-      TExplainLevel explainLevel) {
-    StringBuilder output = new StringBuilder();
-    String overwriteStr = ", OVERWRITE=" + (overwrite_ ? "true" : "false");
-    String partitionKeyStr = "";
-    if (!partitionKeyExprs_.isEmpty()) {
-      StringBuilder tmpBuilder = new StringBuilder(", PARTITION-KEYS=(");
-      for (Expr expr: partitionKeyExprs_) {
-        tmpBuilder.append(expr.toSql() + ",");
-      }
-      tmpBuilder.deleteCharAt(tmpBuilder.length() - 1);
-      tmpBuilder.append(")");
-      partitionKeyStr = tmpBuilder.toString();
-    }
-    output.append(String.format("%sWRITE TO HDFS [%s%s%s]\n", prefix,
-        targetTable_.getFullName(), overwriteStr, partitionKeyStr));
-    // Report the total number of partitions, independent of the number of nodes
-    // and the data partition of the fragment executing this sink.
-    if (explainLevel.ordinal() > TExplainLevel.MINIMAL.ordinal()) {
-      long totalNumPartitions = Expr.getNumDistinctValues(partitionKeyExprs_);
-      if (totalNumPartitions == -1) {
-        output.append(detailPrefix + "partitions=unavailable");
-      } else {
-        output.append(detailPrefix + "partitions="
-            + (totalNumPartitions == 0 ? 1 : totalNumPartitions));
-      }
-      output.append("\n");
-      if (explainLevel.ordinal() >= TExplainLevel.EXTENDED.ordinal()) {
-        output.append(PrintUtils.printHosts(detailPrefix, fragment_.getNumNodes()));
-        output.append(PrintUtils.printMemCost(" ", perHostMemCost_));
-        output.append("\n");
-      }
-    }
-    return output.toString();
-  }
-
-  @Override
-  protected TDataSink toThrift() {
-    TDataSink result = new TDataSink(TDataSinkType.TABLE_SINK);
-    THdfsTableSink hdfsTableSink = new THdfsTableSink(
-        Expr.treesToThrift(partitionKeyExprs_), overwrite_);
-    HdfsTable table = (HdfsTable) targetTable_;
-    StringBuilder error = new StringBuilder();
-    int skipHeaderLineCount = table.parseSkipHeaderLineCount(error);
-    // Errors will be caught during analysis.
-    Preconditions.checkState(error.length() == 0);
-    if (skipHeaderLineCount > 0) {
-      hdfsTableSink.setSkip_header_line_count(skipHeaderLineCount);
-    }
-    TTableSink tTableSink = new TTableSink(targetTable_.getId().asInt(),
-        TTableSinkType.HDFS, sinkOp_.toThrift());
-    tTableSink.hdfs_table_sink = hdfsTableSink;
-    result.table_sink = tTableSink;
-    return result;
-  }
-}
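
A self-contained sketch of the per-host memory estimate computed in HdfsTableSink.computeCosts() above, using hypothetical numbers (the class name, stats, and node count are made up; only the arithmetic mirrors the code):

  public class HdfsSinkMemEstimate {
    public static void main(String[] args) {
      long perPartitionMemReq = 1024L * 1024L * 1024L; // Parquet: up to 1GB buffered per open partition
      long numPartitions = 20;             // per-host number of distinct partition-key values
      long inputCardinality = 10_000_000L; // rows produced by the input fragment
      double avgRowSize = 100.0;           // bytes per row
      int numNodes = 4;

      // Bound 1: buffer one block per partition on each host.
      long perPartitionBound = numPartitions * perPartitionMemReq; // 20 GB

      // Bound 2: buffer the entire per-host input.
      long perHostInputCardinality = Math.max(1L, inputCardinality / numNodes);             // 2,500,000
      long perHostInputBytes = (long) Math.ceil(perHostInputCardinality * avgRowSize);      // 250,000,000

      // The estimate is the smaller of the two; with unknown cardinality or row size
      // the sink falls back to the per-partition bound alone.
      long perHostMemCost = Math.min(perHostInputBytes, perPartitionBound);
      System.out.println(perHostMemCost); // 250000000 bytes (~238MB)
    }
  }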

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/planner/JoinBuildSink.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/planner/JoinBuildSink.java b/fe/src/main/java/com/cloudera/impala/planner/JoinBuildSink.java
deleted file mode 100644
index 25da277..0000000
--- a/fe/src/main/java/com/cloudera/impala/planner/JoinBuildSink.java
+++ /dev/null
@@ -1,103 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.planner;
-
-import java.util.List;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.BinaryPredicate;
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.thrift.TDataSink;
-import com.cloudera.impala.thrift.TDataSinkType;
-import com.cloudera.impala.thrift.TExplainLevel;
-import com.cloudera.impala.thrift.TJoinBuildSink;
-import com.cloudera.impala.thrift.TPlanNode;
-import com.cloudera.impala.thrift.TPlanNodeType;
-import com.cloudera.impala.thrift.TQueryOptions;
-import com.google.common.base.Objects;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-/**
- * Sink to materialize the build side of a join.
- */
-public class JoinBuildSink extends DataSink {
-  private final static Logger LOG = LoggerFactory.getLogger(JoinBuildSink.class);
-
-  // id of join's build-side table assigned during planning
-  private final JoinTableId joinTableId_;
-
-  private final List<Expr> buildExprs_ = Lists.newArrayList();
-
-  /**
-   * Creates sink for build side of 'joinNode' (extracts buildExprs_ from joinNode).
-   */
-  public JoinBuildSink(JoinTableId joinTableId, JoinNode joinNode) {
-    Preconditions.checkState(joinTableId.isValid());
-    joinTableId_ = joinTableId;
-    Preconditions.checkNotNull(joinNode);
-    Preconditions.checkState(joinNode instanceof JoinNode);
-    if (!(joinNode instanceof HashJoinNode)) return;
-    for (Expr eqJoinConjunct: joinNode.getEqJoinConjuncts()) {
-      BinaryPredicate p = (BinaryPredicate) eqJoinConjunct;
-      // by convention the build exprs are the rhs of the join conjuncts
-      buildExprs_.add(p.getChild(1).clone());
-    }
-  }
-
-  public JoinTableId getJoinTableId() { return joinTableId_; }
-
-  @Override
-  protected TDataSink toThrift() {
-    TDataSink result = new TDataSink(TDataSinkType.JOIN_BUILD_SINK);
-    TJoinBuildSink tBuildSink = new TJoinBuildSink();
-    tBuildSink.setJoin_table_id(joinTableId_.asInt());
-    for (Expr buildExpr: buildExprs_) {
-      tBuildSink.addToBuild_exprs(buildExpr.treeToThrift());
-    }
-    result.setJoin_build_sink(tBuildSink);
-    return result;
-  }
-
-  @Override
-  public String getExplainString(String prefix, String detailPrefix,
-      TExplainLevel detailLevel) {
-    StringBuilder output = new StringBuilder();
-    output.append(String.format("%s%s\n", prefix, "JOIN BUILD"));
-    if (detailLevel.ordinal() > TExplainLevel.MINIMAL.ordinal()) {
-      output.append(
-          detailPrefix + "join-table-id=" + joinTableId_.toString()
-            + " plan-id=" + fragment_.getPlanId().toString()
-            + " cohort-id=" + fragment_.getCohortId().toString() + "\n");
-      if (!buildExprs_.isEmpty()) {
-        output.append(detailPrefix + "build expressions: ")
-            .append(Expr.toSql(buildExprs_) + "\n");
-      }
-    }
-    return output.toString();
-  }
-
-  @Override
-  public void computeCosts() {
-    // TODO: implement?
-  }
-}
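
For example, for a hypothetical hash join produced from "... t1 JOIN t2 ON t1.id = t2.id", the single eq-join conjunct is the BinaryPredicate t1.id = t2.id; following the convention noted in the constructor, buildExprs_ receives a clone of its right-hand child (the SlotRef for t2.id), and toThrift() ships those exprs together with the join table id so the backend can evaluate them over the build-side rows.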

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/planner/JoinNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/planner/JoinNode.java b/fe/src/main/java/com/cloudera/impala/planner/JoinNode.java
deleted file mode 100644
index ebc9b51..0000000
--- a/fe/src/main/java/com/cloudera/impala/planner/JoinNode.java
+++ /dev/null
@@ -1,508 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.planner;
-
-import java.util.Collections;
-import java.util.List;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.BinaryPredicate;
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.analysis.JoinOperator;
-import com.cloudera.impala.analysis.SlotDescriptor;
-import com.cloudera.impala.analysis.SlotRef;
-import com.cloudera.impala.catalog.ColumnStats;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.common.ImpalaException;
-import com.google.common.base.Preconditions;
-
-/**
- * Logical join operator. Subclasses correspond to implementations of the join operator
- * (e.g. hash join, nested-loop join, etc).
- */
-public abstract class JoinNode extends PlanNode {
-  private final static Logger LOG = LoggerFactory.getLogger(JoinNode.class);
-
-  // Default per-host memory requirement used if no valid stats are available.
-  // TODO: Come up with a more useful heuristic (e.g., based on scanned partitions).
-  protected final static long DEFAULT_PER_HOST_MEM = 2L * 1024L * 1024L * 1024L;
-
-  // Slop in percent allowed when comparing stats for the purpose of determining whether
-  // an equi-join condition is a foreign/primary key join.
-  protected final static double FK_PK_MAX_STATS_DELTA_PERC = 0.05;
-
-  protected JoinOperator joinOp_;
-
-  // Indicates if this join originates from a query block with a straight join hint.
-  protected final boolean isStraightJoin_;
-
-  // User-provided hint for the distribution mode. Set to 'NONE' if no hints were given.
-  protected final DistributionMode distrModeHint_;
-
-  protected DistributionMode distrMode_ = DistributionMode.NONE;
-
-  // Join conjuncts. eqJoinConjuncts_ are conjuncts of the form <lhs> = <rhs>;
-  // otherJoinConjuncts_ are non-equi join conjuncts. For an inner join, join conjuncts
-  // are conjuncts from the ON, USING or WHERE clauses. For other join types (e.g. outer
-  // and semi joins) these include only conjuncts from the ON and USING clauses.
-  protected List<BinaryPredicate> eqJoinConjuncts_;
-  protected List<Expr> otherJoinConjuncts_;
-
-  // if valid, the rhs input is materialized outside of this node and is assigned
-  // joinTableId_
-  protected JoinTableId joinTableId_ = JoinTableId.INVALID;
-
-  public enum DistributionMode {
-    NONE("NONE"),
-    BROADCAST("BROADCAST"),
-    PARTITIONED("PARTITIONED");
-
-    private final String description_;
-
-    private DistributionMode(String description) {
-      description_ = description;
-    }
-
-    @Override
-    public String toString() { return description_; }
-  }
-
-  public JoinNode(PlanNode outer, PlanNode inner, boolean isStraightJoin,
-      DistributionMode distrMode, JoinOperator joinOp,
-      List<BinaryPredicate> eqJoinConjuncts, List<Expr> otherJoinConjuncts,
-      String displayName) {
-    super(displayName);
-    Preconditions.checkNotNull(otherJoinConjuncts);
-    isStraightJoin_ = isStraightJoin;
-    distrModeHint_ = distrMode;
-    joinOp_ = joinOp;
-    children_.add(outer);
-    children_.add(inner);
-    eqJoinConjuncts_ = eqJoinConjuncts;
-    otherJoinConjuncts_ = otherJoinConjuncts;
-    computeTupleIds();
-  }
-
-  @Override
-  public void computeTupleIds() {
-    Preconditions.checkState(children_.size() == 2);
-    clearTupleIds();
-    PlanNode outer = children_.get(0);
-    PlanNode inner = children_.get(1);
-
-    // Only retain the non-semi-joined tuples of the inputs.
-    switch (joinOp_) {
-      case LEFT_ANTI_JOIN:
-      case LEFT_SEMI_JOIN:
-      case NULL_AWARE_LEFT_ANTI_JOIN: {
-        tupleIds_.addAll(outer.getTupleIds());
-        break;
-      }
-      case RIGHT_ANTI_JOIN:
-      case RIGHT_SEMI_JOIN: {
-        tupleIds_.addAll(inner.getTupleIds());
-        break;
-      }
-      default: {
-        tupleIds_.addAll(outer.getTupleIds());
-        tupleIds_.addAll(inner.getTupleIds());
-        break;
-      }
-    }
-    tblRefIds_.addAll(outer.getTblRefIds());
-    tblRefIds_.addAll(inner.getTblRefIds());
-
-    // Inherit all the nullable tuples from the children.
-    // Mark tuples that form the "nullable" side of the outer join as nullable.
-    nullableTupleIds_.addAll(inner.getNullableTupleIds());
-    nullableTupleIds_.addAll(outer.getNullableTupleIds());
-    if (joinOp_.equals(JoinOperator.FULL_OUTER_JOIN)) {
-      nullableTupleIds_.addAll(outer.getTupleIds());
-      nullableTupleIds_.addAll(inner.getTupleIds());
-    } else if (joinOp_.equals(JoinOperator.LEFT_OUTER_JOIN)) {
-      nullableTupleIds_.addAll(inner.getTupleIds());
-    } else if (joinOp_.equals(JoinOperator.RIGHT_OUTER_JOIN)) {
-      nullableTupleIds_.addAll(outer.getTupleIds());
-    }
-  }
-
-  public JoinOperator getJoinOp() { return joinOp_; }
-  public List<BinaryPredicate> getEqJoinConjuncts() { return eqJoinConjuncts_; }
-  public List<Expr> getOtherJoinConjuncts() { return otherJoinConjuncts_; }
-  public boolean isStraightJoin() { return isStraightJoin_; }
-  public DistributionMode getDistributionModeHint() { return distrModeHint_; }
-  public DistributionMode getDistributionMode() { return distrMode_; }
-  public void setDistributionMode(DistributionMode distrMode) { distrMode_ = distrMode; }
-  public JoinTableId getJoinTableId() { return joinTableId_; }
-  public void setJoinTableId(JoinTableId id) { joinTableId_ = id; }
-
-  @Override
-  public void init(Analyzer analyzer) throws ImpalaException {
-    // Do not call super.init() to defer computeStats() until all conjuncts
-    // have been collected.
-    assignConjuncts(analyzer);
-    createDefaultSmap(analyzer);
-    assignedConjuncts_ = analyzer.getAssignedConjuncts();
-    otherJoinConjuncts_ = Expr.substituteList(otherJoinConjuncts_,
-        getCombinedChildSmap(), analyzer, false);
-  }
-
-  /**
-   * Returns the estimated cardinality of an inner or outer join.
-   *
-   * We estimate the cardinality based on equality join predicates of the form
-   * "L.c = R.d", with L being a table from child(0) and R a table from child(1).
-   * For each such join predicate we try to determine whether it is a foreign/primary
-   * key (FK/PK) join condition, and either use a special FK/PK estimation or a generic
-   * estimation method. We maintain the minimum cardinality for each method separately,
-   * and finally return in order of preference:
-   * - the FK/PK estimate, if there was at least one FK/PK predicate
-   * - the generic estimate, if there was at least one predicate with sufficient stats
-   * - otherwise, we optimistically assume a FK/PK join with a join selectivity of 1,
-   *   and return |child(0)|
-   *
-   * FK/PK estimation:
-   * cardinality = |child(0)| * (|child(1)| / |R|) * (NDV(R.d) / NDV(L.c))
-   * - the cardinality of a FK/PK must be <= |child(0)|
-   * - |child(1)| / |R| captures the reduction in join cardinality due to
-   *   predicates on the PK side
-   * - NDV(R.d) / NDV(L.c) adjusts the join cardinality to avoid underestimation
-   *   due to an independence assumption if the PK side has a higher NDV than the FK
-   *   side. The rationale is that rows filtered from the PK side do not necessarily
-   *   have a match on the FK side, and therefore would not affect the join cardinality.
-   *   TODO: Revisit this pessimistic adjustment that tends to overestimate.
-   *
-   * Generic estimation:
-   * cardinality = |child(0)| * |child(1)| / max(NDV(L.c), NDV(R.d))
-   * - case A: NDV(L.c) <= NDV(R.d)
-   *   every row from child(0) joins with |child(1)| / NDV(R.d) rows
-   * - case B: NDV(L.c) > NDV(R.d)
-   *   every row from child(1) joins with |child(0)| / NDV(L.c) rows
-   * - we adjust the NDVs from both sides to account for predicates that
-   *   might have reduced the cardinality and NDVs
-   */
-  private long getJoinCardinality(Analyzer analyzer) {
-    Preconditions.checkState(
-        joinOp_ == JoinOperator.INNER_JOIN || joinOp_.isOuterJoin());
-
-    long lhsCard = getChild(0).cardinality_;
-    long rhsCard = getChild(1).cardinality_;
-    if (lhsCard == -1 || rhsCard == -1) return -1;
-
-    // Minimum of estimated join cardinalities for FK/PK join conditions.
-    long fkPkJoinCard = -1;
-    // Minimum of estimated join cardinalities for other join conditions.
-    long genericJoinCard = -1;
-    for (Expr eqJoinConjunct: eqJoinConjuncts_) {
-      SlotStats lhsStats = SlotStats.create(eqJoinConjunct.getChild(0));
-      SlotStats rhsStats = SlotStats.create(eqJoinConjunct.getChild(1));
-      // Ignore the equi-join conjunct if we have no relevant table or column stats.
-      if (lhsStats == null || rhsStats == null) continue;
-
-      // We assume a FK/PK join based on the following intuitions:
-      // 1. NDV(L.c) <= NDV(R.d)
-      //    The reasoning is that a FK/PK join is unlikely if the foreign key
-      //    side has a higher NDV than the primary key side. We may miss true
-      //    FK/PK joins due to inaccurate and/or stale stats.
-      // 2. R.d is probably a primary key.
-      //    Requires that NDV(R.d) is very close to |R|.
-      // The idea is that, by default, we assume that every join is a FK/PK join unless
-      // we have compelling evidence that suggests otherwise, so by using || we give the
-      // FK/PK assumption more chances to succeed.
-      if (lhsStats.ndv <= rhsStats.ndv * (1.0 + FK_PK_MAX_STATS_DELTA_PERC) ||
-          Math.abs(rhsStats.numRows - rhsStats.ndv) / (double) rhsStats.numRows
-            <= FK_PK_MAX_STATS_DELTA_PERC) {
-        // Adjust the join selectivity based on the NDV ratio to avoid underestimating
-        // the cardinality if the PK side has a higher NDV than the FK side.
-        double ndvRatio = (double) rhsStats.ndv / (double) lhsStats.ndv;
-        double rhsSelectivity = (double) rhsCard / (double) rhsStats.numRows;
-        long joinCard = (long) Math.ceil(lhsCard * rhsSelectivity * ndvRatio);
-        // FK/PK join cardinality must be <= the lhs cardinality.
-        joinCard = Math.min(lhsCard, joinCard);
-        if (fkPkJoinCard == -1) {
-          fkPkJoinCard = joinCard;
-        } else {
-          fkPkJoinCard = Math.min(fkPkJoinCard, joinCard);
-        }
-      } else {
-        // Adjust the NDVs on both sides to account for predicates. Intuitively, the NDVs
-        // should only decrease, so we bail if the adjustment would lead to an increase.
-        // TODO: Adjust the NDVs more systematically throughout the plan tree to
-        // get a more accurate NDV at this plan node.
-        if (lhsCard > lhsStats.numRows || rhsCard > rhsStats.numRows) continue;
-        double lhsAdjNdv = lhsStats.ndv * ((double)lhsCard / lhsStats.numRows);
-        double rhsAdjNdv = rhsStats.ndv * ((double)rhsCard / rhsStats.numRows);
-        // Generic join cardinality estimation.
-        long joinCard = (long) Math.ceil(
-            (lhsCard / Math.max(lhsAdjNdv, rhsAdjNdv)) * rhsCard);
-        if (genericJoinCard == -1) {
-          genericJoinCard = joinCard;
-        } else {
-          genericJoinCard = Math.min(genericJoinCard, joinCard);
-        }
-      }
-    }
-
-    if (fkPkJoinCard != -1) {
-      return fkPkJoinCard;
-    } else if (genericJoinCard != -1) {
-      return genericJoinCard;
-    } else {
-      // Optimistic FK/PK assumption with join selectivity of 1.
-      return lhsCard;
-    }
-  }
-
-  /**
-   * Class combining column and table stats for a particular slot. Contains the NDV
-   * for the slot and the number of rows in the originating table.
-   */
-  private static class SlotStats {
-    // Number of distinct values of the slot.
-    public final long ndv;
-    // Number of rows in the originating table.
-    public final long numRows;
-
-    public SlotStats(long ndv, long numRows) {
-      // Cap NDV at num rows of the table.
-      this.ndv = Math.min(ndv, numRows);
-      this.numRows = numRows;
-    }
-
-    /**
-     * Returns a new SlotStats object from the given expr that is guaranteed
-     * to have valid stats.
-     * Returns null if 'e' is not a SlotRef or a cast SlotRef, or if there are no
-     * valid table/column stats for 'e'.
-     */
-    public static SlotStats create(Expr e) {
-      // We need both the table and column stats, but 'e' might not directly reference
-      // a scan slot, e.g., if 'e' references a grouping slot of an agg. So we look for
-      // that source scan slot, traversing through materialization points if necessary.
-      SlotDescriptor slotDesc = e.findSrcScanSlot();
-      if (slotDesc == null) return null;
-      Table table = slotDesc.getParent().getTable();
-      if (table == null || table.getNumRows() == -1) return null;
-      if (!slotDesc.getStats().hasNumDistinctValues()) return null;
-      return new SlotStats(
-          slotDesc.getStats().getNumDistinctValues(), table.getNumRows());
-    }
-  }
-
-  /**
-   * Returns the estimated cardinality of a semi join node.
-   * For a left semi join between child(0) and child(1), we look for equality join
-   * conditions "L.c = R.d" (with L being from child(0) and R from child(1)) and use as
-   * the cardinality estimate the minimum of
-   *   |child(0)| * Min(NDV(L.c), NDV(R.d)) / NDV(L.c)
-   * over all suitable join conditions. The reasoning is that:
-   * - each row in child(0) is returned at most once
-   * - the probability of a row in child(0) having a match in R is
-   *   Min(NDV(L.c), NDV(R.d)) / NDV(L.c)
-   *
-   * For a left anti join we estimate the cardinality as the minimum of:
-   *   |L| * Max(NDV(L.c) - NDV(R.d), NDV(L.c)) / NDV(L.c)
-   * over all suitable join conditions. The reasoning is that:
-   * - each row in child(0) is returned at most once
-   * - if NDV(L.c) > NDV(R.d) then the probability of row in L having a match
-   *   in child(1) is (NDV(L.c) - NDV(R.d)) / NDV(L.c)
-   * - otherwise, we conservatively use |L| to avoid underestimation
-   *
-   * We analogously estimate the cardinality for right semi/anti joins, and treat the
-   * null-aware anti join like a regular anti join
-   *
-   * TODO: In order to take into account additional conjuncts in the child subtrees,
-   * adjust NDV(L.c) by |child(0)| / |L| and NDV(R.d) by |child(1)| / |R|.
-   * The adjustment is currently too dangerous because other planner bugs compound
-   * into bad plans and cause perf regressions (IMPALA-976).
-   */
-  private long getSemiJoinCardinality() {
-    Preconditions.checkState(joinOp_.isSemiJoin());
-
-    // Return -1 if the cardinality of the returned side is unknown.
-    long cardinality;
-    if (joinOp_ == JoinOperator.RIGHT_SEMI_JOIN
-        || joinOp_ == JoinOperator.RIGHT_ANTI_JOIN) {
-      if (getChild(1).cardinality_ == -1) return -1;
-      cardinality = getChild(1).cardinality_;
-    } else {
-      if (getChild(0).cardinality_ == -1) return -1;
-      cardinality = getChild(0).cardinality_;
-    }
-    double minSelectivity = 1.0;
-    for (Expr eqJoinPredicate: eqJoinConjuncts_) {
-      long lhsNdv = getNdv(eqJoinPredicate.getChild(0));
-      lhsNdv = Math.min(lhsNdv, getChild(0).cardinality_);
-      long rhsNdv = getNdv(eqJoinPredicate.getChild(1));
-      rhsNdv = Math.min(rhsNdv, getChild(1).cardinality_);
-
-      // Skip conjuncts with unknown NDV on either side.
-      if (lhsNdv == -1 || rhsNdv == -1) continue;
-
-      double selectivity = 1.0;
-      switch (joinOp_) {
-        case LEFT_SEMI_JOIN: {
-          selectivity = (double) Math.min(lhsNdv, rhsNdv) / (double) (lhsNdv);
-          break;
-        }
-        case RIGHT_SEMI_JOIN: {
-          selectivity = (double) Math.min(lhsNdv, rhsNdv) / (double) (rhsNdv);
-          break;
-        }
-        case LEFT_ANTI_JOIN:
-        case NULL_AWARE_LEFT_ANTI_JOIN: {
-          selectivity = (double) Math.max(lhsNdv - rhsNdv, lhsNdv) / (double) lhsNdv;
-          break;
-        }
-        case RIGHT_ANTI_JOIN: {
-          selectivity = (double) Math.max(rhsNdv - lhsNdv, rhsNdv) / (double) rhsNdv;
-          break;
-        }
-        default: Preconditions.checkState(false);
-      }
-      minSelectivity = Math.min(minSelectivity, selectivity);
-    }
-
-    Preconditions.checkState(cardinality != -1);
-    return Math.round(cardinality * minSelectivity);
-  }
-
-  /**
-   * Unwraps the SlotRef in 'expr' and returns its NDV.
-   * Returns -1 if the NDV is unknown or if 'expr' is not a SlotRef.
-   */
-  private long getNdv(Expr expr) {
-    SlotRef slotRef = expr.unwrapSlotRef(false);
-    if (slotRef == null) return -1;
-    SlotDescriptor slotDesc = slotRef.getDesc();
-    if (slotDesc == null) return -1;
-    ColumnStats stats = slotDesc.getStats();
-    if (!stats.hasNumDistinctValues()) return -1;
-    return stats.getNumDistinctValues();
-  }
-
-  @Override
-  public void computeStats(Analyzer analyzer) {
-    super.computeStats(analyzer);
-    if (joinOp_.isSemiJoin()) {
-      cardinality_ = getSemiJoinCardinality();
-    } else if (joinOp_.isInnerJoin() || joinOp_.isOuterJoin()){
-      cardinality_ = getJoinCardinality(analyzer);
-    } else {
-      Preconditions.checkState(joinOp_.isCrossJoin());
-      long leftCard = getChild(0).cardinality_;
-      long rightCard = getChild(1).cardinality_;
-      if (leftCard != -1 && rightCard != -1) {
-        cardinality_ = multiplyCardinalities(leftCard, rightCard);
-      }
-    }
-
-    // Impose lower/upper bounds on the cardinality based on the join type.
-    long leftCard = getChild(0).cardinality_;
-    long rightCard = getChild(1).cardinality_;
-    switch (joinOp_) {
-      case LEFT_SEMI_JOIN: {
-        if (leftCard != -1) {
-          cardinality_ = Math.min(leftCard, cardinality_);
-        }
-        break;
-      }
-      case RIGHT_SEMI_JOIN: {
-        if (rightCard != -1) {
-          cardinality_ = Math.min(rightCard, cardinality_);
-        }
-        break;
-      }
-      case LEFT_OUTER_JOIN: {
-        if (leftCard != -1) {
-          cardinality_ = Math.max(leftCard, cardinality_);
-        }
-        break;
-      }
-      case RIGHT_OUTER_JOIN: {
-        if (rightCard != -1) {
-          cardinality_ = Math.max(rightCard, cardinality_);
-        }
-        break;
-      }
-      case FULL_OUTER_JOIN: {
-        if (leftCard != -1 && rightCard != -1) {
-          long cardinalitySum = addCardinalities(leftCard, rightCard);
-          cardinality_ = Math.max(cardinalitySum, cardinality_);
-        }
-        break;
-      }
-      case LEFT_ANTI_JOIN:
-      case NULL_AWARE_LEFT_ANTI_JOIN: {
-        if (leftCard != -1) {
-          cardinality_ = Math.min(leftCard, cardinality_);
-        }
-        break;
-      }
-      case RIGHT_ANTI_JOIN: {
-        if (rightCard != -1) {
-          cardinality_ = Math.min(rightCard, cardinality_);
-        }
-        break;
-      }
-      case CROSS_JOIN: {
-        if (getChild(0).cardinality_ == -1 || getChild(1).cardinality_ == -1) {
-          cardinality_ = -1;
-        } else {
-          cardinality_ = multiplyCardinalities(getChild(0).cardinality_,
-              getChild(1).cardinality_);
-        }
-        break;
-      }
-    }
-    cardinality_ = capAtLimit(cardinality_);
-    Preconditions.checkState(hasValidStats());
-    LOG.debug("stats Join: cardinality=" + Long.toString(cardinality_));
-  }
-
-  /**
-   * Inverts the join op, swaps our children, and swaps the children
-   * of all eqJoinConjuncts_. All modifications are in place.
-   */
-  public void invertJoin() {
-    joinOp_ = joinOp_.invert();
-    Collections.swap(children_, 0, 1);
-    for (BinaryPredicate p: eqJoinConjuncts_) p.reverse();
-  }
-
-  public boolean hasConjuncts() {
-    return !eqJoinConjuncts_.isEmpty() || !otherJoinConjuncts_.isEmpty() ||
-        !conjuncts_.isEmpty();
-  }
-
-  @Override
-  protected String getDisplayLabelDetail() {
-    StringBuilder output = new StringBuilder(joinOp_.toString());
-    if (distrMode_ != DistributionMode.NONE) output.append(", " + distrMode_.toString());
-    return output.toString();
-  }
-
-  protected void orderJoinConjunctsByCost() {
-    conjuncts_ = orderConjunctsByCost(conjuncts_);
-    eqJoinConjuncts_ = orderConjunctsByCost(eqJoinConjuncts_);
-    otherJoinConjuncts_ = orderConjunctsByCost(otherJoinConjuncts_);
-  }
-}
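
A small, self-contained sketch of the two cardinality formulas documented in JoinNode.getJoinCardinality() above, with hypothetical statistics (all names and numbers are made up; only the arithmetic follows the code):

  public class JoinCardinalityEstimate {
    public static void main(String[] args) {
      // FK/PK case: fact table F (lhs) joins dimension D (rhs) on F.d_id = D.id.
      long lhsCard = 1_000_000L;  // |child(0)| after lhs predicates
      long rhsCard = 100L;        // |child(1)| after rhs predicates
      long lhsNdv = 900L;         // NDV(F.d_id)
      long rhsNdv = 1_000L;       // NDV(D.id)
      long rhsNumRows = 1_000L;   // |D|
      // NDV(L.c) <= NDV(R.d) * 1.05 holds, so this is treated as a FK/PK join:
      // cardinality = |child(0)| * (|child(1)| / |R|) * (NDV(R.d) / NDV(L.c)), capped at |child(0)|.
      double ndvRatio = (double) rhsNdv / (double) lhsNdv;
      double rhsSelectivity = (double) rhsCard / (double) rhsNumRows;
      long fkPkCard = Math.min(lhsCard, (long) Math.ceil(lhsCard * rhsSelectivity * ndvRatio));
      System.out.println("FK/PK estimate: " + fkPkCard); // 111112

      // Generic case: neither FK/PK heuristic holds, e.g. T1.x = T2.y with
      // |T1| = 1,000,000, NDV(T1.x) = 200,000, |T2| = 100,000, NDV(T2.y) = 10,000,
      // and 50,000 rhs rows surviving predicates.
      long gLhsCard = 1_000_000L, gRhsCard = 50_000L;
      double lhsAdjNdv = 200_000 * ((double) gLhsCard / 1_000_000);  // 200,000
      double rhsAdjNdv = 10_000 * ((double) gRhsCard / 100_000);     // 5,000
      // cardinality = |child(0)| * |child(1)| / max(NDV(L.c), NDV(R.d))
      long genericCard = (long) Math.ceil((gLhsCard / Math.max(lhsAdjNdv, rhsAdjNdv)) * gRhsCard);
      System.out.println("Generic estimate: " + genericCard); // 250000
    }
  }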

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/planner/JoinTableId.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/planner/JoinTableId.java b/fe/src/main/java/com/cloudera/impala/planner/JoinTableId.java
deleted file mode 100644
index 5cf7a2b..0000000
--- a/fe/src/main/java/com/cloudera/impala/planner/JoinTableId.java
+++ /dev/null
@@ -1,47 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.planner;
-
-import com.cloudera.impala.common.Id;
-import com.cloudera.impala.common.IdGenerator;
-
-public class JoinTableId extends Id<JoinTableId> {
-  // Construction only allowed via an IdGenerator.
-  protected JoinTableId(int id) {
-    super(id);
-  }
-
-  public static JoinTableId INVALID;
-  static {
-    INVALID = new JoinTableId(Id.INVALID_ID);
-  }
-
-  public static IdGenerator<JoinTableId> createGenerator() {
-    return new IdGenerator<JoinTableId>() {
-      @Override
-      public JoinTableId getNextId() { return new JoinTableId(nextId_++); }
-      @Override
-      public JoinTableId getMaxId() { return new JoinTableId(nextId_ - 1); }
-    };
-  }
-
-  @Override
-  public String toString() {
-    return String.format("%02d", id_);
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/planner/KuduScanNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/planner/KuduScanNode.java b/fe/src/main/java/com/cloudera/impala/planner/KuduScanNode.java
deleted file mode 100644
index 4f654a9..0000000
--- a/fe/src/main/java/com/cloudera/impala/planner/KuduScanNode.java
+++ /dev/null
@@ -1,358 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.planner;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.ListIterator;
-import java.util.Set;
-
-import org.apache.kudu.ColumnSchema;
-import org.apache.kudu.Schema;
-import org.apache.kudu.client.KuduClient;
-import org.apache.kudu.client.KuduClient.KuduClientBuilder;
-import org.apache.kudu.client.KuduPredicate;
-import org.apache.kudu.client.KuduPredicate.ComparisonOp;
-import org.apache.kudu.client.KuduScanToken;
-import org.apache.kudu.client.KuduScanToken.KuduScanTokenBuilder;
-import org.apache.kudu.client.LocatedTablet;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.BinaryPredicate;
-import com.cloudera.impala.analysis.BoolLiteral;
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.analysis.LiteralExpr;
-import com.cloudera.impala.analysis.NullLiteral;
-import com.cloudera.impala.analysis.NumericLiteral;
-import com.cloudera.impala.analysis.SlotDescriptor;
-import com.cloudera.impala.analysis.SlotRef;
-import com.cloudera.impala.analysis.StringLiteral;
-import com.cloudera.impala.analysis.TupleDescriptor;
-import com.cloudera.impala.catalog.KuduTable;
-import com.cloudera.impala.common.ImpalaRuntimeException;
-import com.cloudera.impala.thrift.TExplainLevel;
-import com.cloudera.impala.thrift.TKuduScanNode;
-import com.cloudera.impala.thrift.TNetworkAddress;
-import com.cloudera.impala.thrift.TPlanNode;
-import com.cloudera.impala.thrift.TPlanNodeType;
-import com.cloudera.impala.thrift.TScanRange;
-import com.cloudera.impala.thrift.TScanRangeLocation;
-import com.cloudera.impala.thrift.TScanRangeLocations;
-import com.google.common.base.Charsets;
-import com.google.common.collect.Iterables;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Sets;
-
-/**
- * Scan of a single Kudu table.
- *
- * Extracts predicates that can be pushed down to Kudu. Currently only binary predicates
- * that have a constant expression on one side and a slot ref on the other can be
- * evaluated by Kudu.
- *
- * Uses the Kudu ScanToken API to generate a set of Kudu "scan tokens" which are used for
- * scheduling and initializing the scanners. Scan tokens are opaque objects that represent
- * a scan for some Kudu data on a tablet (currently one token represents one tablet). Each
- * token contains the tablet locations and all information needed to produce a Kudu scanner,
- * including the projected columns and predicates that are pushed down.
- *
- * After KUDU-1065 is resolved, Kudu will also prune the tablets that don't need to be
- * scanned, and only the tokens for those tablets will be returned.
- */
-public class KuduScanNode extends ScanNode {
-  private final static Logger LOG = LoggerFactory.getLogger(KuduScanNode.class);
-
-  private final KuduTable kuduTable_;
-
-  // Indexes for the set of hosts that will be used for the query.
-  // From analyzer.getHostIndex().getIndex(address)
-  private final Set<Integer> hostIndexSet_ = Sets.newHashSet();
-
-  // List of conjuncts that can be pushed down to Kudu, after they have been normalized
-  // by BinaryPredicate.normalizeSlotRefComparison(). Used for computing stats and
-  // explain strings.
-  private final List<Expr> kuduConjuncts_ = Lists.newArrayList();
-
-  // Exprs in kuduConjuncts_ converted to KuduPredicates.
-  private final List<KuduPredicate> kuduPredicates_ = Lists.newArrayList();
-
-  public KuduScanNode(PlanNodeId id, TupleDescriptor desc) {
-    super(id, desc, "SCAN KUDU");
-    kuduTable_ = (KuduTable) desc_.getTable();
-  }
-
-  @Override
-  public void init(Analyzer analyzer) throws ImpalaRuntimeException {
-    assignConjuncts(analyzer);
-    analyzer.createEquivConjuncts(tupleIds_.get(0), conjuncts_);
-    conjuncts_ = orderConjunctsByCost(conjuncts_);
-
-    try (KuduClient client =
-         new KuduClientBuilder(kuduTable_.getKuduMasterAddresses()).build()) {
-      org.apache.kudu.client.KuduTable rpcTable =
-          client.openTable(kuduTable_.getKuduTableName());
-      validateSchema(rpcTable);
-
-      // Extract predicates that can be evaluated by Kudu.
-      extractKuduConjuncts(analyzer, client, rpcTable);
-
-      // Materialize the slots of the remaining conjuncts (i.e. those not pushed to Kudu)
-      analyzer.materializeSlots(conjuncts_);
-
-      // Creates Kudu scan tokens and sets the scan range locations.
-      computeScanRangeLocations(analyzer, client, rpcTable);
-    } catch (Exception e) {
-      throw new ImpalaRuntimeException("Unable to initialize the Kudu scan node", e);
-    }
-
-    computeMemLayout(analyzer);
-    computeStats(analyzer);
-  }
-
-  /**
-   * Validates that the columns Impala expects are actually present in the Kudu table.
-   */
-  private void validateSchema(org.apache.kudu.client.KuduTable rpcTable)
-      throws ImpalaRuntimeException {
-    Schema tableSchema = rpcTable.getSchema();
-    for (SlotDescriptor desc: getTupleDesc().getSlots()) {
-      String colName = desc.getColumn().getName();
-      try {
-        tableSchema.getColumn(colName);
-      } catch (Exception e) {
-        throw new ImpalaRuntimeException("Column '" + colName + "' not found in kudu " +
-            "table " + rpcTable.getName());
-      }
-    }
-  }
-
-  /**
-   * Compute the scan range locations for the given table using the scan tokens.
-   */
-  private void computeScanRangeLocations(Analyzer analyzer,
-      KuduClient client, org.apache.kudu.client.KuduTable rpcTable)
-      throws ImpalaRuntimeException {
-    scanRanges_ = Lists.newArrayList();
-
-    List<KuduScanToken> scanTokens = createScanTokens(client, rpcTable);
-    for (KuduScanToken token: scanTokens) {
-      LocatedTablet tablet = token.getTablet();
-      List<TScanRangeLocation> locations = Lists.newArrayList();
-      if (tablet.getReplicas().isEmpty()) {
-        throw new ImpalaRuntimeException(String.format(
-            "At least one tablet does not have any replicas. Tablet ID: %s",
-            new String(tablet.getTabletId(), Charsets.UTF_8)));
-      }
-
-      for (LocatedTablet.Replica replica: tablet.getReplicas()) {
-        TNetworkAddress address =
-            new TNetworkAddress(replica.getRpcHost(), replica.getRpcPort());
-        // Use the network address to look up the host in the global list
-        Integer hostIndex = analyzer.getHostIndex().getIndex(address);
-        locations.add(new TScanRangeLocation(hostIndex));
-        hostIndexSet_.add(hostIndex);
-      }
-
-      TScanRange scanRange = new TScanRange();
-      try {
-        scanRange.setKudu_scan_token(token.serialize());
-      } catch (IOException e) {
-        throw new ImpalaRuntimeException("Unable to serialize Kudu scan token=" +
-            token.toString(), e);
-      }
-
-      TScanRangeLocations locs = new TScanRangeLocations();
-      locs.setScan_range(scanRange);
-      locs.locations = locations;
-      scanRanges_.add(locs);
-    }
-  }
-
-  /**
-   * Returns KuduScanTokens for this scan given the projected columns and predicates that
-   * will be pushed to Kudu.
-   */
-  private List<KuduScanToken> createScanTokens(KuduClient client,
-      org.apache.kudu.client.KuduTable rpcTable) {
-    List<String> projectedCols = Lists.newArrayList();
-    for (SlotDescriptor desc: getTupleDesc().getSlots()) {
-      if (desc.isMaterialized()) projectedCols.add(desc.getColumn().getName());
-    }
-
-    KuduScanTokenBuilder tokenBuilder = client.newScanTokenBuilder(rpcTable);
-    tokenBuilder.setProjectedColumnNames(projectedCols);
-    for (KuduPredicate predicate: kuduPredicates_) tokenBuilder.addPredicate(predicate);
-    return tokenBuilder.build();
-  }
-
-  @Override
-  protected double computeSelectivity() {
-    List<Expr> allConjuncts = Lists.newArrayList(
-        Iterables.concat(conjuncts_, kuduConjuncts_));
-    return computeCombinedSelectivity(allConjuncts);
-  }
-
-  @Override
-  protected void computeStats(Analyzer analyzer) {
-    super.computeStats(analyzer);
-    // Update the number of nodes to reflect the hosts that have relevant data.
-    numNodes_ = hostIndexSet_.size();
-
-    // Update the cardinality
-    inputCardinality_ = cardinality_ = kuduTable_.getNumRows();
-    cardinality_ *= computeSelectivity();
-    cardinality_ = Math.min(Math.max(1, cardinality_), kuduTable_.getNumRows());
-    cardinality_ = capAtLimit(cardinality_);
-    LOG.debug("computeStats KuduScan: cardinality=" + Long.toString(cardinality_));
-  }
-
-  @Override
-  protected String getNodeExplainString(String prefix, String detailPrefix,
-      TExplainLevel detailLevel) {
-    StringBuilder result = new StringBuilder();
-
-    String aliasStr = desc_.hasExplicitAlias() ? " " + desc_.getAlias() : "";
-    result.append(String.format("%s%s:%s [%s%s]\n", prefix, id_.toString(), displayName_,
-        kuduTable_.getFullName(), aliasStr));
-
-    switch (detailLevel) {
-      case MINIMAL: break;
-      case STANDARD: // Fallthrough intended.
-      case EXTENDED: // Fallthrough intended.
-      case VERBOSE: {
-        if (!conjuncts_.isEmpty()) {
-          result.append(detailPrefix + "predicates: " + getExplainString(conjuncts_)
-              + "\n");
-        }
-        if (!kuduConjuncts_.isEmpty()) {
-          result.append(detailPrefix + "kudu predicates: " + getExplainString(
-              kuduConjuncts_) + "\n");
-        }
-      }
-    }
-    return result.toString();
-  }
-
-  @Override
-  protected void toThrift(TPlanNode node) {
-    node.node_type = TPlanNodeType.KUDU_SCAN_NODE;
-    node.kudu_scan_node = new TKuduScanNode(desc_.getId().asInt());
-  }
-
-  /**
-   * Extracts predicates from conjuncts_ that can be pushed down to Kudu. Currently only
-   * binary predicates that have a constant expression on one side and a slot ref on the
-   * other can be evaluated by Kudu. Only looks at comparisons of constants (i.e., the
-   * bounds of the result can be evaluated with Expr::GetValue(NULL)). If a conjunct can
-   * be converted into this form, the normalized expr is added to kuduConjuncts_, a
-   * KuduPredicate is added to kuduPredicates_, and the original expr from conjuncts_ is
-   * removed.
-   */
-  private void extractKuduConjuncts(Analyzer analyzer,
-      KuduClient client, org.apache.kudu.client.KuduTable rpcTable) {
-    ListIterator<Expr> it = conjuncts_.listIterator();
-    while (it.hasNext()) {
-      if (tryConvertKuduPredicate(analyzer, rpcTable, it.next())) it.remove();
-    }
-  }
-
-  /**
-   * If 'expr' can be converted to a KuduPredicate, returns true and updates
-   * kuduPredicates_ and kuduConjuncts_.
-   */
-  private boolean tryConvertKuduPredicate(Analyzer analyzer,
-      org.apache.kudu.client.KuduTable table, Expr expr) {
-    if (!(expr instanceof BinaryPredicate)) return false;
-    BinaryPredicate predicate = (BinaryPredicate) expr;
-
-    // TODO KUDU-931 look into handling implicit/explicit casts on the SlotRef.
-    predicate = BinaryPredicate.normalizeSlotRefComparison(predicate, analyzer);
-    if (predicate == null) return false;
-    ComparisonOp op = getKuduOperator(((BinaryPredicate)predicate).getOp());
-    if (op == null) return false;
-
-    SlotRef ref = (SlotRef) predicate.getChild(0);
-    LiteralExpr literal = (LiteralExpr) predicate.getChild(1);
-
-    // Cannot push predicates with null literal values (KUDU-1595).
-    if (literal instanceof NullLiteral) return false;
-
-    String colName = ref.getDesc().getColumn().getName();
-    ColumnSchema column = table.getSchema().getColumn(colName);
-    KuduPredicate kuduPredicate = null;
-    switch (literal.getType().getPrimitiveType()) {
-      case BOOLEAN: {
-        kuduPredicate = KuduPredicate.newComparisonPredicate(column, op,
-            ((BoolLiteral)literal).getValue());
-        break;
-      }
-      case TINYINT:
-      case SMALLINT:
-      case INT: {
-        kuduPredicate = KuduPredicate.newComparisonPredicate(column, op,
-            ((NumericLiteral)literal).getIntValue());
-        break;
-      }
-      case BIGINT: {
-        kuduPredicate = KuduPredicate.newComparisonPredicate(column, op,
-            ((NumericLiteral)literal).getLongValue());
-        break;
-      }
-      case FLOAT: {
-        kuduPredicate = KuduPredicate.newComparisonPredicate(column, op,
-            (float)((NumericLiteral)literal).getDoubleValue());
-        break;
-      }
-      case DOUBLE: {
-        kuduPredicate = KuduPredicate.newComparisonPredicate(column, op,
-            ((NumericLiteral)literal).getDoubleValue());
-        break;
-      }
-      case STRING:
-      case VARCHAR:
-      case CHAR: {
-        kuduPredicate = KuduPredicate.newComparisonPredicate(column, op,
-            ((StringLiteral)literal).getStringValue());
-        break;
-      }
-      default: break;
-    }
-    if (kuduPredicate == null) return false;
-
-    kuduConjuncts_.add(predicate);
-    kuduPredicates_.add(kuduPredicate);
-    return true;
-  }
-
-  /**
-   * Returns a Kudu comparison operator for the BinaryPredicate operator, or null if
-   * the operation is not supported by Kudu.
-   */
-  private static KuduPredicate.ComparisonOp getKuduOperator(BinaryPredicate.Operator op) {
-    switch (op) {
-      case GT: return ComparisonOp.GREATER;
-      case LT: return ComparisonOp.LESS;
-      case GE: return ComparisonOp.GREATER_EQUAL;
-      case LE: return ComparisonOp.LESS_EQUAL;
-      case EQ: return ComparisonOp.EQUAL;
-      default: return null;
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/planner/KuduTableSink.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/planner/KuduTableSink.java b/fe/src/main/java/com/cloudera/impala/planner/KuduTableSink.java
deleted file mode 100644
index 8e8ac63..0000000
--- a/fe/src/main/java/com/cloudera/impala/planner/KuduTableSink.java
+++ /dev/null
@@ -1,88 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-
-package com.cloudera.impala.planner;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.common.PrintUtils;
-import com.cloudera.impala.thrift.TDataSink;
-import com.cloudera.impala.thrift.TDataSinkType;
-import com.cloudera.impala.thrift.TExplainLevel;
-import com.cloudera.impala.thrift.TKuduTableSink;
-import com.cloudera.impala.thrift.TTableSink;
-import com.cloudera.impala.thrift.TTableSinkType;
-import com.google.common.collect.Lists;
-
-/**
- * Class used to represent a Sink that will transport
- * data from a plan fragment into a Kudu table using a Kudu client.
- */
-public class KuduTableSink extends TableSink {
-
-  // Optional list of referenced Kudu table column indices. The position of a result
-  // expression i matches a column index into the Kudu schema at targetColIdxs_[i].
-  private ArrayList<Integer> targetColIdxs_;
-
-  private final boolean ignoreNotFoundOrDuplicate_;
-
-  public KuduTableSink(Table targetTable, Op sinkOp,
-      List<Integer> referencedColumns, boolean ignoreNotFoundOrDuplicate) {
-    super(targetTable, sinkOp);
-    targetColIdxs_ = referencedColumns != null
-        ? Lists.newArrayList(referencedColumns) : null;
-    ignoreNotFoundOrDuplicate_ = ignoreNotFoundOrDuplicate;
-  }
-
-  @Override
-  public String getExplainString(String prefix, String detailPrefix,
-      TExplainLevel explainLevel) {
-    StringBuilder output = new StringBuilder();
-    output.append(prefix + sinkOp_.toExplainString());
-    output.append(" KUDU [" + targetTable_.getFullName() + "]\n");
-    output.append(detailPrefix);
-    if (sinkOp_ == Op.INSERT) {
-      output.append("check unique keys: ");
-    } else {
-      output.append("check keys exist: ");
-    }
-    output.append(ignoreNotFoundOrDuplicate_);
-    output.append("\n");
-    if (explainLevel.ordinal() >= TExplainLevel.EXTENDED.ordinal()) {
-      output.append(PrintUtils.printHosts(detailPrefix, fragment_.getNumNodes()));
-      output.append(PrintUtils.printMemCost(" ", perHostMemCost_));
-      output.append("\n");
-    }
-    return output.toString();
-  }
-
-  @Override
-  protected TDataSink toThrift() {
-    TDataSink result = new TDataSink(TDataSinkType.TABLE_SINK);
-    TTableSink tTableSink = new TTableSink(targetTable_.getId().asInt(),
-        TTableSinkType.KUDU, sinkOp_.toThrift());
-    TKuduTableSink tKuduSink = new TKuduTableSink();
-    tKuduSink.setReferenced_columns(targetColIdxs_);
-    tKuduSink.setIgnore_not_found_or_duplicate(ignoreNotFoundOrDuplicate_);
-    tTableSink.setKudu_table_sink(tKuduSink);
-    result.table_sink = tTableSink;
-    return result;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/planner/NestedLoopJoinNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/planner/NestedLoopJoinNode.java b/fe/src/main/java/com/cloudera/impala/planner/NestedLoopJoinNode.java
deleted file mode 100644
index e989438..0000000
--- a/fe/src/main/java/com/cloudera/impala/planner/NestedLoopJoinNode.java
+++ /dev/null
@@ -1,133 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.planner;
-
-import java.util.Collections;
-import java.util.List;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.BinaryPredicate;
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.analysis.JoinOperator;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.thrift.TExplainLevel;
-import com.cloudera.impala.thrift.TNestedLoopJoinNode;
-import com.cloudera.impala.thrift.TPlanNode;
-import com.cloudera.impala.thrift.TPlanNodeType;
-import com.cloudera.impala.thrift.TQueryOptions;
-import com.google.common.base.Objects;
-import com.google.common.base.Preconditions;
-
-/**
- * Nested-loop join between left child and right child.
- * Initially, the join operator fully materializes the right input in memory.
- * Subsequently, for every row from the left input it identifies the matching rows
- * from the right hand side and produces the join result according to the join operator.
- * The nested-loop join is used when there are no equi-join predicates. Hence,
- * eqJoinConjuncts_ should be empty and all the join conjuncts are stored in
- * otherJoinConjuncts_. Currently, all join operators are supported except for
- * null-aware anti join.
- *
- * Note: The operator does not spill to disk when there is not enough memory to hold the
- * right input.
- */
-public class NestedLoopJoinNode extends JoinNode {
-  private final static Logger LOG = LoggerFactory.getLogger(NestedLoopJoinNode.class);
-
-  public NestedLoopJoinNode(PlanNode outer, PlanNode inner, boolean isStraightJoin,
-      DistributionMode distrMode, JoinOperator joinOp, List<Expr> otherJoinConjuncts) {
-    super(outer, inner, isStraightJoin, distrMode, joinOp,
-        Collections.<BinaryPredicate>emptyList(), otherJoinConjuncts,
-        "NESTED LOOP JOIN");
-  }
-
-  @Override
-  public void init(Analyzer analyzer) throws ImpalaException {
-    super.init(analyzer);
-    Preconditions.checkState(eqJoinConjuncts_.isEmpty());
-    // Set the proper join operator based on whether predicates are assigned or not.
-    if (conjuncts_.isEmpty() && otherJoinConjuncts_.isEmpty() && !joinOp_.isSemiJoin() &&
-        !joinOp_.isOuterJoin()) {
-      joinOp_ = JoinOperator.CROSS_JOIN;
-    } else if (joinOp_.isCrossJoin()) {
-      // A cross join with predicates is an inner join.
-      joinOp_ = JoinOperator.INNER_JOIN;
-    }
-    orderJoinConjunctsByCost();
-    computeStats(analyzer);
-  }
-
-  @Override
-  public void computeCosts(TQueryOptions queryOptions) {
-    if (getChild(1).getCardinality() == -1 || getChild(1).getAvgRowSize() == -1
-        || numNodes_ == 0) {
-      perHostMemCost_ = DEFAULT_PER_HOST_MEM;
-      return;
-    }
-    perHostMemCost_ =
-        (long) Math.ceil(getChild(1).cardinality_ * getChild(1).avgRowSize_);
-  }
-
-  @Override
-  protected String getNodeExplainString(String prefix, String detailPrefix,
-      TExplainLevel detailLevel) {
-    StringBuilder output = new StringBuilder();
-    String labelDetail = getDisplayLabelDetail();
-    if (labelDetail == null) {
-      output.append(prefix + getDisplayLabel() + "\n");
-    } else {
-      output.append(String.format("%s%s:%s [%s]\n", prefix, id_.toString(),
-          displayName_, getDisplayLabelDetail()));
-    }
-    if (detailLevel.ordinal() >= TExplainLevel.STANDARD.ordinal()) {
-      if (joinTableId_.isValid()) {
-          output.append(
-              detailPrefix + "join table id: " + joinTableId_.toString() + "\n");
-      }
-      if (!otherJoinConjuncts_.isEmpty()) {
-        output.append(detailPrefix + "join predicates: ")
-        .append(getExplainString(otherJoinConjuncts_) + "\n");
-      }
-      if (!conjuncts_.isEmpty()) {
-        output.append(detailPrefix + "predicates: ")
-        .append(getExplainString(conjuncts_) + "\n");
-      }
-    }
-    return output.toString();
-  }
-
-  @Override
-  protected void toThrift(TPlanNode msg) {
-    msg.node_type = TPlanNodeType.NESTED_LOOP_JOIN_NODE;
-    msg.nested_loop_join_node = new TNestedLoopJoinNode();
-    msg.nested_loop_join_node.join_op = joinOp_.toThrift();
-    for (Expr e: otherJoinConjuncts_) {
-      msg.nested_loop_join_node.addToJoin_conjuncts(e.treeToThrift());
-    }
-  }
-
-  @Override
-  protected String debugString() {
-    return Objects.toStringHelper(this)
-        .addValue(super.debugString())
-        .toString();
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/planner/ParallelPlanner.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/planner/ParallelPlanner.java b/fe/src/main/java/com/cloudera/impala/planner/ParallelPlanner.java
deleted file mode 100644
index 905d68d..0000000
--- a/fe/src/main/java/com/cloudera/impala/planner/ParallelPlanner.java
+++ /dev/null
@@ -1,205 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.planner;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.common.IdGenerator;
-import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-
-/**
- * The parallel planner is responsible for breaking up a single distributed plan
- * (= tree of PlanFragments) into a (logical) tree of distributed plans. The root
- * of that tree produces the query result, all the other ones produce intermediate
- * join build sides. All plans that produce intermediate join build sides (one per join
- * node in the recipient) for a single recipient plan are grouped together into a
- * cohort. Since each plan may only produce a build side for at most one recipient
- * plan, each plan belongs to exactly one cohort.
- *
- * TODO: if the input to the JoinBuildSink is the result of a grouping aggregation
- * on the join keys, the AggregationNode should materialize the final hash table
- * directly (instead of reading the hash table content and feeding it into a
- * JoinBuildSink to build another hash table)
- *
- * TODO: instead of cohort ids, create a Plan class that is a subclass of TreeNode?
- */
-public class ParallelPlanner {
-  private final static Logger LOG = LoggerFactory.getLogger(ParallelPlanner.class);
-
-  private final IdGenerator<JoinTableId> joinTableIdGenerator_ =
-      JoinTableId.createGenerator();
-  private final IdGenerator<PlanId> planIdGenerator_ = PlanId.createGenerator();
-  private final IdGenerator<CohortId> cohortIdGenerator_ = CohortId.createGenerator();
-  private final PlannerContext ctx_;
-
-  private List<PlanFragment> planRoots_ = Lists.newArrayList();
-
-  public ParallelPlanner(PlannerContext ctx) { ctx_ = ctx; }
-
-  /**
-   * Given a distributed plan, return list of plans ready for parallel execution.
-   * The last plan in the sequence materializes the query result, the preceding
-   * plans materialize the build sides of joins.
-   * Assigns cohortId and planId for all fragments.
-   * TODO: create class DistributedPlan with a PlanFragment member, so we don't
-   * need to distinguish PlanFragment and Plan through comments?
-   */
-  public List<PlanFragment> createPlans(PlanFragment root) {
-    root.setPlanId(planIdGenerator_.getNextId());
-    root.setCohortId(cohortIdGenerator_.getNextId());
-    planRoots_.add(root);
-    createBuildPlans(root, null);
-    return planRoots_;
-  }
-
-  /**
-   * Recursively traverse tree of fragments of 'plan' from top to bottom and
-   * move all build inputs of joins into separate plans. 'buildCohortId' is the
-   * cohort id of the build plans of 'fragment' and may be null if the plan
-   * to which 'fragment' belongs has so far not required any build plans.
-   * Assign fragment's plan id and cohort id to children.
-   */
-  private void createBuildPlans(PlanFragment fragment, CohortId buildCohortId) {
-    LOG.info("createbuildplans fragment " + fragment.getId().toString());
-    List<JoinNode> joins = Lists.newArrayList();
-    collectJoins(fragment.getPlanRoot(), joins);
-    if (!joins.isEmpty()) {
-      List<String> joinIds = Lists.newArrayList();
-      for (JoinNode join: joins) joinIds.add(join.getId().toString());
-      LOG.info("collected joins " + Joiner.on(" ").join(joinIds));
-
-      if (buildCohortId == null) buildCohortId = cohortIdGenerator_.getNextId();
-      for (JoinNode join: joins) createBuildPlan(join, buildCohortId);
-    }
-
-    if (!fragment.getChildren().isEmpty()) {
-      List<String> ids = Lists.newArrayList();
-      for (PlanFragment c: fragment.getChildren()) ids.add(c.getId().toString());
-      LOG.info("collected children " + Joiner.on(" ").join(ids) + " parent "
-          + fragment.getId().toString());
-    }
-    for (PlanFragment child: fragment.getChildren()) {
-      child.setPlanId(fragment.getPlanId());
-      child.setCohortId(fragment.getCohortId());
-      createBuildPlans(child, buildCohortId);
-    }
-  }
-
-  /**
-   * Collect all JoinNodes that aren't themselves the build side of a join node
-   * in this fragment or the rhs of a SubplanNode.
-   */
-  private void collectJoins(PlanNode node, List<JoinNode> result) {
-    if (node instanceof JoinNode) {
-      result.add((JoinNode)node);
-      // for joins, only descend through the probe side;
-      // we're recursively traversing the build side when constructing the build plan
-      // in createBuildPlan()
-      collectJoins(node.getChild(0), result);
-      return;
-    }
-    if (node instanceof ExchangeNode) return;
-    if (node instanceof SubplanNode) {
-      collectJoins(node.getChild(0), result);
-      return;
-    }
-    for (PlanNode child: node.getChildren()) collectJoins(child, result);
-  }
-
-  /**
-   * Collect all ExchangeNodes in this fragment.
-   */
-  private void collectExchangeNodes(PlanNode node, List<ExchangeNode> result) {
-    if (node instanceof ExchangeNode) {
-      result.add((ExchangeNode)node);
-      return;
-    }
-    for (PlanNode child: node.getChildren()) collectExchangeNodes(child, result);
-  }
-
-  /**
-   * Create new plan that materializes build input of 'join' and assign it 'cohortId'.
-   * In the process, moves all fragments required for this materialization from tree
-   * rooted at 'join's fragment into the new plan.
-   * Also assigns the new plan a plan id.
-   */
-  private void createBuildPlan(JoinNode join, CohortId cohortId) {
-    LOG.info("createbuildplan " + join.getId().toString());
-    Preconditions.checkNotNull(cohortId);
-    // collect all ExchangeNodes on the build side and their corresponding input
-    // fragments
-    final List<ExchangeNode> exchNodes = Lists.newArrayList();
-    collectExchangeNodes(join.getChild(1), exchNodes);
-
-    com.google.common.base.Predicate<PlanFragment> isInputFragment =
-        new com.google.common.base.Predicate<PlanFragment>() {
-          public boolean apply(PlanFragment f) {
-            // we're starting with the fragment containing the join, which might
-            // be terminal
-            if (f.getDestNode() == null) return false;
-            for (ExchangeNode exch: exchNodes) {
-              if (exch.getId() == f.getDestNode().getId()) return true;
-            }
-            return false;
-          }
-        };
-    List<PlanFragment> inputFragments = Lists.newArrayList();
-    join.getFragment().collect(isInputFragment, inputFragments);
-    Preconditions.checkState(exchNodes.size() == inputFragments.size());
-
-    // Create new fragment with JoinBuildSink that consumes the output of the
-    // join's rhs input (the one that materializes the build side).
-    // The new fragment has the same data partition as the join node's fragment.
-    JoinBuildSink buildSink =
-        new JoinBuildSink(joinTableIdGenerator_.getNextId(), join);
-    join.setJoinTableId(buildSink.getJoinTableId());
-    // c'tor fixes up PlanNode.fragment_
-    PlanFragment buildFragment = new PlanFragment(ctx_.getNextFragmentId(),
-        join.getChild(1), join.getFragment().getDataPartition());
-    buildFragment.setSink(buildSink);
-
-    // move input fragments
-    for (int i = 0; i < exchNodes.size(); ++i) {
-      LOG.info("re-link fragment " + inputFragments.get(i).getId().toString() + " to "
-          + exchNodes.get(i).getFragment().getId().toString());
-      Preconditions.checkState(exchNodes.get(i).getFragment() == buildFragment);
-      join.getFragment().removeChild(inputFragments.get(i));
-      buildFragment.getChildren().add(inputFragments.get(i));
-    }
-
-    // assign plan and cohort id
-    buildFragment.setPlanId(planIdGenerator_.getNextId());
-    PlanId parentPlanId = join.getFragment().getPlanId();
-    buildFragment.setCohortId(cohortId);
-
-    planRoots_.add(buildFragment);
-    LOG.info("new build fragment " + buildFragment.getId().toString());
-    LOG.info("in cohort " + buildFragment.getCohortId().toString());
-    LOG.info("for join node " + join.getId().toString());
-    createBuildPlans(buildFragment, null);
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/planner/PipelinedPlanNodeSet.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/planner/PipelinedPlanNodeSet.java b/fe/src/main/java/com/cloudera/impala/planner/PipelinedPlanNodeSet.java
deleted file mode 100644
index 6714213..0000000
--- a/fe/src/main/java/com/cloudera/impala/planner/PipelinedPlanNodeSet.java
+++ /dev/null
@@ -1,215 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.planner;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Set;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.thrift.TQueryOptions;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Sets;
-
-/**
- * Represents a set of PlanNodes and DataSinks that execute and consume resources
- * concurrently. PlanNodes and DataSinks in such a pipelined plan node set may belong
- * to different plan fragments because data is streamed across fragments.
- *
- * For example, a series of left-deep joins consists of two plan node sets. The first
- * set contains all build-side nodes. The second set contains the leftmost
- * scan. Both sets contain all join nodes because they execute and consume
- * resources during the build and probe phases. Similarly, all nodes below a 'blocking'
- * node (e.g., an AggregationNode) are placed into a different plan node set than the
- * nodes above it, but the blocking node itself belongs to both sets.
- */
-public class PipelinedPlanNodeSet {
-  private final static Logger LOG = LoggerFactory.getLogger(PipelinedPlanNodeSet.class);
-
-  // Minimum per-host resource requirements to ensure that no plan node set can have
-  // estimates of zero, even if the contained PlanNodes have estimates of zero.
-  public static final long MIN_PER_HOST_MEM = 10 * 1024 * 1024;
-  public static final int MIN_PER_HOST_VCORES = 1;
-
-  // List of plan nodes that execute and consume resources concurrently.
-  private final ArrayList<PlanNode> planNodes = Lists.newArrayList();
-
-  // DataSinks that execute and consume resources concurrently.
-  // Primarily used for estimating the cost of insert queries.
-  private final List<DataSink> dataSinks = Lists.newArrayList();
-
-  // Estimated per-host memory and CPU requirements.
-  // Valid after computeResourceEstimates().
-  private long perHostMem = MIN_PER_HOST_MEM;
-  private int perHostVcores = MIN_PER_HOST_VCORES;
-
-  public void add(PlanNode node) {
-    Preconditions.checkNotNull(node.getFragment());
-    planNodes.add(node);
-  }
-
-  public void addSink(DataSink sink) {
-    Preconditions.checkNotNull(sink);
-    dataSinks.add(sink);
-  }
-
-  /**
-   * Computes the estimated per-host memory and CPU requirements of this plan node set.
-   * Optionally excludes unpartitioned fragments from the estimation.
-   * Returns true if at least one plan node was included in the estimation.
-   * Otherwise returns false indicating the estimates are invalid.
-   */
-  public boolean computeResourceEstimates(boolean excludeUnpartitionedFragments,
-      TQueryOptions queryOptions) {
-    Set<PlanFragment> uniqueFragments = Sets.newHashSet();
-
-    // Distinguish the per-host memory estimates for scan nodes and non-scan nodes to
-    // get a tighter estimate on the amount of memory required by multiple concurrent
-    // scans. The memory required by all concurrent scans of the same type (Hdfs/Hbase)
-    // cannot exceed the per-host upper memory bound for that scan type. Intuitively,
-    // the amount of I/O buffers is limited by the disk bandwidth.
-    long perHostHbaseScanMem = 0L;
-    long perHostHdfsScanMem = 0L;
-    long perHostNonScanMem = 0L;
-
-    for (int i = 0; i < planNodes.size(); ++i) {
-      PlanNode node = planNodes.get(i);
-      PlanFragment fragment = node.getFragment();
-      if (!fragment.isPartitioned() && excludeUnpartitionedFragments) continue;
-      node.computeCosts(queryOptions);
-      uniqueFragments.add(fragment);
-      if (node.getPerHostMemCost() < 0) {
-        LOG.warn(String.format("Invalid per-host memory requirement %s of node %s.\n" +
-            "PlanNode stats are: numNodes_=%s ", node.getPerHostMemCost(),
-            node.getClass().getSimpleName(), node.getNumNodes()));
-      }
-      if (node instanceof HBaseScanNode) {
-        perHostHbaseScanMem += node.getPerHostMemCost();
-      } else if (node instanceof HdfsScanNode) {
-        perHostHdfsScanMem += node.getPerHostMemCost();
-      } else {
-        perHostNonScanMem += node.getPerHostMemCost();
-      }
-    }
-
-    // The memory required by concurrent scans cannot exceed the upper memory bound
-    // for that scan type.
-    // TODO: In the future, we may want to restrict scanner concurrency based on a
-    // memory limit. This estimation will need to account for that as well.
-    perHostHbaseScanMem =
-        Math.min(perHostHbaseScanMem, HBaseScanNode.getPerHostMemUpperBound());
-    perHostHdfsScanMem =
-        Math.min(perHostHdfsScanMem, HdfsScanNode.getPerHostMemUpperBound());
-
-    long perHostDataSinkMem = 0L;
-    for (int i = 0; i < dataSinks.size(); ++i) {
-      DataSink sink = dataSinks.get(i);
-      PlanFragment fragment = sink.getFragment();
-      if (!fragment.isPartitioned() && excludeUnpartitionedFragments) continue;
-      // Sanity check that this plan-node set has at least one PlanNode of fragment.
-      Preconditions.checkState(uniqueFragments.contains(fragment));
-      sink.computeCosts();
-      if (sink.getPerHostMemCost() < 0) {
-        LOG.warn(String.format("Invalid per-host memory requirement %s of sink %s.\n",
-            sink.getPerHostMemCost(), sink.getClass().getSimpleName()));
-      }
-      perHostDataSinkMem += sink.getPerHostMemCost();
-    }
-
-    // Combine the memory estimates of all sinks, scan nodes and non-scan nodes.
-    long perHostMem = perHostHdfsScanMem + perHostHbaseScanMem + perHostNonScanMem +
-        perHostDataSinkMem;
-
-    // The backend needs at least one thread per fragment.
-    int perHostVcores = uniqueFragments.size();
-
-    // This plan node set might only have unpartitioned fragments.
-    // Only set estimates if they are valid.
-    if (perHostMem >= 0 && perHostVcores >= 0) {
-      this.perHostMem = perHostMem;
-      this.perHostVcores = perHostVcores;
-      return true;
-    }
-    return false;
-  }
-
-  public long getPerHostMem() { return perHostMem; }
-  public int getPerHostVcores() { return perHostVcores; }
-
-  /**
-   * Computes and returns the pipelined plan node sets of the given plan.
-   */
-  public static ArrayList<PipelinedPlanNodeSet> computePlanNodeSets(PlanNode root) {
-    ArrayList<PipelinedPlanNodeSet> planNodeSets =
-        Lists.newArrayList(new PipelinedPlanNodeSet());
-    computePlanNodeSets(root, planNodeSets.get(0), null, planNodeSets);
-    return planNodeSets;
-  }
-
-  /**
-   * Populates 'planNodeSets' by recursively traversing the plan tree rooted at 'node'.
-   * The plan node sets are computed top-down. As a result, the plan node sets are added
-   * in reverse order of their runtime execution.
-   *
-   * Nodes are generally added to lhsSet. Joins are treated specially in that their
-   * left child is added to lhsSet and their right child to rhsSet to make sure
-   * that concurrent join builds end up in the same plan node set.
-   */
-  private static void computePlanNodeSets(PlanNode node, PipelinedPlanNodeSet lhsSet,
-      PipelinedPlanNodeSet rhsSet, ArrayList<PipelinedPlanNodeSet> planNodeSets) {
-    lhsSet.add(node);
-    if (node == node.getFragment().getPlanRoot() && node.getFragment().hasSink()) {
-      lhsSet.addSink(node.getFragment().getSink());
-    }
-
-    if (node instanceof HashJoinNode) {
-      // Create a new set for the right-hand sides of joins if necessary.
-      if (rhsSet == null) {
-        rhsSet = new PipelinedPlanNodeSet();
-        planNodeSets.add(rhsSet);
-      }
-      // The join node itself is added to the lhsSet (above) and the rhsSet.
-      rhsSet.add(node);
-      computePlanNodeSets(node.getChild(1), rhsSet, null, planNodeSets);
-      computePlanNodeSets(node.getChild(0), lhsSet, rhsSet, planNodeSets);
-      return;
-    }
-
-    if (node.isBlockingNode()) {
-      // We add blocking nodes to two plan node sets because they require resources while
-      // consuming their input (execution of the preceding set) and while they
-      // emit their output (execution of the following set).
-      lhsSet = new PipelinedPlanNodeSet();
-      lhsSet.add(node);
-      planNodeSets.add(lhsSet);
-      // Join builds under this blocking node belong in a new rhsSet.
-      rhsSet = null;
-    }
-
-    // Assume that non-join, non-blocking nodes with multiple children
-    // (e.g., ExchangeNodes) consume their inputs in an arbitrary order,
-    // i.e., all child subtrees execute concurrently.
-    // TODO: This is not true for UnionNodes anymore. Fix the estimates accordingly.
-    for (PlanNode child: node.getChildren()) {
-      computePlanNodeSets(child, lhsSet, rhsSet, planNodeSets);
-    }
-  }
-}


[51/61] [partial] incubator-impala git commit: IMPALA-3786: Replace "cloudera" with "apache" (part 1)

Posted by kw...@apache.org.
IMPALA-3786: Replace "cloudera" with "apache" (part 1)

As part of the ASF transition, we need to replace references to
Cloudera in Impala with references to Apache. This primarily means
changing Java package names from com.cloudera.impala.* to
org.apache.impala.*

To make this easier to review, this patch only renames files,
e.g. fe/src/main/java/com/cloudera -> fe/src/main/java/org/apache

A follow-up patch performs the actual code updates.
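
For illustration, that follow-up is essentially a mechanical edit of package and import
statements so they match the moved directories. A minimal sketch, using the planner
package and the Id class that appear in the diffs above (file contents are otherwise
unchanged):

// Before: file lives under fe/src/main/java/com/cloudera/impala/planner/
package com.cloudera.impala.planner;

import com.cloudera.impala.common.Id;

// After: the same file, moved under fe/src/main/java/org/apache/impala/planner/
package org.apache.impala.planner;

import org.apache.impala.common.Id;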

Change-Id: I3767dd1ee86df767075fdf1b371eb6b0b06668db
Reviewed-on: http://gerrit.cloudera.org:8080/3936
Reviewed-by: Thomas Tauber-Marshall <tm...@cloudera.com>
Reviewed-by: Jim Apple <jb...@cloudera.com>
Tested-by: Internal Jenkins


Project: http://git-wip-us.apache.org/repos/asf/incubator-impala/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-impala/commit/b544f019
Tree: http://git-wip-us.apache.org/repos/asf/incubator-impala/tree/b544f019
Diff: http://git-wip-us.apache.org/repos/asf/incubator-impala/diff/b544f019

Branch: refs/heads/master
Commit: b544f019aabc1d3aee4368966be8dab478a28f03
Parents: a5e84ac
Author: Thomas Tauber-Marshall <tm...@cloudera.com>
Authored: Wed Aug 10 15:00:46 2016 -0700
Committer: Internal Jenkins <cl...@gerrit.cloudera.org>
Committed: Thu Sep 29 21:13:52 2016 +0000

----------------------------------------------------------------------
 .../extdatasource/util/SerializationUtils.java  |   48 -
 .../extdatasource/v1/ExternalDataSource.java    |   65 -
 .../extdatasource/util/SerializationUtils.java  |   48 +
 .../extdatasource/v1/ExternalDataSource.java    |   65 +
 .../extdatasource/sample/EchoDataSource.java    |   75 -
 .../extdatasource/sample/EchoDataSource.java    |   75 +
 .../extdatasource/AllTypesDataSource.java       |  323 --
 .../extdatasource/AllTypesDataSource.java       |  323 ++
 .../cloudera/impala/analysis/AggregateInfo.java |  742 ----
 .../impala/analysis/AggregateInfoBase.java      |  221 --
 .../analysis/AlterTableAddPartitionStmt.java    |  117 -
 .../analysis/AlterTableAddReplaceColsStmt.java  |  108 -
 .../analysis/AlterTableChangeColStmt.java       |  101 -
 .../impala/analysis/AlterTableDropColStmt.java  |   84 -
 .../analysis/AlterTableDropPartitionStmt.java   |   79 -
 .../analysis/AlterTableOrViewRenameStmt.java    |   90 -
 .../AlterTableRecoverPartitionsStmt.java        |   56 -
 .../analysis/AlterTableSetCachedStmt.java       |   92 -
 .../analysis/AlterTableSetColumnStats.java      |  158 -
 .../analysis/AlterTableSetFileFormatStmt.java   |   57 -
 .../analysis/AlterTableSetLocationStmt.java     |   86 -
 .../impala/analysis/AlterTableSetStmt.java      |   58 -
 .../analysis/AlterTableSetTblProperties.java    |  150 -
 .../impala/analysis/AlterTableStmt.java         |   91 -
 .../cloudera/impala/analysis/AlterViewStmt.java |   71 -
 .../impala/analysis/AnalysisContext.java        |  544 ---
 .../cloudera/impala/analysis/AnalyticExpr.java  |  839 -----
 .../cloudera/impala/analysis/AnalyticInfo.java  |  199 -
 .../impala/analysis/AnalyticWindow.java         |  417 ---
 .../com/cloudera/impala/analysis/Analyzer.java  | 2932 ---------------
 .../impala/analysis/ArithmeticExpr.java         |  268 --
 .../impala/analysis/AuthorizationStmt.java      |   49 -
 .../cloudera/impala/analysis/BaseTableRef.java  |   98 -
 .../impala/analysis/BetweenPredicate.java       |  158 -
 .../impala/analysis/BinaryPredicate.java        |  388 --
 .../cloudera/impala/analysis/BoolLiteral.java   |  113 -
 .../com/cloudera/impala/analysis/CaseExpr.java  |  379 --
 .../impala/analysis/CaseWhenClause.java         |   42 -
 .../com/cloudera/impala/analysis/CastExpr.java  |  312 --
 .../impala/analysis/CollectionStructType.java   |   79 -
 .../impala/analysis/CollectionTableRef.java     |  138 -
 .../com/cloudera/impala/analysis/ColumnDef.java |  143 -
 .../impala/analysis/ColumnLineageGraph.java     |  680 ----
 .../impala/analysis/CompoundPredicate.java      |  216 --
 .../impala/analysis/ComputeStatsStmt.java       |  553 ---
 .../impala/analysis/CreateDataSrcStmt.java      |   97 -
 .../cloudera/impala/analysis/CreateDbStmt.java  |  102 -
 .../impala/analysis/CreateDropRoleStmt.java     |   63 -
 .../impala/analysis/CreateFunctionStmtBase.java |  206 --
 .../analysis/CreateOrAlterViewStmtBase.java     |  209 --
 .../analysis/CreateTableAsSelectStmt.java       |  212 --
 .../impala/analysis/CreateTableDataSrcStmt.java |   95 -
 .../analysis/CreateTableLikeFileStmt.java       |  375 --
 .../impala/analysis/CreateTableLikeStmt.java    |  157 -
 .../impala/analysis/CreateTableStmt.java        |  416 ---
 .../cloudera/impala/analysis/CreateUdaStmt.java |  209 --
 .../cloudera/impala/analysis/CreateUdfStmt.java |  140 -
 .../impala/analysis/CreateViewStmt.java         |   80 -
 .../cloudera/impala/analysis/DeleteStmt.java    |   90 -
 .../impala/analysis/DescribeDbStmt.java         |   71 -
 .../impala/analysis/DescribeTableStmt.java      |  152 -
 .../impala/analysis/DescriptorTable.java        |  198 -
 .../impala/analysis/DistributeParam.java        |  199 -
 .../impala/analysis/DropDataSrcStmt.java        |   61 -
 .../cloudera/impala/analysis/DropDbStmt.java    |   79 -
 .../impala/analysis/DropFunctionStmt.java       |  113 -
 .../cloudera/impala/analysis/DropStatsStmt.java |  105 -
 .../impala/analysis/DropTableOrViewStmt.java    |  115 -
 .../impala/analysis/EquivalenceClassId.java     |   37 -
 .../impala/analysis/ExistsPredicate.java        |   82 -
 .../java/com/cloudera/impala/analysis/Expr.java | 1258 -------
 .../com/cloudera/impala/analysis/ExprId.java    |   37 -
 .../impala/analysis/ExprSubstitutionMap.java    |  176 -
 .../impala/analysis/ExtractFromExpr.java        |  111 -
 .../cloudera/impala/analysis/FromClause.java    |  129 -
 .../cloudera/impala/analysis/FunctionArgs.java  |   67 -
 .../impala/analysis/FunctionCallExpr.java       |  516 ---
 .../cloudera/impala/analysis/FunctionName.java  |  148 -
 .../impala/analysis/FunctionParams.java         |   68 -
 .../impala/analysis/GrantRevokePrivStmt.java    |   94 -
 .../impala/analysis/GrantRevokeRoleStmt.java    |   72 -
 .../cloudera/impala/analysis/HdfsCachingOp.java |   93 -
 .../com/cloudera/impala/analysis/HdfsUri.java   |  128 -
 .../cloudera/impala/analysis/InPredicate.java   |  234 --
 .../cloudera/impala/analysis/InlineViewRef.java |  339 --
 .../cloudera/impala/analysis/InsertStmt.java    |  697 ----
 .../impala/analysis/IsNotEmptyPredicate.java    |   65 -
 .../impala/analysis/IsNullPredicate.java        |  186 -
 .../cloudera/impala/analysis/JoinOperator.java  |  112 -
 .../cloudera/impala/analysis/LikePredicate.java |  169 -
 .../cloudera/impala/analysis/LimitElement.java  |  183 -
 .../cloudera/impala/analysis/LiteralExpr.java   |  242 --
 .../cloudera/impala/analysis/LoadDataStmt.java  |  241 --
 .../cloudera/impala/analysis/ModifyStmt.java    |  292 --
 .../cloudera/impala/analysis/NullLiteral.java   |   90 -
 .../impala/analysis/NumericLiteral.java         |  317 --
 .../impala/analysis/OrderByElement.java         |  156 -
 .../com/cloudera/impala/analysis/ParseNode.java |   34 -
 .../impala/analysis/PartitionKeyValue.java      |   88 -
 .../impala/analysis/PartitionListItem.java      |   37 -
 .../cloudera/impala/analysis/PartitionSpec.java |  201 -
 .../java/com/cloudera/impala/analysis/Path.java |  448 ---
 .../com/cloudera/impala/analysis/Predicate.java |   97 -
 .../cloudera/impala/analysis/PrivilegeSpec.java |  287 --
 .../com/cloudera/impala/analysis/QueryStmt.java |  458 ---
 .../impala/analysis/ResetMetadataStmt.java      |  110 -
 .../cloudera/impala/analysis/SelectList.java    |   94 -
 .../impala/analysis/SelectListItem.java         |  129 -
 .../cloudera/impala/analysis/SelectStmt.java    | 1021 -----
 .../com/cloudera/impala/analysis/SetStmt.java   |   69 -
 .../impala/analysis/ShowCreateFunctionStmt.java |   74 -
 .../impala/analysis/ShowCreateTableStmt.java    |   80 -
 .../impala/analysis/ShowDataSrcsStmt.java       |   72 -
 .../cloudera/impala/analysis/ShowDbsStmt.java   |   74 -
 .../cloudera/impala/analysis/ShowFilesStmt.java |   90 -
 .../impala/analysis/ShowFunctionsStmt.java      |   94 -
 .../impala/analysis/ShowGrantRoleStmt.java      |   77 -
 .../impala/analysis/ShowPartitionsStmt.java     |   55 -
 .../cloudera/impala/analysis/ShowRolesStmt.java |   72 -
 .../cloudera/impala/analysis/ShowStatsStmt.java |   65 -
 .../impala/analysis/ShowTablesStmt.java         |  120 -
 .../impala/analysis/SingularRowSrcTableRef.java |   64 -
 .../impala/analysis/SlotDescriptor.java         |  261 --
 .../com/cloudera/impala/analysis/SlotId.java    |   37 -
 .../com/cloudera/impala/analysis/SlotRef.java   |  243 --
 .../com/cloudera/impala/analysis/SortInfo.java  |  131 -
 .../cloudera/impala/analysis/StatementBase.java |  141 -
 .../cloudera/impala/analysis/StmtRewriter.java  |  905 -----
 .../cloudera/impala/analysis/StringLiteral.java |  174 -
 .../com/cloudera/impala/analysis/Subquery.java  |  157 -
 .../com/cloudera/impala/analysis/TableName.java |  123 -
 .../com/cloudera/impala/analysis/TableRef.java  |  587 ---
 .../analysis/TimestampArithmeticExpr.java       |  215 --
 .../cloudera/impala/analysis/ToSqlUtils.java    |  348 --
 .../cloudera/impala/analysis/TruncateStmt.java  |   82 -
 .../impala/analysis/TupleDescriptor.java        |  310 --
 .../com/cloudera/impala/analysis/TupleId.java   |   40 -
 .../impala/analysis/TupleIsNullPredicate.java   |  185 -
 .../com/cloudera/impala/analysis/TypeDef.java   |  159 -
 .../com/cloudera/impala/analysis/TypesUtil.java |  237 --
 .../com/cloudera/impala/analysis/UnionStmt.java |  607 ---
 .../cloudera/impala/analysis/UpdateStmt.java    |  117 -
 .../com/cloudera/impala/analysis/UseStmt.java   |   55 -
 .../cloudera/impala/analysis/ValuesStmt.java    |   82 -
 .../cloudera/impala/analysis/WithClause.java    |  140 -
 .../authorization/AuthorizationChecker.java     |  190 -
 .../authorization/AuthorizationConfig.java      |  148 -
 .../impala/authorization/Authorizeable.java     |   56 -
 .../authorization/AuthorizeableColumn.java      |   66 -
 .../impala/authorization/AuthorizeableDb.java   |   48 -
 .../impala/authorization/AuthorizeableFn.java   |   45 -
 .../authorization/AuthorizeableServer.java      |   49 -
 .../authorization/AuthorizeableTable.java       |   63 -
 .../impala/authorization/AuthorizeableUri.java  |   47 -
 .../authorization/ImpalaInternalAdminUser.java  |   36 -
 .../impala/authorization/Privilege.java         |   70 -
 .../impala/authorization/PrivilegeRequest.java  |   77 -
 .../authorization/PrivilegeRequestBuilder.java  |  119 -
 .../impala/authorization/SentryConfig.java      |   74 -
 .../com/cloudera/impala/authorization/User.java |  104 -
 .../impala/catalog/AggregateFunction.java       |  241 --
 .../com/cloudera/impala/catalog/ArrayType.java  |   71 -
 .../impala/catalog/AuthorizationException.java  |   32 -
 .../impala/catalog/AuthorizationPolicy.java     |  340 --
 .../com/cloudera/impala/catalog/BuiltinsDb.java | 1052 ------
 .../com/cloudera/impala/catalog/Catalog.java    |  522 ---
 .../impala/catalog/CatalogDeltaLog.java         |  120 -
 .../impala/catalog/CatalogException.java        |   36 -
 .../cloudera/impala/catalog/CatalogObject.java  |   40 -
 .../impala/catalog/CatalogObjectCache.java      |  148 -
 .../impala/catalog/CatalogServiceCatalog.java   | 1268 -------
 .../com/cloudera/impala/catalog/Column.java     |  132 -
 .../impala/catalog/ColumnNotFoundException.java |   31 -
 .../cloudera/impala/catalog/ColumnStats.java    |  334 --
 .../com/cloudera/impala/catalog/DataSource.java |   88 -
 .../impala/catalog/DataSourceTable.java         |  259 --
 .../catalog/DatabaseNotFoundException.java      |   29 -
 .../java/com/cloudera/impala/catalog/Db.java    |  495 ---
 .../com/cloudera/impala/catalog/Function.java   |  488 ---
 .../cloudera/impala/catalog/HBaseColumn.java    |   67 -
 .../com/cloudera/impala/catalog/HBaseTable.java |  853 -----
 .../cloudera/impala/catalog/HdfsCachePool.java  |   65 -
 .../impala/catalog/HdfsCompression.java         |   85 -
 .../cloudera/impala/catalog/HdfsFileFormat.java |  261 --
 .../cloudera/impala/catalog/HdfsPartition.java  |  791 ----
 .../HdfsPartitionLocationCompressor.java        |  153 -
 .../impala/catalog/HdfsStorageDescriptor.java   |  240 --
 .../com/cloudera/impala/catalog/HdfsTable.java  | 1958 ----------
 .../catalog/HiveStorageDescriptorFactory.java   |   63 -
 .../cloudera/impala/catalog/ImpaladCatalog.java |  448 ---
 .../impala/catalog/IncompleteTable.java         |  134 -
 .../com/cloudera/impala/catalog/KuduColumn.java |   52 -
 .../com/cloudera/impala/catalog/KuduTable.java  |  278 --
 .../com/cloudera/impala/catalog/MapType.java    |   80 -
 .../impala/catalog/MetaStoreClientPool.java     |  189 -
 .../catalog/PartitionNotFoundException.java     |   31 -
 .../impala/catalog/PartitionStatsUtil.java      |  141 -
 .../cloudera/impala/catalog/PrimitiveType.java  |  103 -
 .../java/com/cloudera/impala/catalog/Role.java  |  144 -
 .../cloudera/impala/catalog/RolePrivilege.java  |  138 -
 .../com/cloudera/impala/catalog/RowFormat.java  |  109 -
 .../cloudera/impala/catalog/ScalarFunction.java |  295 --
 .../com/cloudera/impala/catalog/ScalarType.java |  478 ---
 .../cloudera/impala/catalog/StructField.java    |   92 -
 .../com/cloudera/impala/catalog/StructType.java |  107 -
 .../java/com/cloudera/impala/catalog/Table.java |  484 ---
 .../com/cloudera/impala/catalog/TableId.java    |   42 -
 .../cloudera/impala/catalog/TableLoader.java    |  101 -
 .../impala/catalog/TableLoadingException.java   |   35 -
 .../impala/catalog/TableLoadingMgr.java         |  333 --
 .../impala/catalog/TableNotFoundException.java  |   31 -
 .../java/com/cloudera/impala/catalog/Type.java  |  781 ----
 .../java/com/cloudera/impala/catalog/View.java  |  207 --
 .../impala/catalog/delegates/DdlDelegate.java   |   75 -
 .../catalog/delegates/KuduDdlDelegate.java      |  190 -
 .../delegates/UnsupportedOpDelegate.java        |   35 -
 .../cloudera/impala/common/AliasGenerator.java  |   48 -
 .../impala/common/AnalysisException.java        |   32 -
 .../com/cloudera/impala/common/ByteUnits.java   |   49 -
 .../impala/common/ColumnAliasGenerator.java     |   32 -
 .../cloudera/impala/common/FileSystemUtil.java  |  409 --
 .../java/com/cloudera/impala/common/Id.java     |   76 -
 .../com/cloudera/impala/common/IdGenerator.java |   28 -
 .../cloudera/impala/common/ImpalaException.java |   33 -
 .../impala/common/ImpalaRuntimeException.java   |   32 -
 .../impala/common/InternalException.java        |   32 -
 .../com/cloudera/impala/common/JniUtil.java     |  165 -
 .../impala/common/NotImplementedException.java  |   28 -
 .../java/com/cloudera/impala/common/Pair.java   |   55 -
 .../com/cloudera/impala/common/PrintUtils.java  |   86 -
 .../com/cloudera/impala/common/Reference.java   |   42 -
 .../com/cloudera/impala/common/RuntimeEnv.java  |   71 -
 .../impala/common/TableAliasGenerator.java      |   37 -
 .../com/cloudera/impala/common/TreeNode.java    |  182 -
 .../impala/extdatasource/ApiVersion.java        |   53 -
 .../ExternalDataSourceExecutor.java             |  267 --
 .../hive/executor/ImpalaBigIntWritable.java     |   38 -
 .../hive/executor/ImpalaBooleanWritable.java    |   38 -
 .../hive/executor/ImpalaBytesWritable.java      |   56 -
 .../hive/executor/ImpalaDoubleWritable.java     |   38 -
 .../hive/executor/ImpalaFloatWritable.java      |   38 -
 .../impala/hive/executor/ImpalaIntWritable.java |   38 -
 .../hive/executor/ImpalaSmallIntWritable.java   |   38 -
 .../hive/executor/ImpalaStringWritable.java     |  117 -
 .../hive/executor/ImpalaTextWritable.java       |   47 -
 .../hive/executor/ImpalaTinyIntWritable.java    |   38 -
 .../impala/hive/executor/UdfExecutor.java       |  643 ----
 .../impala/planner/AggregationNode.java         |  292 --
 .../impala/planner/AnalyticEvalNode.java        |  249 --
 .../impala/planner/AnalyticPlanner.java         |  815 ----
 .../com/cloudera/impala/planner/CohortId.java   |   42 -
 .../cloudera/impala/planner/DataPartition.java  |  131 -
 .../com/cloudera/impala/planner/DataSink.java   |   64 -
 .../impala/planner/DataSourceScanNode.java      |  371 --
 .../cloudera/impala/planner/DataStreamSink.java |   61 -
 .../impala/planner/DistributedPlanner.java      | 1019 -----
 .../cloudera/impala/planner/EmptySetNode.java   |   71 -
 .../cloudera/impala/planner/ExchangeNode.java   |  204 -
 .../cloudera/impala/planner/HBaseScanNode.java  |  510 ---
 .../cloudera/impala/planner/HBaseTableSink.java |   59 -
 .../cloudera/impala/planner/HashJoinNode.java   |  193 -
 .../impala/planner/HdfsPartitionFilter.java     |  127 -
 .../impala/planner/HdfsPartitionPruner.java     |  475 ---
 .../cloudera/impala/planner/HdfsScanNode.java   |  677 ----
 .../cloudera/impala/planner/HdfsTableSink.java  |  157 -
 .../cloudera/impala/planner/JoinBuildSink.java  |  103 -
 .../com/cloudera/impala/planner/JoinNode.java   |  508 ---
 .../cloudera/impala/planner/JoinTableId.java    |   47 -
 .../cloudera/impala/planner/KuduScanNode.java   |  358 --
 .../cloudera/impala/planner/KuduTableSink.java  |   88 -
 .../impala/planner/NestedLoopJoinNode.java      |  133 -
 .../impala/planner/ParallelPlanner.java         |  205 --
 .../impala/planner/PipelinedPlanNodeSet.java    |  215 --
 .../cloudera/impala/planner/PlanFragment.java   |  388 --
 .../cloudera/impala/planner/PlanFragmentId.java |   42 -
 .../com/cloudera/impala/planner/PlanId.java     |   42 -
 .../com/cloudera/impala/planner/PlanNode.java   |  715 ----
 .../com/cloudera/impala/planner/PlanNodeId.java |   42 -
 .../com/cloudera/impala/planner/Planner.java    |  456 ---
 .../cloudera/impala/planner/PlannerContext.java |  100 -
 .../impala/planner/RuntimeFilterGenerator.java  |  590 ---
 .../impala/planner/RuntimeFilterId.java         |   45 -
 .../com/cloudera/impala/planner/ScanNode.java   |  191 -
 .../com/cloudera/impala/planner/SelectNode.java |   93 -
 .../impala/planner/SingleNodePlanner.java       | 1594 --------
 .../impala/planner/SingularRowSrcNode.java      |   82 -
 .../com/cloudera/impala/planner/SortNode.java   |  242 --
 .../cloudera/impala/planner/SubplanNode.java    |  109 -
 .../com/cloudera/impala/planner/TableSink.java  |  114 -
 .../com/cloudera/impala/planner/UnionNode.java  |  231 --
 .../com/cloudera/impala/planner/UnnestNode.java |  107 -
 .../com/cloudera/impala/planner/ValueRange.java |  121 -
 .../cloudera/impala/service/BackendConfig.java  |   49 -
 .../impala/service/CatalogOpExecutor.java       | 2990 ---------------
 .../impala/service/DescribeResultFactory.java   |  246 --
 .../com/cloudera/impala/service/FeSupport.java  |  292 --
 .../com/cloudera/impala/service/Frontend.java   | 1231 -------
 .../com/cloudera/impala/service/JniCatalog.java |  255 --
 .../cloudera/impala/service/JniFrontend.java    |  743 ----
 .../com/cloudera/impala/service/MetadataOp.java |  641 ----
 .../impala/util/AvroSchemaConverter.java        |  209 --
 .../cloudera/impala/util/AvroSchemaParser.java  |  204 -
 .../cloudera/impala/util/AvroSchemaUtils.java   |  189 -
 .../com/cloudera/impala/util/DisjointSet.java   |  142 -
 .../com/cloudera/impala/util/EventSequence.java |   58 -
 .../cloudera/impala/util/FileWatchService.java  |  140 -
 .../impala/util/FsPermissionChecker.java        |  301 --
 .../com/cloudera/impala/util/GlogAppender.java  |  129 -
 .../cloudera/impala/util/HdfsCachingUtil.java   |  515 ---
 .../java/com/cloudera/impala/util/KuduUtil.java |  268 --
 .../java/com/cloudera/impala/util/ListMap.java  |   77 -
 .../impala/util/MaxRowsProcessedVisitor.java    |   66 -
 .../impala/util/MembershipSnapshot.java         |   84 -
 .../com/cloudera/impala/util/MetaStoreUtil.java |  172 -
 .../com/cloudera/impala/util/NativeLibUtil.java |   44 -
 .../com/cloudera/impala/util/NativeLogger.java  |   51 -
 .../cloudera/impala/util/PatternMatcher.java    |  108 -
 .../impala/util/RequestPoolService.java         |  451 ---
 .../impala/util/SentryPolicyService.java        |  466 ---
 .../com/cloudera/impala/util/SentryProxy.java   |  348 --
 .../com/cloudera/impala/util/StatsHelper.java   |   60 -
 .../cloudera/impala/util/TAccessLevelUtil.java  |   35 -
 .../cloudera/impala/util/TColumnValueUtil.java  |   51 -
 .../cloudera/impala/util/TResultRowBuilder.java |   64 -
 .../cloudera/impala/util/TSessionStateUtil.java |   38 -
 .../com/cloudera/impala/util/UnsafeUtil.java    |   68 -
 .../java/com/cloudera/impala/util/Visitor.java  |   27 -
 .../apache/impala/analysis/AggregateInfo.java   |  742 ++++
 .../impala/analysis/AggregateInfoBase.java      |  221 ++
 .../analysis/AlterTableAddPartitionStmt.java    |  117 +
 .../analysis/AlterTableAddReplaceColsStmt.java  |  108 +
 .../analysis/AlterTableChangeColStmt.java       |  101 +
 .../impala/analysis/AlterTableDropColStmt.java  |   84 +
 .../analysis/AlterTableDropPartitionStmt.java   |   79 +
 .../analysis/AlterTableOrViewRenameStmt.java    |   90 +
 .../AlterTableRecoverPartitionsStmt.java        |   56 +
 .../analysis/AlterTableSetCachedStmt.java       |   92 +
 .../analysis/AlterTableSetColumnStats.java      |  158 +
 .../analysis/AlterTableSetFileFormatStmt.java   |   57 +
 .../analysis/AlterTableSetLocationStmt.java     |   86 +
 .../impala/analysis/AlterTableSetStmt.java      |   58 +
 .../analysis/AlterTableSetTblProperties.java    |  150 +
 .../apache/impala/analysis/AlterTableStmt.java  |   91 +
 .../apache/impala/analysis/AlterViewStmt.java   |   71 +
 .../apache/impala/analysis/AnalysisContext.java |  544 +++
 .../apache/impala/analysis/AnalyticExpr.java    |  839 +++++
 .../apache/impala/analysis/AnalyticInfo.java    |  199 +
 .../apache/impala/analysis/AnalyticWindow.java  |  417 +++
 .../org/apache/impala/analysis/Analyzer.java    | 2932 +++++++++++++++
 .../apache/impala/analysis/ArithmeticExpr.java  |  268 ++
 .../impala/analysis/AuthorizationStmt.java      |   49 +
 .../apache/impala/analysis/BaseTableRef.java    |   98 +
 .../impala/analysis/BetweenPredicate.java       |  158 +
 .../apache/impala/analysis/BinaryPredicate.java |  388 ++
 .../org/apache/impala/analysis/BoolLiteral.java |  113 +
 .../org/apache/impala/analysis/CaseExpr.java    |  379 ++
 .../apache/impala/analysis/CaseWhenClause.java  |   42 +
 .../org/apache/impala/analysis/CastExpr.java    |  312 ++
 .../impala/analysis/CollectionStructType.java   |   79 +
 .../impala/analysis/CollectionTableRef.java     |  138 +
 .../org/apache/impala/analysis/ColumnDef.java   |  143 +
 .../impala/analysis/ColumnLineageGraph.java     |  680 ++++
 .../impala/analysis/CompoundPredicate.java      |  216 ++
 .../impala/analysis/ComputeStatsStmt.java       |  553 +++
 .../impala/analysis/CreateDataSrcStmt.java      |   97 +
 .../apache/impala/analysis/CreateDbStmt.java    |  102 +
 .../impala/analysis/CreateDropRoleStmt.java     |   63 +
 .../impala/analysis/CreateFunctionStmtBase.java |  206 ++
 .../analysis/CreateOrAlterViewStmtBase.java     |  209 ++
 .../analysis/CreateTableAsSelectStmt.java       |  212 ++
 .../impala/analysis/CreateTableDataSrcStmt.java |   95 +
 .../analysis/CreateTableLikeFileStmt.java       |  375 ++
 .../impala/analysis/CreateTableLikeStmt.java    |  157 +
 .../apache/impala/analysis/CreateTableStmt.java |  416 +++
 .../apache/impala/analysis/CreateUdaStmt.java   |  209 ++
 .../apache/impala/analysis/CreateUdfStmt.java   |  140 +
 .../apache/impala/analysis/CreateViewStmt.java  |   80 +
 .../org/apache/impala/analysis/DeleteStmt.java  |   90 +
 .../apache/impala/analysis/DescribeDbStmt.java  |   71 +
 .../impala/analysis/DescribeTableStmt.java      |  152 +
 .../apache/impala/analysis/DescriptorTable.java |  198 +
 .../apache/impala/analysis/DistributeParam.java |  199 +
 .../apache/impala/analysis/DropDataSrcStmt.java |   61 +
 .../org/apache/impala/analysis/DropDbStmt.java  |   79 +
 .../impala/analysis/DropFunctionStmt.java       |  113 +
 .../apache/impala/analysis/DropStatsStmt.java   |  105 +
 .../impala/analysis/DropTableOrViewStmt.java    |  115 +
 .../impala/analysis/EquivalenceClassId.java     |   37 +
 .../apache/impala/analysis/ExistsPredicate.java |   82 +
 .../java/org/apache/impala/analysis/Expr.java   | 1258 +++++++
 .../java/org/apache/impala/analysis/ExprId.java |   37 +
 .../impala/analysis/ExprSubstitutionMap.java    |  176 +
 .../apache/impala/analysis/ExtractFromExpr.java |  111 +
 .../org/apache/impala/analysis/FromClause.java  |  129 +
 .../apache/impala/analysis/FunctionArgs.java    |   67 +
 .../impala/analysis/FunctionCallExpr.java       |  516 +++
 .../apache/impala/analysis/FunctionName.java    |  148 +
 .../apache/impala/analysis/FunctionParams.java  |   68 +
 .../impala/analysis/GrantRevokePrivStmt.java    |   94 +
 .../impala/analysis/GrantRevokeRoleStmt.java    |   72 +
 .../apache/impala/analysis/HdfsCachingOp.java   |   93 +
 .../org/apache/impala/analysis/HdfsUri.java     |  128 +
 .../org/apache/impala/analysis/InPredicate.java |  234 ++
 .../apache/impala/analysis/InlineViewRef.java   |  339 ++
 .../org/apache/impala/analysis/InsertStmt.java  |  697 ++++
 .../impala/analysis/IsNotEmptyPredicate.java    |   65 +
 .../apache/impala/analysis/IsNullPredicate.java |  186 +
 .../apache/impala/analysis/JoinOperator.java    |  112 +
 .../apache/impala/analysis/LikePredicate.java   |  169 +
 .../apache/impala/analysis/LimitElement.java    |  183 +
 .../org/apache/impala/analysis/LiteralExpr.java |  242 ++
 .../apache/impala/analysis/LoadDataStmt.java    |  241 ++
 .../org/apache/impala/analysis/ModifyStmt.java  |  292 ++
 .../org/apache/impala/analysis/NullLiteral.java |   90 +
 .../apache/impala/analysis/NumericLiteral.java  |  317 ++
 .../apache/impala/analysis/OrderByElement.java  |  156 +
 .../org/apache/impala/analysis/ParseNode.java   |   34 +
 .../impala/analysis/PartitionKeyValue.java      |   88 +
 .../impala/analysis/PartitionListItem.java      |   37 +
 .../apache/impala/analysis/PartitionSpec.java   |  201 +
 .../java/org/apache/impala/analysis/Path.java   |  448 +++
 .../org/apache/impala/analysis/Predicate.java   |   97 +
 .../apache/impala/analysis/PrivilegeSpec.java   |  287 ++
 .../org/apache/impala/analysis/QueryStmt.java   |  458 +++
 .../impala/analysis/ResetMetadataStmt.java      |  110 +
 .../org/apache/impala/analysis/SelectList.java  |   94 +
 .../apache/impala/analysis/SelectListItem.java  |  129 +
 .../org/apache/impala/analysis/SelectStmt.java  | 1021 +++++
 .../org/apache/impala/analysis/SetStmt.java     |   69 +
 .../impala/analysis/ShowCreateFunctionStmt.java |   74 +
 .../impala/analysis/ShowCreateTableStmt.java    |   80 +
 .../impala/analysis/ShowDataSrcsStmt.java       |   72 +
 .../org/apache/impala/analysis/ShowDbsStmt.java |   74 +
 .../apache/impala/analysis/ShowFilesStmt.java   |   90 +
 .../impala/analysis/ShowFunctionsStmt.java      |   94 +
 .../impala/analysis/ShowGrantRoleStmt.java      |   77 +
 .../impala/analysis/ShowPartitionsStmt.java     |   55 +
 .../apache/impala/analysis/ShowRolesStmt.java   |   72 +
 .../apache/impala/analysis/ShowStatsStmt.java   |   65 +
 .../apache/impala/analysis/ShowTablesStmt.java  |  120 +
 .../impala/analysis/SingularRowSrcTableRef.java |   64 +
 .../apache/impala/analysis/SlotDescriptor.java  |  261 ++
 .../java/org/apache/impala/analysis/SlotId.java |   37 +
 .../org/apache/impala/analysis/SlotRef.java     |  243 ++
 .../org/apache/impala/analysis/SortInfo.java    |  131 +
 .../apache/impala/analysis/StatementBase.java   |  141 +
 .../apache/impala/analysis/StmtRewriter.java    |  905 +++++
 .../apache/impala/analysis/StringLiteral.java   |  174 +
 .../org/apache/impala/analysis/Subquery.java    |  157 +
 .../org/apache/impala/analysis/TableName.java   |  123 +
 .../org/apache/impala/analysis/TableRef.java    |  587 +++
 .../analysis/TimestampArithmeticExpr.java       |  215 ++
 .../org/apache/impala/analysis/ToSqlUtils.java  |  348 ++
 .../apache/impala/analysis/TruncateStmt.java    |   82 +
 .../apache/impala/analysis/TupleDescriptor.java |  310 ++
 .../org/apache/impala/analysis/TupleId.java     |   40 +
 .../impala/analysis/TupleIsNullPredicate.java   |  185 +
 .../org/apache/impala/analysis/TypeDef.java     |  159 +
 .../org/apache/impala/analysis/TypesUtil.java   |  237 ++
 .../org/apache/impala/analysis/UnionStmt.java   |  607 +++
 .../org/apache/impala/analysis/UpdateStmt.java  |  117 +
 .../org/apache/impala/analysis/UseStmt.java     |   55 +
 .../org/apache/impala/analysis/ValuesStmt.java  |   82 +
 .../org/apache/impala/analysis/WithClause.java  |  140 +
 .../authorization/AuthorizationChecker.java     |  190 +
 .../authorization/AuthorizationConfig.java      |  148 +
 .../impala/authorization/Authorizeable.java     |   56 +
 .../authorization/AuthorizeableColumn.java      |   66 +
 .../impala/authorization/AuthorizeableDb.java   |   48 +
 .../impala/authorization/AuthorizeableFn.java   |   45 +
 .../authorization/AuthorizeableServer.java      |   49 +
 .../authorization/AuthorizeableTable.java       |   63 +
 .../impala/authorization/AuthorizeableUri.java  |   47 +
 .../authorization/ImpalaInternalAdminUser.java  |   36 +
 .../apache/impala/authorization/Privilege.java  |   70 +
 .../impala/authorization/PrivilegeRequest.java  |   77 +
 .../authorization/PrivilegeRequestBuilder.java  |  119 +
 .../impala/authorization/SentryConfig.java      |   74 +
 .../org/apache/impala/authorization/User.java   |  104 +
 .../impala/catalog/AggregateFunction.java       |  241 ++
 .../org/apache/impala/catalog/ArrayType.java    |   71 +
 .../impala/catalog/AuthorizationException.java  |   32 +
 .../impala/catalog/AuthorizationPolicy.java     |  340 ++
 .../org/apache/impala/catalog/BuiltinsDb.java   | 1052 ++++++
 .../java/org/apache/impala/catalog/Catalog.java |  522 +++
 .../apache/impala/catalog/CatalogDeltaLog.java  |  120 +
 .../apache/impala/catalog/CatalogException.java |   36 +
 .../apache/impala/catalog/CatalogObject.java    |   40 +
 .../impala/catalog/CatalogObjectCache.java      |  148 +
 .../impala/catalog/CatalogServiceCatalog.java   | 1268 +++++++
 .../java/org/apache/impala/catalog/Column.java  |  132 +
 .../impala/catalog/ColumnNotFoundException.java |   31 +
 .../org/apache/impala/catalog/ColumnStats.java  |  334 ++
 .../org/apache/impala/catalog/DataSource.java   |   88 +
 .../apache/impala/catalog/DataSourceTable.java  |  259 ++
 .../catalog/DatabaseNotFoundException.java      |   29 +
 .../main/java/org/apache/impala/catalog/Db.java |  495 +++
 .../org/apache/impala/catalog/Function.java     |  488 +++
 .../org/apache/impala/catalog/HBaseColumn.java  |   67 +
 .../org/apache/impala/catalog/HBaseTable.java   |  853 +++++
 .../apache/impala/catalog/HdfsCachePool.java    |   65 +
 .../apache/impala/catalog/HdfsCompression.java  |   85 +
 .../apache/impala/catalog/HdfsFileFormat.java   |  261 ++
 .../apache/impala/catalog/HdfsPartition.java    |  791 ++++
 .../HdfsPartitionLocationCompressor.java        |  153 +
 .../impala/catalog/HdfsStorageDescriptor.java   |  240 ++
 .../org/apache/impala/catalog/HdfsTable.java    | 1958 ++++++++++
 .../catalog/HiveStorageDescriptorFactory.java   |   63 +
 .../apache/impala/catalog/ImpaladCatalog.java   |  448 +++
 .../apache/impala/catalog/IncompleteTable.java  |  134 +
 .../org/apache/impala/catalog/KuduColumn.java   |   52 +
 .../org/apache/impala/catalog/KuduTable.java    |  278 ++
 .../java/org/apache/impala/catalog/MapType.java |   80 +
 .../impala/catalog/MetaStoreClientPool.java     |  189 +
 .../catalog/PartitionNotFoundException.java     |   31 +
 .../impala/catalog/PartitionStatsUtil.java      |  141 +
 .../apache/impala/catalog/PrimitiveType.java    |  103 +
 .../java/org/apache/impala/catalog/Role.java    |  144 +
 .../apache/impala/catalog/RolePrivilege.java    |  138 +
 .../org/apache/impala/catalog/RowFormat.java    |  109 +
 .../apache/impala/catalog/ScalarFunction.java   |  295 ++
 .../org/apache/impala/catalog/ScalarType.java   |  478 +++
 .../org/apache/impala/catalog/StructField.java  |   92 +
 .../org/apache/impala/catalog/StructType.java   |  107 +
 .../java/org/apache/impala/catalog/Table.java   |  484 +++
 .../java/org/apache/impala/catalog/TableId.java |   42 +
 .../org/apache/impala/catalog/TableLoader.java  |  101 +
 .../impala/catalog/TableLoadingException.java   |   35 +
 .../apache/impala/catalog/TableLoadingMgr.java  |  333 ++
 .../impala/catalog/TableNotFoundException.java  |   31 +
 .../java/org/apache/impala/catalog/Type.java    |  781 ++++
 .../java/org/apache/impala/catalog/View.java    |  207 ++
 .../impala/catalog/delegates/DdlDelegate.java   |   75 +
 .../catalog/delegates/KuduDdlDelegate.java      |  190 +
 .../delegates/UnsupportedOpDelegate.java        |   35 +
 .../apache/impala/common/AliasGenerator.java    |   48 +
 .../apache/impala/common/AnalysisException.java |   32 +
 .../org/apache/impala/common/ByteUnits.java     |   49 +
 .../impala/common/ColumnAliasGenerator.java     |   32 +
 .../apache/impala/common/FileSystemUtil.java    |  409 ++
 .../main/java/org/apache/impala/common/Id.java  |   76 +
 .../org/apache/impala/common/IdGenerator.java   |   28 +
 .../apache/impala/common/ImpalaException.java   |   33 +
 .../impala/common/ImpalaRuntimeException.java   |   32 +
 .../apache/impala/common/InternalException.java |   32 +
 .../java/org/apache/impala/common/JniUtil.java  |  165 +
 .../impala/common/NotImplementedException.java  |   28 +
 .../java/org/apache/impala/common/Pair.java     |   55 +
 .../org/apache/impala/common/PrintUtils.java    |   86 +
 .../org/apache/impala/common/Reference.java     |   42 +
 .../org/apache/impala/common/RuntimeEnv.java    |   71 +
 .../impala/common/TableAliasGenerator.java      |   37 +
 .../java/org/apache/impala/common/TreeNode.java |  182 +
 .../apache/impala/extdatasource/ApiVersion.java |   53 +
 .../ExternalDataSourceExecutor.java             |  267 ++
 .../hive/executor/ImpalaBigIntWritable.java     |   38 +
 .../hive/executor/ImpalaBooleanWritable.java    |   38 +
 .../hive/executor/ImpalaBytesWritable.java      |   56 +
 .../hive/executor/ImpalaDoubleWritable.java     |   38 +
 .../hive/executor/ImpalaFloatWritable.java      |   38 +
 .../impala/hive/executor/ImpalaIntWritable.java |   38 +
 .../hive/executor/ImpalaSmallIntWritable.java   |   38 +
 .../hive/executor/ImpalaStringWritable.java     |  117 +
 .../hive/executor/ImpalaTextWritable.java       |   47 +
 .../hive/executor/ImpalaTinyIntWritable.java    |   38 +
 .../impala/hive/executor/UdfExecutor.java       |  643 ++++
 .../apache/impala/planner/AggregationNode.java  |  292 ++
 .../apache/impala/planner/AnalyticEvalNode.java |  249 ++
 .../apache/impala/planner/AnalyticPlanner.java  |  815 ++++
 .../org/apache/impala/planner/CohortId.java     |   42 +
 .../apache/impala/planner/DataPartition.java    |  131 +
 .../org/apache/impala/planner/DataSink.java     |   64 +
 .../impala/planner/DataSourceScanNode.java      |  371 ++
 .../apache/impala/planner/DataStreamSink.java   |   61 +
 .../impala/planner/DistributedPlanner.java      | 1019 +++++
 .../org/apache/impala/planner/EmptySetNode.java |   71 +
 .../org/apache/impala/planner/ExchangeNode.java |  204 +
 .../apache/impala/planner/HBaseScanNode.java    |  510 +++
 .../apache/impala/planner/HBaseTableSink.java   |   59 +
 .../org/apache/impala/planner/HashJoinNode.java |  193 +
 .../impala/planner/HdfsPartitionFilter.java     |  127 +
 .../impala/planner/HdfsPartitionPruner.java     |  475 +++
 .../org/apache/impala/planner/HdfsScanNode.java |  677 ++++
 .../apache/impala/planner/HdfsTableSink.java    |  157 +
 .../apache/impala/planner/JoinBuildSink.java    |  103 +
 .../org/apache/impala/planner/JoinNode.java     |  508 +++
 .../org/apache/impala/planner/JoinTableId.java  |   47 +
 .../org/apache/impala/planner/KuduScanNode.java |  358 ++
 .../apache/impala/planner/KuduTableSink.java    |   88 +
 .../impala/planner/NestedLoopJoinNode.java      |  133 +
 .../apache/impala/planner/ParallelPlanner.java  |  205 ++
 .../impala/planner/PipelinedPlanNodeSet.java    |  215 ++
 .../org/apache/impala/planner/PlanFragment.java |  388 ++
 .../apache/impala/planner/PlanFragmentId.java   |   42 +
 .../java/org/apache/impala/planner/PlanId.java  |   42 +
 .../org/apache/impala/planner/PlanNode.java     |  715 ++++
 .../org/apache/impala/planner/PlanNodeId.java   |   42 +
 .../java/org/apache/impala/planner/Planner.java |  456 +++
 .../apache/impala/planner/PlannerContext.java   |  100 +
 .../impala/planner/RuntimeFilterGenerator.java  |  590 +++
 .../apache/impala/planner/RuntimeFilterId.java  |   45 +
 .../org/apache/impala/planner/ScanNode.java     |  191 +
 .../org/apache/impala/planner/SelectNode.java   |   93 +
 .../impala/planner/SingleNodePlanner.java       | 1594 ++++++++
 .../impala/planner/SingularRowSrcNode.java      |   82 +
 .../org/apache/impala/planner/SortNode.java     |  242 ++
 .../org/apache/impala/planner/SubplanNode.java  |  109 +
 .../org/apache/impala/planner/TableSink.java    |  114 +
 .../org/apache/impala/planner/UnionNode.java    |  231 ++
 .../org/apache/impala/planner/UnnestNode.java   |  107 +
 .../org/apache/impala/planner/ValueRange.java   |  121 +
 .../apache/impala/service/BackendConfig.java    |   49 +
 .../impala/service/CatalogOpExecutor.java       | 2990 +++++++++++++++
 .../impala/service/DescribeResultFactory.java   |  246 ++
 .../org/apache/impala/service/FeSupport.java    |  292 ++
 .../org/apache/impala/service/Frontend.java     | 1231 +++++++
 .../org/apache/impala/service/JniCatalog.java   |  255 ++
 .../org/apache/impala/service/JniFrontend.java  |  743 ++++
 .../org/apache/impala/service/MetadataOp.java   |  641 ++++
 .../apache/impala/util/AvroSchemaConverter.java |  209 ++
 .../apache/impala/util/AvroSchemaParser.java    |  204 +
 .../org/apache/impala/util/AvroSchemaUtils.java |  189 +
 .../org/apache/impala/util/DisjointSet.java     |  142 +
 .../org/apache/impala/util/EventSequence.java   |   58 +
 .../apache/impala/util/FileWatchService.java    |  140 +
 .../apache/impala/util/FsPermissionChecker.java |  301 ++
 .../org/apache/impala/util/GlogAppender.java    |  129 +
 .../org/apache/impala/util/HdfsCachingUtil.java |  515 +++
 .../java/org/apache/impala/util/KuduUtil.java   |  268 ++
 .../java/org/apache/impala/util/ListMap.java    |   77 +
 .../impala/util/MaxRowsProcessedVisitor.java    |   66 +
 .../apache/impala/util/MembershipSnapshot.java  |   84 +
 .../org/apache/impala/util/MetaStoreUtil.java   |  172 +
 .../org/apache/impala/util/NativeLibUtil.java   |   44 +
 .../org/apache/impala/util/NativeLogger.java    |   51 +
 .../org/apache/impala/util/PatternMatcher.java  |  108 +
 .../apache/impala/util/RequestPoolService.java  |  451 +++
 .../apache/impala/util/SentryPolicyService.java |  466 +++
 .../org/apache/impala/util/SentryProxy.java     |  348 ++
 .../org/apache/impala/util/StatsHelper.java     |   60 +
 .../apache/impala/util/TAccessLevelUtil.java    |   35 +
 .../apache/impala/util/TColumnValueUtil.java    |   51 +
 .../apache/impala/util/TResultRowBuilder.java   |   64 +
 .../apache/impala/util/TSessionStateUtil.java   |   38 +
 .../java/org/apache/impala/util/UnsafeUtil.java |   68 +
 .../java/org/apache/impala/util/Visitor.java    |   27 +
 .../impala/analysis/AnalyzeAuthStmtsTest.java   |  223 --
 .../impala/analysis/AnalyzeDDLTest.java         | 2971 ---------------
 .../impala/analysis/AnalyzeExprsTest.java       | 2401 ------------
 .../impala/analysis/AnalyzeModifyStmtsTest.java |  245 --
 .../impala/analysis/AnalyzeStmtsTest.java       | 3484 ------------------
 .../impala/analysis/AnalyzeSubqueriesTest.java  | 1287 -------
 .../cloudera/impala/analysis/AnalyzerTest.java  |  630 ----
 .../cloudera/impala/analysis/AuditingTest.java  |  383 --
 .../impala/analysis/AuthorizationTest.java      | 2236 -----------
 .../com/cloudera/impala/analysis/ExprTest.java  |   91 -
 .../cloudera/impala/analysis/ParserTest.java    | 3277 ----------------
 .../com/cloudera/impala/analysis/ToSqlTest.java | 1170 ------
 .../cloudera/impala/analysis/TypesUtilTest.java |  217 --
 .../catalog/CatalogObjectToFromThriftTest.java  |  247 --
 .../cloudera/impala/catalog/CatalogTest.java    |  645 ----
 .../impala/catalog/HdfsPartitionTest.java       |  115 -
 .../catalog/HdfsStorageDescriptorTest.java      |  161 -
 .../impala/catalog/TestSchemaUtils.java         |   49 -
 .../impala/common/FrontendTestBase.java         |  319 --
 .../cloudera/impala/hive/executor/TestUdf.java  |  154 -
 .../impala/hive/executor/UdfExecutorTest.java   |  454 ---
 .../cloudera/impala/planner/PlannerTest.java    |  282 --
 .../impala/planner/PlannerTestBase.java         |  693 ----
 .../cloudera/impala/planner/S3PlannerTest.java  |  176 -
 .../cloudera/impala/service/FrontendTest.java   |  258 --
 .../com/cloudera/impala/service/JdbcTest.java   |  550 ---
 .../impala/testutil/BlockIdGenerator.java       |  104 -
 .../testutil/CatalogServiceTestCatalog.java     |   64 -
 .../impala/testutil/ImpalaJdbcClient.java       |  319 --
 .../impala/testutil/ImpaladTestCatalog.java     |   91 -
 .../impala/testutil/SentryServicePinger.java    |   87 -
 .../impala/testutil/TestFileParser.java         |  352 --
 .../com/cloudera/impala/testutil/TestUtils.java |  290 --
 .../com/cloudera/impala/util/KuduUtilTest.java  |  120 -
 .../cloudera/impala/util/TestDisjointSet.java   |  160 -
 .../impala/util/TestRequestPoolService.java     |  290 --
 .../impala/analysis/AnalyzeAuthStmtsTest.java   |  223 ++
 .../apache/impala/analysis/AnalyzeDDLTest.java  | 2971 +++++++++++++++
 .../impala/analysis/AnalyzeExprsTest.java       | 2401 ++++++++++++
 .../impala/analysis/AnalyzeModifyStmtsTest.java |  245 ++
 .../impala/analysis/AnalyzeStmtsTest.java       | 3484 ++++++++++++++++++
 .../impala/analysis/AnalyzeSubqueriesTest.java  | 1287 +++++++
 .../apache/impala/analysis/AnalyzerTest.java    |  630 ++++
 .../apache/impala/analysis/AuditingTest.java    |  383 ++
 .../impala/analysis/AuthorizationTest.java      | 2236 +++++++++++
 .../org/apache/impala/analysis/ExprTest.java    |   91 +
 .../org/apache/impala/analysis/ParserTest.java  | 3277 ++++++++++++++++
 .../org/apache/impala/analysis/ToSqlTest.java   | 1170 ++++++
 .../apache/impala/analysis/TypesUtilTest.java   |  217 ++
 .../catalog/CatalogObjectToFromThriftTest.java  |  247 ++
 .../org/apache/impala/catalog/CatalogTest.java  |  645 ++++
 .../impala/catalog/HdfsPartitionTest.java       |  115 +
 .../catalog/HdfsStorageDescriptorTest.java      |  161 +
 .../apache/impala/catalog/TestSchemaUtils.java  |   49 +
 .../apache/impala/common/FrontendTestBase.java  |  319 ++
 .../apache/impala/hive/executor/TestUdf.java    |  154 +
 .../impala/hive/executor/UdfExecutorTest.java   |  454 +++
 .../org/apache/impala/planner/PlannerTest.java  |  282 ++
 .../apache/impala/planner/PlannerTestBase.java  |  693 ++++
 .../apache/impala/planner/S3PlannerTest.java    |  176 +
 .../org/apache/impala/service/FrontendTest.java |  258 ++
 .../org/apache/impala/service/JdbcTest.java     |  550 +++
 .../impala/testutil/BlockIdGenerator.java       |  104 +
 .../testutil/CatalogServiceTestCatalog.java     |   64 +
 .../impala/testutil/ImpalaJdbcClient.java       |  319 ++
 .../impala/testutil/ImpaladTestCatalog.java     |   91 +
 .../impala/testutil/SentryServicePinger.java    |   87 +
 .../apache/impala/testutil/TestFileParser.java  |  352 ++
 .../org/apache/impala/testutil/TestUtils.java   |  290 ++
 .../org/apache/impala/util/KuduUtilTest.java    |  120 +
 .../org/apache/impala/util/TestDisjointSet.java |  160 +
 .../impala/util/TestRequestPoolService.java     |  290 ++
 .../datagenerator/CsvToHBaseConverter.java      |  101 -
 .../HBaseTestDataRegionAssigment.java           |  320 --
 .../datagenerator/JsonToParquetConverter.java   |   93 -
 .../RandomNestedDataGenerator.java              |  265 --
 .../impala/datagenerator/TestDataGenerator.java |  192 -
 .../datagenerator/CsvToHBaseConverter.java      |  101 +
 .../HBaseTestDataRegionAssigment.java           |  320 ++
 .../datagenerator/JsonToParquetConverter.java   |   93 +
 .../RandomNestedDataGenerator.java              |  265 ++
 .../impala/datagenerator/TestDataGenerator.java |  192 +
 .../cloudera/impala/IncompatibleUdfTest.java    |   71 -
 .../java/com/cloudera/impala/JavaUdfTest.java   |   91 -
 .../main/java/com/cloudera/impala/TestUdf.java  |  150 -
 .../com/cloudera/impala/TestUdfException.java   |   32 -
 .../java/com/cloudera/impala/TestUpdateUdf.java |   38 -
 .../java/com/cloudera/impala/UnresolvedUdf.java |   39 -
 .../org/apache/impala/IncompatibleUdfTest.java  |   71 +
 .../java/org/apache/impala/JavaUdfTest.java     |   91 +
 .../main/java/org/apache/impala/TestUdf.java    |  150 +
 .../org/apache/impala/TestUdfException.java     |   32 +
 .../java/org/apache/impala/TestUpdateUdf.java   |   38 +
 .../java/org/apache/impala/UnresolvedUdf.java   |   39 +
 740 files changed, 99398 insertions(+), 99398 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/ext-data-source/api/src/main/java/com/cloudera/impala/extdatasource/util/SerializationUtils.java
----------------------------------------------------------------------
diff --git a/ext-data-source/api/src/main/java/com/cloudera/impala/extdatasource/util/SerializationUtils.java b/ext-data-source/api/src/main/java/com/cloudera/impala/extdatasource/util/SerializationUtils.java
deleted file mode 100644
index b8fd675..0000000
--- a/ext-data-source/api/src/main/java/com/cloudera/impala/extdatasource/util/SerializationUtils.java
+++ /dev/null
@@ -1,48 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.extdatasource.util;
-
-import java.math.BigDecimal;
-import java.nio.ByteBuffer;
-import java.sql.Timestamp;
-
-/**
- * Utility methods for serialization by an ExternalDataSource.
- */
-public class SerializationUtils {
-
-  /**
-   * Encodes a DECIMAL value.
-   */
-  public static ByteBuffer encodeDecimal(BigDecimal decimal) {
-    if (decimal == null) throw new NullPointerException("decimal cannot be null.");
-    return ByteBuffer.wrap(decimal.unscaledValue().toByteArray());
-  }
-
-  /**
-   * Encodes a TIMESTAMP value.
-   */
-  public static ByteBuffer encodeTimestamp(Timestamp timestamp) {
-    if (timestamp == null) throw new NullPointerException("timestamp cannot be null.");
-    ByteBuffer buffer = ByteBuffer.allocate(8 + 4);
-    buffer.putLong(timestamp.getTime() / 1000);
-    buffer.putInt(timestamp.getNanos());
-    buffer.rewind();
-    return buffer;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/ext-data-source/api/src/main/java/com/cloudera/impala/extdatasource/v1/ExternalDataSource.java
----------------------------------------------------------------------
diff --git a/ext-data-source/api/src/main/java/com/cloudera/impala/extdatasource/v1/ExternalDataSource.java b/ext-data-source/api/src/main/java/com/cloudera/impala/extdatasource/v1/ExternalDataSource.java
deleted file mode 100644
index 66cf1e5..0000000
--- a/ext-data-source/api/src/main/java/com/cloudera/impala/extdatasource/v1/ExternalDataSource.java
+++ /dev/null
@@ -1,65 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.extdatasource.v1;
-
-import com.cloudera.impala.extdatasource.thrift.TCloseParams;
-import com.cloudera.impala.extdatasource.thrift.TCloseResult;
-import com.cloudera.impala.extdatasource.thrift.TGetNextParams;
-import com.cloudera.impala.extdatasource.thrift.TGetNextResult;
-import com.cloudera.impala.extdatasource.thrift.TOpenParams;
-import com.cloudera.impala.extdatasource.thrift.TOpenResult;
-import com.cloudera.impala.extdatasource.thrift.TPrepareParams;
-import com.cloudera.impala.extdatasource.thrift.TPrepareResult;
-
-/**
- * Defines an external data source. Called by Impala during planning (prepare() only)
- * and during query execution (open(), getNext(), and close()).
- * TODO: Add javadocs
- */
-public interface ExternalDataSource {
-
-  /**
-   * Called during the planning phase and serves two purposes:
-   *  1) to pass information to the query planner for a specific scan operation (right
-   *     now only an estimate of the number of rows returned).
-   *  2) to accept or reject predicates that are present in the query; accepted
-   *     predicates are then handed over to the library when the scan is initiated with
-   *     the Open() call.
-   * If prepare() fails, query planning will return with an error.
-   */
-  TPrepareResult prepare(TPrepareParams params);
-
-  /**
-   * Starts a scan. Called during query execution before any calls to getNext().
-   */
-  TOpenResult open(TOpenParams params);
-
-  /**
-   * Gets the next row batch of the scan.
-   */
-  TGetNextResult getNext(TGetNextParams params);
-
-  /**
-   * Ends the scan. After this call Impala will not make any more getNext() calls for
-   * this same handle and the implementation is free to release all related resources.
-   * Can be called at any point after open() has been called, even if the scan itself
-   * hasn't finished (TGetNextResult.eos was not set to true).
-   * Should always be called once unless prepare() fails.
-   */
-  TCloseResult close(TCloseParams params);
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/ext-data-source/api/src/main/java/org/apache/impala/extdatasource/util/SerializationUtils.java
----------------------------------------------------------------------
diff --git a/ext-data-source/api/src/main/java/org/apache/impala/extdatasource/util/SerializationUtils.java b/ext-data-source/api/src/main/java/org/apache/impala/extdatasource/util/SerializationUtils.java
new file mode 100644
index 0000000..b8fd675
--- /dev/null
+++ b/ext-data-source/api/src/main/java/org/apache/impala/extdatasource/util/SerializationUtils.java
@@ -0,0 +1,48 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.extdatasource.util;
+
+import java.math.BigDecimal;
+import java.nio.ByteBuffer;
+import java.sql.Timestamp;
+
+/**
+ * Utility methods for serialization by an ExternalDataSource.
+ */
+public class SerializationUtils {
+
+  /**
+   * Encodes a DECIMAL value.
+   */
+  public static ByteBuffer encodeDecimal(BigDecimal decimal) {
+    if (decimal == null) throw new NullPointerException("decimal cannot be null.");
+    return ByteBuffer.wrap(decimal.unscaledValue().toByteArray());
+  }
+
+  /**
+   * Encodes a TIMESTAMP value.
+   */
+  public static ByteBuffer encodeTimestamp(Timestamp timestamp) {
+    if (timestamp == null) throw new NullPointerException("timestamp cannot be null.");
+    ByteBuffer buffer = ByteBuffer.allocate(8 + 4);
+    buffer.putLong(timestamp.getTime() / 1000);
+    buffer.putInt(timestamp.getNanos());
+    buffer.rewind();
+    return buffer;
+  }
+}

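For context on the encodings above: encodeDecimal() writes only the two's-complement unscaled value (the scale is not encoded and must come from the column's declared type), while encodeTimestamp() writes an 8-byte seconds-since-epoch value followed by a 4-byte nanosecond component. Below is a minimal decode sketch using only JDK classes; the class name and the externally supplied scale argument are illustrative assumptions, not part of this commit.

import java.math.BigDecimal;
import java.math.BigInteger;
import java.nio.ByteBuffer;
import java.sql.Timestamp;

public class SerializationRoundTrip {
  // DECIMAL: the buffer carries the two's-complement unscaled value only; the
  // scale is assumed to be known from the column's declared type.
  public static BigDecimal decodeDecimal(ByteBuffer buffer, int scale) {
    byte[] bytes = new byte[buffer.remaining()];
    buffer.get(bytes);
    return new BigDecimal(new BigInteger(bytes), scale);
  }

  // TIMESTAMP: 8-byte seconds since the epoch followed by a 4-byte nano-of-second
  // component, mirroring encodeTimestamp() above.
  public static Timestamp decodeTimestamp(ByteBuffer buffer) {
    long seconds = buffer.getLong();
    int nanos = buffer.getInt();
    Timestamp ts = new Timestamp(seconds * 1000L);
    ts.setNanos(nanos);
    return ts;
  }
}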
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/ext-data-source/api/src/main/java/org/apache/impala/extdatasource/v1/ExternalDataSource.java
----------------------------------------------------------------------
diff --git a/ext-data-source/api/src/main/java/org/apache/impala/extdatasource/v1/ExternalDataSource.java b/ext-data-source/api/src/main/java/org/apache/impala/extdatasource/v1/ExternalDataSource.java
new file mode 100644
index 0000000..66cf1e5
--- /dev/null
+++ b/ext-data-source/api/src/main/java/org/apache/impala/extdatasource/v1/ExternalDataSource.java
@@ -0,0 +1,65 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.extdatasource.v1;
+
+import com.cloudera.impala.extdatasource.thrift.TCloseParams;
+import com.cloudera.impala.extdatasource.thrift.TCloseResult;
+import com.cloudera.impala.extdatasource.thrift.TGetNextParams;
+import com.cloudera.impala.extdatasource.thrift.TGetNextResult;
+import com.cloudera.impala.extdatasource.thrift.TOpenParams;
+import com.cloudera.impala.extdatasource.thrift.TOpenResult;
+import com.cloudera.impala.extdatasource.thrift.TPrepareParams;
+import com.cloudera.impala.extdatasource.thrift.TPrepareResult;
+
+/**
+ * Defines an external data source. Called by Impala during planning (prepare() only)
+ * and during query execution (open(), getNext(), and close()).
+ * TODO: Add javadocs
+ */
+public interface ExternalDataSource {
+
+  /**
+   * Called during the planning phase and serves two purposes:
+   *  1) to pass information to the query planner for a specific scan operation (right
+   *     now only an estimate of the number of rows returned).
+   *  2) to accept or reject predicates that are present in the query; accepted
+   *     predicates are then handed over to the library when the scan is initiated with
+   *     the Open() call.
+   * If prepare() fails, query planning will return with an error.
+   */
+  TPrepareResult prepare(TPrepareParams params);
+
+  /**
+   * Starts a scan. Called during query execution before any calls to getNext().
+   */
+  TOpenResult open(TOpenParams params);
+
+  /**
+   * Gets the next row batch of the scan.
+   */
+  TGetNextResult getNext(TGetNextParams params);
+
+  /**
+   * Ends the scan. After this call Impala will not make any more getNext() calls for
+   * this same handle and the implementation is free to release all related resources.
+   * Can be called at any point after open() has been called, even if the scan itself
+   * hasn't finished (TGetNextResult.eos was not set to true).
+   * Should always be called once unless prepare() fails.
+   */
+  TCloseResult close(TCloseParams params);
+}

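The interface above defines the full v1 lifecycle: prepare() at planning time, then open(), repeated getNext() calls, and close() at execution time. As a rough illustration only (hypothetical class name, produces no rows, and omits populating a TRowBatch; the in-tree EchoDataSource sample further down in this commit shows a batch that actually carries data), a skeleton implementation might look like:

import com.cloudera.impala.extdatasource.thrift.TCloseParams;
import com.cloudera.impala.extdatasource.thrift.TCloseResult;
import com.cloudera.impala.extdatasource.thrift.TGetNextParams;
import com.cloudera.impala.extdatasource.thrift.TGetNextResult;
import com.cloudera.impala.extdatasource.thrift.TOpenParams;
import com.cloudera.impala.extdatasource.thrift.TOpenResult;
import com.cloudera.impala.extdatasource.thrift.TPrepareParams;
import com.cloudera.impala.extdatasource.thrift.TPrepareResult;
import com.cloudera.impala.extdatasource.v1.ExternalDataSource;
import com.cloudera.impala.thrift.TErrorCode;
import com.cloudera.impala.thrift.TStatus;
import com.google.common.collect.Lists;

// Hypothetical skeleton, not part of this commit: accepts no predicates and
// signals end-of-stream immediately, so the scan yields zero rows.
public class EmptyScanDataSource implements ExternalDataSource {
  private static final TStatus OK =
      new TStatus(TErrorCode.OK, Lists.<String>newArrayList());

  @Override
  public TPrepareResult prepare(TPrepareParams params) {
    // Planning time: accept none of the offered conjuncts and estimate zero rows.
    return new TPrepareResult(OK)
        .setAccepted_conjuncts(Lists.<Integer>newArrayList())
        .setNum_rows_estimate(0);
  }

  @Override
  public TOpenResult open(TOpenParams params) {
    // Execution time: hand back a scan handle that later calls will reference.
    return new TOpenResult(OK).setScan_handle("empty-scan-handle");
  }

  @Override
  public TGetNextResult getNext(TGetNextParams params) {
    // No data to produce: report end-of-stream on the first call.
    return new TGetNextResult(OK).setEos(true);
  }

  @Override
  public TCloseResult close(TCloseParams params) {
    // Release any resources held for this handle (none here).
    return new TCloseResult(OK);
  }
}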
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/ext-data-source/sample/src/main/java/com/cloudera/impala/extdatasource/sample/EchoDataSource.java
----------------------------------------------------------------------
diff --git a/ext-data-source/sample/src/main/java/com/cloudera/impala/extdatasource/sample/EchoDataSource.java b/ext-data-source/sample/src/main/java/com/cloudera/impala/extdatasource/sample/EchoDataSource.java
deleted file mode 100644
index e3d39c4..0000000
--- a/ext-data-source/sample/src/main/java/com/cloudera/impala/extdatasource/sample/EchoDataSource.java
+++ /dev/null
@@ -1,75 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.extdatasource.sample;
-
-import com.cloudera.impala.extdatasource.thrift.TCloseParams;
-import com.cloudera.impala.extdatasource.thrift.TCloseResult;
-import com.cloudera.impala.extdatasource.thrift.TGetNextParams;
-import com.cloudera.impala.extdatasource.thrift.TGetNextResult;
-import com.cloudera.impala.extdatasource.thrift.TOpenParams;
-import com.cloudera.impala.extdatasource.thrift.TOpenResult;
-import com.cloudera.impala.extdatasource.thrift.TPrepareParams;
-import com.cloudera.impala.extdatasource.thrift.TPrepareResult;
-import com.cloudera.impala.extdatasource.thrift.TRowBatch;
-import com.cloudera.impala.extdatasource.v1.ExternalDataSource;
-import com.cloudera.impala.thrift.TColumnData;
-import com.cloudera.impala.thrift.TStatus;
-import com.cloudera.impala.thrift.TErrorCode;
-import com.google.common.collect.Lists;
-
-/**
- * Sample data source that always returns a single column containing the initString.
- */
-public class EchoDataSource implements ExternalDataSource {
-  private static final TStatus STATUS_OK =
-      new TStatus(TErrorCode.OK, Lists.<String>newArrayList());
-
-  private String initString_;
-
-  @Override
-  public TPrepareResult prepare(TPrepareParams params) {
-    return new TPrepareResult(STATUS_OK)
-      .setAccepted_conjuncts(Lists.<Integer>newArrayList())
-      .setNum_rows_estimate(1);
-  }
-
-  @Override
-  public TOpenResult open(TOpenParams params) {
-    initString_ = params.getInit_string();
-    return new TOpenResult(STATUS_OK).setScan_handle("dummy-handle");
-  }
-
-  @Override
-  public TGetNextResult getNext(TGetNextParams params) {
-    boolean eos = true;
-    TGetNextResult result = new TGetNextResult(STATUS_OK).setEos(eos);
-    TRowBatch rowBatch = new TRowBatch();
-    TColumnData colData = new TColumnData();
-    colData.addToIs_null(false);
-    colData.addToString_vals(initString_);
-    rowBatch.addToCols(colData);
-    rowBatch.setNum_rows(1);
-    result.setRows(rowBatch);
-    return result;
-  }
-
-  @Override
-  public TCloseResult close(TCloseParams params) {
-    return new TCloseResult(STATUS_OK);
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/ext-data-source/sample/src/main/java/org/apache/impala/extdatasource/sample/EchoDataSource.java
----------------------------------------------------------------------
diff --git a/ext-data-source/sample/src/main/java/org/apache/impala/extdatasource/sample/EchoDataSource.java b/ext-data-source/sample/src/main/java/org/apache/impala/extdatasource/sample/EchoDataSource.java
new file mode 100644
index 0000000..e3d39c4
--- /dev/null
+++ b/ext-data-source/sample/src/main/java/org/apache/impala/extdatasource/sample/EchoDataSource.java
@@ -0,0 +1,75 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.extdatasource.sample;
+
+import com.cloudera.impala.extdatasource.thrift.TCloseParams;
+import com.cloudera.impala.extdatasource.thrift.TCloseResult;
+import com.cloudera.impala.extdatasource.thrift.TGetNextParams;
+import com.cloudera.impala.extdatasource.thrift.TGetNextResult;
+import com.cloudera.impala.extdatasource.thrift.TOpenParams;
+import com.cloudera.impala.extdatasource.thrift.TOpenResult;
+import com.cloudera.impala.extdatasource.thrift.TPrepareParams;
+import com.cloudera.impala.extdatasource.thrift.TPrepareResult;
+import com.cloudera.impala.extdatasource.thrift.TRowBatch;
+import com.cloudera.impala.extdatasource.v1.ExternalDataSource;
+import com.cloudera.impala.thrift.TColumnData;
+import com.cloudera.impala.thrift.TStatus;
+import com.cloudera.impala.thrift.TErrorCode;
+import com.google.common.collect.Lists;
+
+/**
+ * Sample data source that always returns a single column containing the initString.
+ */
+public class EchoDataSource implements ExternalDataSource {
+  private static final TStatus STATUS_OK =
+      new TStatus(TErrorCode.OK, Lists.<String>newArrayList());
+
+  private String initString_;
+
+  @Override
+  public TPrepareResult prepare(TPrepareParams params) {
+    return new TPrepareResult(STATUS_OK)
+      .setAccepted_conjuncts(Lists.<Integer>newArrayList())
+      .setNum_rows_estimate(1);
+  }
+
+  @Override
+  public TOpenResult open(TOpenParams params) {
+    initString_ = params.getInit_string();
+    return new TOpenResult(STATUS_OK).setScan_handle("dummy-handle");
+  }
+
+  @Override
+  public TGetNextResult getNext(TGetNextParams params) {
+    boolean eos = true;
+    TGetNextResult result = new TGetNextResult(STATUS_OK).setEos(eos);
+    TRowBatch rowBatch = new TRowBatch();
+    TColumnData colData = new TColumnData();
+    colData.addToIs_null(false);
+    colData.addToString_vals(initString_);
+    rowBatch.addToCols(colData);
+    rowBatch.setNum_rows(1);
+    result.setRows(rowBatch);
+    return result;
+  }
+
+  @Override
+  public TCloseResult close(TCloseParams params) {
+    return new TCloseResult(STATUS_OK);
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/ext-data-source/test/src/main/java/com/cloudera/impala/extdatasource/AllTypesDataSource.java
----------------------------------------------------------------------
diff --git a/ext-data-source/test/src/main/java/com/cloudera/impala/extdatasource/AllTypesDataSource.java b/ext-data-source/test/src/main/java/com/cloudera/impala/extdatasource/AllTypesDataSource.java
deleted file mode 100644
index a6a731b..0000000
--- a/ext-data-source/test/src/main/java/com/cloudera/impala/extdatasource/AllTypesDataSource.java
+++ /dev/null
@@ -1,323 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.extdatasource;
-
-import java.math.BigDecimal;
-import java.math.BigInteger;
-import java.sql.Timestamp;
-import java.util.List;
-import java.util.UUID;
-
-import com.cloudera.impala.extdatasource.thrift.TBinaryPredicate;
-import com.cloudera.impala.extdatasource.thrift.TCloseParams;
-import com.cloudera.impala.extdatasource.thrift.TCloseResult;
-import com.cloudera.impala.extdatasource.thrift.TColumnDesc;
-import com.cloudera.impala.extdatasource.thrift.TGetNextParams;
-import com.cloudera.impala.extdatasource.thrift.TGetNextResult;
-import com.cloudera.impala.extdatasource.thrift.TOpenParams;
-import com.cloudera.impala.extdatasource.thrift.TOpenResult;
-import com.cloudera.impala.extdatasource.thrift.TPrepareParams;
-import com.cloudera.impala.extdatasource.thrift.TPrepareResult;
-import com.cloudera.impala.extdatasource.thrift.TRowBatch;
-import com.cloudera.impala.extdatasource.thrift.TTableSchema;
-import com.cloudera.impala.extdatasource.util.SerializationUtils;
-import com.cloudera.impala.extdatasource.v1.ExternalDataSource;
-import com.cloudera.impala.thrift.TColumnData;
-import com.cloudera.impala.thrift.TColumnType;
-import com.cloudera.impala.thrift.TPrimitiveType;
-import com.cloudera.impala.thrift.TScalarType;
-import com.cloudera.impala.thrift.TStatus;
-import com.cloudera.impala.thrift.TErrorCode;
-import com.cloudera.impala.thrift.TTypeNodeType;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Iterables;
-import com.google.common.collect.Lists;
-
-/**
- * Data source implementation for tests that can:
- * (a) Accept every other offered conjunct to exercise planning (though predicates are
- *     not actually evaluated) and return trivial data of all supported types for
- *     query tests.
- * (b) Validate the predicates offered by Impala.
- */
-public class AllTypesDataSource implements ExternalDataSource {
-  // Total number of rows to return
-  private static final int NUM_ROWS_RETURNED = 5000;
-
-  // Change the size of the batches that are returned
-  private static final int INITIAL_BATCH_SIZE = 500;
-  private static final int BATCH_SIZE_INCREMENT = 100;
-
-  private static final TStatus STATUS_OK =
-      new TStatus(TErrorCode.OK, Lists.<String>newArrayList());
-
-  private int currRow_;
-  private boolean eos_;
-  private int batchSize_;
-  private TTableSchema schema_;
-  private DataSourceState state_;
-  private String scanHandle_;
-  private String validatePredicatesResult_;
-
-  // Enumerates the states of the data source.
-  private enum DataSourceState {
-    CREATED,
-    OPENED,
-    CLOSED
-  }
-
-  public AllTypesDataSource() {
-    eos_ = false;
-    currRow_ = 0;
-    state_ = DataSourceState.CREATED;
-  }
-
-  /**
-   * Accepts every other conjunct and reports the constant number of rows that this
-   * data source always returns.
-   */
-  @Override
-  public TPrepareResult prepare(TPrepareParams params) {
-    Preconditions.checkState(state_ == DataSourceState.CREATED);
-    List<Integer> accepted = Lists.newArrayList();
-    int numRowsReturned = 0;
-    if (validatePredicates(params.getPredicates())) {
-      // Indicate all predicates are applied because we return a dummy row with the
-      // result later to validate the result in tests. Impala shouldn't try to apply
-      // predicates to that dummy row.
-      for (int i = 0; i < params.getPredicatesSize(); ++i) accepted.add(i);
-      numRowsReturned = 1;
-    } else {
-      // Default behavior is to accept every other predicate. They are not actually
-      // applied, but we want to validate that Impala applies the correct predicates.
-      for (int i = 0; i < params.getPredicatesSize(); ++i) {
-        if (i % 2 == 0) accepted.add(i);
-      }
-      numRowsReturned = NUM_ROWS_RETURNED;
-    }
-    return new TPrepareResult(STATUS_OK)
-      .setAccepted_conjuncts(accepted)
-      .setNum_rows_estimate(numRowsReturned);
-  }
-
-  /**
-   * If the predicate value (assuming STRING) starts with 'VALIDATE_PREDICATES##',
-   * we validate the TPrepareParams.predicates against the predicates specified after
-   * that marker and return true. The result of the validation is stored
-   * in validatePredicatesResult_.
-   *
-   * The expected predicates are specified in the form "{slot} {TComparisonOp} {val}",
-   * and conjunctive predicates are separated by '&&'.
-   *
-   * For example, the predicates_spec validates the predicates in the following query:
-   *    select * from table_name
-   *    where predicates_spec = 'x LT 1 && y GT 2' and
-   *          x < 1 and
-   *          2 > y;
-   *
-   * Current limitations:
-   *  - Disjunctive predicates are not supported (e.g. "expr1 or expr2")
-   *  - Only INT is supported
-   */
-  private boolean validatePredicates(List<List<TBinaryPredicate>> predicates) {
-    if (predicates == null || predicates.isEmpty()) return false;
-    TBinaryPredicate firstPredicate = predicates.get(0).get(0);
-    if (!firstPredicate.getValue().isSetString_val()) return false;
-    String colVal = firstPredicate.getValue().getString_val();
-    if (!colVal.toUpperCase().startsWith("VALIDATE_PREDICATES##")) return false;
-
-    String[] colValParts = colVal.split("##");
-    Preconditions.checkArgument(colValParts.length == 2);
-    String[] expectedPredicates = colValParts[1].split("&&");
-    Preconditions.checkArgument(expectedPredicates.length == predicates.size() - 1);
-
-    String result = "SUCCESS";
-    for (int i = 1; i < predicates.size(); ++i) {
-      String[] predicateParts = expectedPredicates[i - 1].trim().split(" ");
-      Preconditions.checkArgument(predicateParts.length == 3);
-      TBinaryPredicate predicate =
-          Iterables.getOnlyElement(predicates.get(i));
-      Preconditions.checkArgument(predicate.getValue().isSetInt_val());
-
-      String slotName = predicate.getCol().getName().toUpperCase();
-      int intVal = predicate.getValue().getInt_val();
-      if (!predicateParts[0].toUpperCase().equals(slotName) ||
-          !predicateParts[1].toUpperCase().equals(predicate.getOp().name()) ||
-          !predicateParts[2].equals(Integer.toString(intVal))) {
-        result = "Failed predicate, expected=" + expectedPredicates[i - 1].trim() +
-            " actual=" + predicate.toString();
-      }
-    }
-    validatePredicatesResult_ = result;
-    return true;
-  }
-
-  /**
-   * Initializes the batch size and stores the table schema.
-   */
-  @Override
-  public TOpenResult open(TOpenParams params) {
-    Preconditions.checkState(state_ == DataSourceState.CREATED);
-    state_ = DataSourceState.OPENED;
-    batchSize_ = INITIAL_BATCH_SIZE;
-    schema_ = params.getRow_schema();
-    // Need to call validatePredicates() again because the call in prepare() came from
-    // the frontend and used a different instance of this data source class.
-    if (validatePredicates(params.getPredicates())) {
-      // If validating predicates, only one STRING column should be selected.
-      Preconditions.checkArgument(schema_.getColsSize() == 1);
-      TColumnDesc firstCol = schema_.getCols().get(0);
-      TColumnType firstType = firstCol.getType();
-      Preconditions.checkState(firstType.getTypesSize() == 1);
-      Preconditions.checkState(firstType.types.get(0).getType() == TTypeNodeType.SCALAR);
-      Preconditions.checkArgument(
-          firstType.types.get(0).scalar_type.getType() == TPrimitiveType.STRING);
-    }
-    scanHandle_ = UUID.randomUUID().toString();
-    return new TOpenResult(STATUS_OK).setScan_handle(scanHandle_);
-  }
-
-  /**
-   * If validating predicates, returns a single row with the result of the validation.
-   * Otherwise returns row batches with generated rows based on the row index. Called
-   * multiple times, so the current row is stored between calls. Each row batch is a
-   * different size (not necessarily the size specified by TOpenParams.batch_size) to
-   * ensure that Impala can handle unexpected batch sizes.
-   */
-  @Override
-  public TGetNextResult getNext(TGetNextParams params) {
-    Preconditions.checkState(state_ == DataSourceState.OPENED);
-    Preconditions.checkArgument(params.getScan_handle().equals(scanHandle_));
-    if (eos_) return new TGetNextResult(STATUS_OK).setEos(eos_);
-
-    if (validatePredicatesResult_ != null) {
-      TColumnData colData = new TColumnData();
-      colData.setIs_null(Lists.newArrayList(false));
-      colData.setString_vals(Lists.newArrayList(validatePredicatesResult_));
-      eos_ = true;
-      return new TGetNextResult(STATUS_OK).setEos(eos_)
-          .setRows(new TRowBatch().setCols(Lists.newArrayList(colData)).setNum_rows(1));
-    }
-
-    List<TColumnData> cols = Lists.newArrayList();
-    for (int i = 0; i < schema_.getColsSize(); ++i) {
-      cols.add(new TColumnData().setIs_null(Lists.<Boolean>newArrayList()));
-    }
-
-    int numAdded = 0;
-    while (currRow_ < NUM_ROWS_RETURNED && numAdded < batchSize_) {
-      addRow(cols);
-      ++numAdded;
-      ++currRow_;
-    }
-
-    batchSize_ += BATCH_SIZE_INCREMENT;
-    if (currRow_ == NUM_ROWS_RETURNED) eos_ = true;
-    return new TGetNextResult(STATUS_OK).setEos(eos_)
-        .setRows(new TRowBatch().setCols(cols).setNum_rows(numAdded));
-  }
-
-  /**
-   * Adds a row to the set of columns. For all numeric types the value is derived from
-   * the row index (modulo a small per-type bound for the narrower integer types). For
-   * strings the value is the row index as a string, and every 5th value is null.
-   */
-  private void addRow(List<TColumnData> cols) {
-    for (int i = 0; i < cols.size(); ++i) {
-      TColumnDesc colDesc = schema_.getCols().get(i);
-      TColumnData colData = cols.get(i);
-      TColumnType type = colDesc.getType();
-      if (type.types.get(0).getType() != TTypeNodeType.SCALAR) {
-        // Unsupported non-scalar type.
-        throw new UnsupportedOperationException("Unsupported column type: " +
-            type.types.get(0).getType());
-      }
-      Preconditions.checkState(type.getTypesSize() == 1);
-      TScalarType scalarType = type.types.get(0).scalar_type;
-      switch (scalarType.type) {
-        case TINYINT:
-          colData.addToIs_null(false);
-          colData.addToByte_vals((byte) (currRow_ % 10));
-          break;
-        case SMALLINT:
-          colData.addToIs_null(false);
-          colData.addToShort_vals((short) (currRow_ % 100));
-          break;
-        case INT:
-          colData.addToIs_null(false);
-          colData.addToInt_vals(currRow_);
-          break;
-        case BIGINT:
-          colData.addToIs_null(false);
-          colData.addToLong_vals((long) currRow_ * 10);
-          break;
-        case DOUBLE:
-          colData.addToIs_null(false);
-          colData.addToDouble_vals(currRow_);
-          break;
-        case FLOAT:
-          colData.addToIs_null(false);
-          colData.addToDouble_vals((float) (1.1 * currRow_));
-          break;
-        case STRING:
-          if (currRow_ % 5 == 0) {
-            colData.addToIs_null(true);
-          } else {
-            colData.addToIs_null(false);
-            colData.addToString_vals(String.valueOf(currRow_));
-          }
-          break;
-        case BOOLEAN:
-          colData.addToIs_null(false);
-          colData.addToBool_vals(currRow_ % 2 == 0);
-          break;
-        case TIMESTAMP:
-          colData.addToIs_null(false);
-          colData.addToBinary_vals(
-            SerializationUtils.encodeTimestamp(new Timestamp(currRow_)));
-          break;
-        case DECIMAL:
-          colData.addToIs_null(false);
-          BigInteger maxUnscaled = BigInteger.TEN.pow(scalarType.getPrecision());
-          BigInteger val = maxUnscaled.subtract(BigInteger.valueOf(currRow_ + 1));
-          val = val.mod(maxUnscaled);
-          if (currRow_ % 2 == 0) val = val.negate();
-          colData.addToBinary_vals(SerializationUtils.encodeDecimal(new BigDecimal(val)));
-          break;
-        case BINARY:
-        case CHAR:
-        case DATE:
-        case DATETIME:
-        case INVALID_TYPE:
-        case NULL_TYPE:
-        default:
-          // Unsupported.
-          throw new UnsupportedOperationException("Unsupported column type: " +
-              scalarType.getType());
-      }
-    }
-  }
-
-  @Override
-  public TCloseResult close(TCloseParams params) {
-    Preconditions.checkState(state_ == DataSourceState.OPENED);
-    Preconditions.checkArgument(params.getScan_handle().equals(scanHandle_));
-    state_ = DataSourceState.CLOSED;
-    return new TCloseResult(STATUS_OK);
-  }
-}
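
A minimal sketch of driving the lifecycle shown above (prepare, open, getNext, close) from a standalone harness. The harness class name is hypothetical, and the Thrift setters are assumed to mirror the getters used by the class (e.g. setRow_schema, setScan_handle):

    import com.cloudera.impala.extdatasource.AllTypesDataSource;
    import com.cloudera.impala.extdatasource.thrift.TGetNextParams;
    import com.cloudera.impala.extdatasource.thrift.TGetNextResult;
    import com.cloudera.impala.extdatasource.thrift.TOpenParams;
    import com.cloudera.impala.extdatasource.thrift.TOpenResult;
    import com.cloudera.impala.extdatasource.thrift.TPrepareParams;
    import com.cloudera.impala.extdatasource.thrift.TPrepareResult;
    import com.cloudera.impala.extdatasource.thrift.TTableSchema;

    public class AllTypesDataSourceSketch {
      public static void main(String[] args) {
        AllTypesDataSource ds = new AllTypesDataSource();

        // prepare(): with no predicates offered, none are accepted and the
        // estimate is the constant NUM_ROWS_RETURNED (5000).
        TPrepareResult prepared = ds.prepare(new TPrepareParams());
        System.out.println("row estimate: " + prepared.getNum_rows_estimate());

        // open(): pass the schema of the selected columns; the returned scan
        // handle must be echoed back on every getNext()/close() call.
        TOpenParams openParams = new TOpenParams();
        openParams.setRow_schema(new TTableSchema());  // no columns, for brevity
        TOpenResult opened = ds.open(openParams);

        // getNext(): batch sizes start at INITIAL_BATCH_SIZE and grow by
        // BATCH_SIZE_INCREMENT until all NUM_ROWS_RETURNED rows are produced.
        TGetNextParams nextParams = new TGetNextParams();
        nextParams.setScan_handle(opened.getScan_handle());
        TGetNextResult batch = ds.getNext(nextParams);
        System.out.println("rows in first batch: " + batch.getRows().getNum_rows());
      }
    }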


[49/61] [partial] incubator-impala git commit: IMPALA-3786: Replace "cloudera" with "apache" (part 1)

Posted by kw...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/AlterTableSetCachedStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/AlterTableSetCachedStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/AlterTableSetCachedStmt.java
deleted file mode 100644
index f63cf3e..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/AlterTableSetCachedStmt.java
+++ /dev/null
@@ -1,92 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.catalog.HdfsPartition;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TAlterTableParams;
-import com.cloudera.impala.thrift.TAlterTableSetCachedParams;
-import com.cloudera.impala.thrift.TAlterTableType;
-import com.google.common.base.Preconditions;
-
-/**
- * Represents an ALTER TABLE [PARTITION partitionSpec] SET [UNCACHED|CACHED 'pool'].
- */
-public class AlterTableSetCachedStmt extends AlterTableSetStmt {
-  private final HdfsCachingOp cacheOp_;
-
-  public AlterTableSetCachedStmt(TableName tableName,
-      PartitionSpec partitionSpec, HdfsCachingOp cacheOp) {
-    super(tableName, partitionSpec);
-    Preconditions.checkNotNull(cacheOp);
-    cacheOp_ = cacheOp;
-  }
-
-  @Override
-  public TAlterTableParams toThrift() {
-    TAlterTableParams params = super.toThrift();
-    params.setAlter_type(TAlterTableType.SET_CACHED);
-    TAlterTableSetCachedParams cachingParams =
-        new TAlterTableSetCachedParams();
-    if (getPartitionSpec() != null) {
-      cachingParams.setPartition_spec(getPartitionSpec().toThrift());
-    }
-    cachingParams.setCache_op(cacheOp_.toThrift());
-    params.setSet_cached_params(cachingParams);
-    return params;
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    super.analyze(analyzer);
-    cacheOp_.analyze(analyzer);
-
-    Table table = getTargetTable();
-    Preconditions.checkNotNull(table);
-    if (!(table instanceof HdfsTable)) {
-      throw new AnalysisException("ALTER TABLE SET [CACHED|UNCACHED] must target an " +
-          "HDFS table: " + table.getFullName());
-    }
-
-    if (cacheOp_.shouldCache()) {
-      boolean isCacheable;
-      PartitionSpec partSpec = getPartitionSpec();
-      HdfsTable hdfsTable = (HdfsTable)table;
-      StringBuilder nameSb = new StringBuilder();
-      if (partSpec != null) {
-        HdfsPartition part = hdfsTable.getPartition(partSpec.getPartitionSpecKeyValues());
-        if (part == null) {
-          throw new AnalysisException("Partition spec does not exist: " +
-              partSpec.toSql());
-        }
-        isCacheable = part.isCacheable();
-        nameSb.append("Partition (" + part.getPartitionName() + ")");
-      } else {
-        isCacheable = hdfsTable.isCacheable();
-        nameSb.append("Table ").append(table.getFullName());
-      }
-      if (!isCacheable) {
-        throw new AnalysisException(nameSb.toString() + " cannot be cached. Please " +
-            "check if the table or partitions are on a filesystem which supports " +
-            "caching.");
-      }
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/AlterTableSetColumnStats.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/AlterTableSetColumnStats.java b/fe/src/main/java/com/cloudera/impala/analysis/AlterTableSetColumnStats.java
deleted file mode 100644
index e9c9a14..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/AlterTableSetColumnStats.java
+++ /dev/null
@@ -1,158 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.HashMap;
-import java.util.Map;
-
-import com.cloudera.impala.catalog.Column;
-import com.cloudera.impala.catalog.ColumnStats;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TAlterTableParams;
-import com.cloudera.impala.thrift.TAlterTableType;
-import com.cloudera.impala.thrift.TAlterTableUpdateStatsParams;
-import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
-
-/**
-* Represents an ALTER TABLE [<dbName>.]<tableName> SET COLUMN STATS <colName>
-* ('statsKey'='val','statsKey2'='val2') statement.
-*
-* The keys as well as the values are specified as string literals to be consistent
-* with the existing DDL for setting TBLPROPERTIES/SERDEPROPERTIES, in particular,
-* setting the 'numRows' table/partition property.
-*
-* Stats key comparisons are case-insensitive.
-*/
-public class AlterTableSetColumnStats extends AlterTableStmt {
-  private final String colName_;
-  private final HashMap<String, String> statsMap_;
-
-  // Complete column stats reflecting this alteration. Existing stats values
-  // are preserved. Result of analysis.
-  private ColumnStats colStats_;
-
-  public AlterTableSetColumnStats(TableName tableName, String colName,
-      HashMap<String, String> statsMap) {
-    super(tableName);
-    colName_ = colName;
-    statsMap_ = statsMap;
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    super.analyze(analyzer);
-
-    Column col = getTargetTable().getColumn(colName_);
-    if (col == null) {
-      throw new AnalysisException(
-          String.format("Column '%s' does not exist in table: %s",
-              colName_, getTargetTable().getFullName()));
-    }
-    // Cannot update stats on partition columns because the HMS has no entries
-    // for them, and the stats can be computed directly from the metadata.
-    if (col.getPosition() < getTargetTable().getNumClusteringCols()) {
-      throw new AnalysisException(
-          "Updating the stats of a partition column is not allowed: " + colName_);
-    }
-    // Cannot update the stats if they are not supported for the column's type.
-    if (!ColumnStats.isSupportedColType(col.getType())) {
-      throw new AnalysisException(String.format(
-          "Statistics for column '%s' are not supported because " +
-          "it has type '%s'.", col.getName(), col.getType().toSql()));
-    }
-
-    // Copy the existing stats and then overwrite the values specified in this stmt's
-    // stats map. Copying the existing stats first preserves any values that this stmt
-    // does not change, because altering the stats in the HMS replaces all stats
-    // values at once.
-    colStats_ = col.getStats().clone();
-    for (Map.Entry<String, String> entry: statsMap_.entrySet()) {
-      ColumnStats.StatsKey statsKey = ColumnStats.StatsKey.fromString(entry.getKey());
-      if (statsKey == null) {
-        throw new AnalysisException(String.format(
-            "Invalid column stats key: %s\nValid keys are: %s",
-            entry.getKey(), Joiner.on(',').join(ColumnStats.StatsKey.values())));
-      }
-      setStatsValue(statsKey, entry.getValue(), col, colStats_);
-    }
-  }
-
-  /**
-   * Updates the given column stats based on statsKey and statsValue.
-   * Throws an AnalysisException if the statsValue is invalid or not applicable to the
-   * column (e.g., trying to update the avg/max size of a fixed-length column).
-   */
-  private void setStatsValue(ColumnStats.StatsKey statsKey, String statsValue,
-      Column col, ColumnStats stats) throws AnalysisException {
-    // Updating max/avg size is only allowed for variable length columns.
-    if (col.getType().isFixedLengthType()
-        && (statsKey == ColumnStats.StatsKey.AVG_SIZE
-            || statsKey == ColumnStats.StatsKey.MAX_SIZE)) {
-      throw new AnalysisException(String.format(
-          "Cannot update the '%s' stats of column '%s' with type '%s'.\n" +
-          "Changing '%s' is only allowed for variable-length columns.",
-          statsKey, col.getName(), col.getType().toSql(), statsKey));
-    }
-
-    if (statsKey == ColumnStats.StatsKey.NUM_DISTINCT_VALUES ||
-        statsKey == ColumnStats.StatsKey.NUM_NULLS ||
-        statsKey == ColumnStats.StatsKey.MAX_SIZE) {
-      Long statsVal = null;
-      try {
-        statsVal = Long.parseLong(statsValue);
-      } catch (Exception e) {
-      }
-      if (statsVal == null || statsVal < -1) {
-        throw new AnalysisException(String.format(
-            "Invalid stats value '%s' for column stats key: %s\n" +
-            "Expected a positive integer or -1 for unknown.",
-            statsValue, statsKey));
-      }
-      stats.update(statsKey, statsVal);
-    } else if (statsKey == ColumnStats.StatsKey.AVG_SIZE) {
-      Float statsVal = null;
-      try {
-        statsVal = Float.parseFloat(statsValue);
-      } catch (Exception e) {
-      }
-      if (statsVal == null || (statsVal < 0 && statsVal != -1) ||
-          statsVal.isNaN() || statsVal.isInfinite()) {
-        throw new AnalysisException(String.format(
-            "Invalid stats value '%s' for column stats key: %s\n" +
-            "Expected a positive floating-point number or -1 for unknown.",
-            statsValue, statsKey));
-      }
-      stats.update(statsKey, statsVal);
-    } else {
-      Preconditions.checkState(false, "Unhandled StatsKey value: " + statsKey);
-    }
-  }
-
-  @Override
-  public TAlterTableParams toThrift() {
-   TAlterTableParams params = super.toThrift();
-   params.setAlter_type(TAlterTableType.UPDATE_STATS);
-   TAlterTableUpdateStatsParams updateStatsParams =
-       new TAlterTableUpdateStatsParams();
-   updateStatsParams.setTable_name(getTargetTable().getTableName().toThrift());
-   updateStatsParams.putToColumn_stats(colName_.toString(), colStats_.toThrift());
-   params.setUpdate_stats_params(updateStatsParams);
-   return params;
-  }
-}
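
In isolation, the numeric validation that setStatsValue() applies to the integer-valued keys can be sketched as follows (a standalone illustration; the class and method names are hypothetical and not part of the statement class):

    // Hypothetical mirror of the integer-valued branch of setStatsValue():
    // NUM_DISTINCT_VALUES, NUM_NULLS and MAX_SIZE values must parse as a long
    // and be >= -1, where -1 means "unknown".
    final class StatsValueParsing {
      static long parseIntegerStatsValue(String statsValue) {
        Long statsVal = null;
        try {
          statsVal = Long.parseLong(statsValue);
        } catch (NumberFormatException e) {
          // Leave statsVal null; it is rejected below.
        }
        if (statsVal == null || statsVal < -1) {
          throw new IllegalArgumentException(
              "Expected an integer >= -1 (-1 means unknown), got: " + statsValue);
        }
        return statsVal;
      }
    }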

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/AlterTableSetFileFormatStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/AlterTableSetFileFormatStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/AlterTableSetFileFormatStmt.java
deleted file mode 100644
index 7e18aa6..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/AlterTableSetFileFormatStmt.java
+++ /dev/null
@@ -1,57 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TAlterTableParams;
-import com.cloudera.impala.thrift.TAlterTableSetFileFormatParams;
-import com.cloudera.impala.thrift.TAlterTableType;
-import com.cloudera.impala.thrift.THdfsFileFormat;
-
-/**
- * Represents an ALTER TABLE [PARTITION partitionSpec] SET FILEFORMAT statement.
- */
-public class AlterTableSetFileFormatStmt extends AlterTableSetStmt {
-  private final THdfsFileFormat fileFormat_;
-
-  public AlterTableSetFileFormatStmt(TableName tableName,
-      PartitionSpec partitionSpec, THdfsFileFormat fileFormat) {
-    super(tableName, partitionSpec);
-    this.fileFormat_ = fileFormat;
-  }
-
-  public THdfsFileFormat getFileFormat() { return fileFormat_; }
-
-  @Override
-  public TAlterTableParams toThrift() {
-    TAlterTableParams params = super.toThrift();
-    params.setAlter_type(TAlterTableType.SET_FILE_FORMAT);
-    TAlterTableSetFileFormatParams fileFormatParams =
-        new TAlterTableSetFileFormatParams(fileFormat_);
-    if (getPartitionSpec() != null) {
-      fileFormatParams.setPartition_spec(getPartitionSpec().toThrift());
-    }
-    params.setSet_file_format_params(fileFormatParams);
-    return params;
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    super.analyze(analyzer);
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/AlterTableSetLocationStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/AlterTableSetLocationStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/AlterTableSetLocationStmt.java
deleted file mode 100644
index fcc9b53..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/AlterTableSetLocationStmt.java
+++ /dev/null
@@ -1,86 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.catalog.HdfsPartition;
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TAlterTableParams;
-import com.cloudera.impala.thrift.TAlterTableSetLocationParams;
-import com.cloudera.impala.thrift.TAlterTableType;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.fs.permission.FsAction;
-
-/**
- * Represents an ALTER TABLE [PARTITION partitionSpec] SET LOCATION statement.
- */
-public class AlterTableSetLocationStmt extends AlterTableSetStmt {
-  private final HdfsUri location_;
-
-  public AlterTableSetLocationStmt(TableName tableName,
-      PartitionSpec partitionSpec, HdfsUri location) {
-    super(tableName, partitionSpec);
-    Preconditions.checkNotNull(location);
-    this.location_ = location;
-  }
-
-  public HdfsUri getLocation() { return location_; }
-
-  @Override
-  public TAlterTableParams toThrift() {
-    TAlterTableParams params = super.toThrift();
-    params.setAlter_type(TAlterTableType.SET_LOCATION);
-    TAlterTableSetLocationParams locationParams =
-        new TAlterTableSetLocationParams(location_.toString());
-    if (getPartitionSpec() != null) {
-      locationParams.setPartition_spec(getPartitionSpec().toThrift());
-    }
-    params.setSet_location_params(locationParams);
-    return params;
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    super.analyze(analyzer);
-    location_.analyze(analyzer, Privilege.ALL, FsAction.READ_WRITE);
-
-    Table table = getTargetTable();
-    Preconditions.checkNotNull(table);
-    if (table instanceof HdfsTable) {
-      HdfsTable hdfsTable = (HdfsTable) table;
-      if (getPartitionSpec() != null) {
-        // Targeting a partition rather than a table.
-        PartitionSpec partitionSpec = getPartitionSpec();
-        HdfsPartition partition = hdfsTable.getPartition(
-            partitionSpec.getPartitionSpecKeyValues());
-        Preconditions.checkNotNull(partition);
-        if (partition.isMarkedCached()) {
-          throw new AnalysisException(String.format("Target partition is cached, " +
-              "please uncache before changing the location using: ALTER TABLE %s %s " +
-              "SET UNCACHED", table.getFullName(), partitionSpec.toSql()));
-        }
-      } else if (hdfsTable.isMarkedCached()) {
-        throw new AnalysisException(String.format("Target table is cached, please " +
-            "uncache before changing the location using: ALTER TABLE %s SET UNCACHED",
-            table.getFullName()));
-      }
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/AlterTableSetStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/AlterTableSetStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/AlterTableSetStmt.java
deleted file mode 100644
index 2857211..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/AlterTableSetStmt.java
+++ /dev/null
@@ -1,58 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.catalog.HBaseTable;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.common.AnalysisException;
-
-/**
- * Base class for all ALTER TABLE ... SET statements
- */
-public class AlterTableSetStmt extends AlterTableStmt {
-  protected final PartitionSpec partitionSpec_;
-
-  public AlterTableSetStmt(TableName tableName, PartitionSpec partitionSpec) {
-    super(tableName);
-    partitionSpec_ = partitionSpec;
-    if (partitionSpec_ != null) partitionSpec_.setTableName(tableName);
-  }
-
-  public PartitionSpec getPartitionSpec() { return partitionSpec_; }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    super.analyze(analyzer);
-    Table t = getTargetTable();
-    // TODO: Support ALTER TABLE SET on HBase tables. Requires validating changes
-    // to the SERDEPROPERTIES and TBLPROPERTIES to ensure the table metadata does not
-    // become invalid.
-    if (t instanceof HBaseTable) {
-      throw new AnalysisException("ALTER TABLE SET not currently supported on " +
-          "HBase tables.");
-    }
-
-    // Altering the table rather than the partition.
-    if (partitionSpec_ == null) return;
-
-    partitionSpec_.setPartitionShouldExist();
-    partitionSpec_.setPrivilegeRequirement(Privilege.ALTER);
-    partitionSpec_.analyze(analyzer);
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/AlterTableSetTblProperties.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/AlterTableSetTblProperties.java b/fe/src/main/java/com/cloudera/impala/analysis/AlterTableSetTblProperties.java
deleted file mode 100644
index da92267..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/AlterTableSetTblProperties.java
+++ /dev/null
@@ -1,150 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.avro.SchemaParseException;
-import org.apache.hadoop.hive.serde2.avro.AvroSerdeUtils;
-
-import com.cloudera.impala.catalog.HdfsFileFormat;
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.*;
-import com.cloudera.impala.util.AvroSchemaParser;
-import com.cloudera.impala.util.AvroSchemaUtils;
-import com.cloudera.impala.util.MetaStoreUtil;
-
-import com.google.common.base.Preconditions;
-import com.google.common.base.Strings;
-import com.google.common.collect.Lists;
-
-/**
-* Represents an ALTER TABLE [PARTITION ('k1'='a', 'k2'='b'...)] SET
-* TBLPROPERTIES|SERDEPROPERTIES ('p1'='v1', ...) statement.
-*/
-public class AlterTableSetTblProperties extends AlterTableSetStmt {
-  private final TTablePropertyType targetProperty_;
-  private final HashMap<String, String> tblProperties_;
-
-  public AlterTableSetTblProperties(TableName tableName, PartitionSpec partitionSpec,
-      TTablePropertyType targetProperty, HashMap<String, String> tblProperties) {
-    super(tableName, partitionSpec);
-    Preconditions.checkNotNull(tblProperties);
-    Preconditions.checkNotNull(targetProperty);
-    targetProperty_ = targetProperty;
-    tblProperties_ = tblProperties;
-    CreateTableStmt.unescapeProperties(tblProperties_);
-  }
-
-  public HashMap<String, String> getTblProperties() { return tblProperties_; }
-
-  @Override
-  public TAlterTableParams toThrift() {
-   TAlterTableParams params = super.toThrift();
-   params.setAlter_type(TAlterTableType.SET_TBL_PROPERTIES);
-   TAlterTableSetTblPropertiesParams tblPropertyParams =
-       new TAlterTableSetTblPropertiesParams();
-   tblPropertyParams.setTarget(targetProperty_);
-   tblPropertyParams.setProperties(tblProperties_);
-   if (partitionSpec_ != null) {
-     tblPropertyParams.setPartition_spec(partitionSpec_.toThrift());
-   }
-   params.setSet_tbl_properties_params(tblPropertyParams);
-   return params;
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    super.analyze(analyzer);
-
-    MetaStoreUtil.checkShortPropertyMap("Property", tblProperties_);
-
-    // Check avro schema when it is set in avro.schema.url or avro.schema.literal to
-    // avoid potential metadata corruption (see IMPALA-2042).
-    // If both properties are set then only check avro.schema.literal and ignore
-    // avro.schema.url.
-    if (tblProperties_.containsKey(
-            AvroSerdeUtils.AvroTableProperties.SCHEMA_LITERAL.getPropName()) ||
-        tblProperties_.containsKey(
-            AvroSerdeUtils.AvroTableProperties.SCHEMA_URL.getPropName())) {
-      analyzeAvroSchema(analyzer);
-    }
-
-    // Analyze the 'skip.header.line.count' property.
-    analyzeSkipHeaderLineCount(getTargetTable(), tblProperties_);
-  }
-
-  /**
-   * Check that the Avro schema provided in avro.schema.url or avro.schema.literal is
-   * valid JSON and contains only supported Impala types. If both properties are set,
-   * then avro.schema.url is ignored.
-   */
-  private void analyzeAvroSchema(Analyzer analyzer)
-      throws AnalysisException {
-    List<Map<String, String>> schemaSearchLocations = Lists.newArrayList();
-    schemaSearchLocations.add(tblProperties_);
-
-    String avroSchema = AvroSchemaUtils.getAvroSchema(schemaSearchLocations);
-    avroSchema = Strings.nullToEmpty(avroSchema);
-    if (avroSchema.isEmpty()) {
-      throw new AnalysisException("Avro schema is null or empty: " +
-          table_.getFullName());
-    }
-
-    // Check if the schema is valid and is supported by Impala
-    try {
-      AvroSchemaParser.parse(avroSchema);
-    } catch (SchemaParseException e) {
-      throw new AnalysisException(String.format(
-          "Error parsing Avro schema for table '%s': %s", table_.getFullName(),
-          e.getMessage()));
-    }
-  }
-
-  /**
-   * Analyze the 'skip.header.line.count' property to make sure it is set to a valid
-   * value. It is looked up in 'tblProperties', which must not be null.
-   */
-  public static void analyzeSkipHeaderLineCount(Map<String, String> tblProperties)
-      throws AnalysisException {
-    analyzeSkipHeaderLineCount(null, tblProperties);
-  }
-
-  /**
-   * Analyze the 'skip.header.line.count' property to make sure it is set to a valid
-   * value. It is looked up in 'tblProperties', which must not be null. If 'table' is not
-   * null, then the method ensures that 'skip.header.line.count' is supported for its
-   * table type. If it is null, then this check is omitted.
-   */
-  public static void analyzeSkipHeaderLineCount(Table table,
-      Map<String, String> tblProperties) throws AnalysisException {
-    if (tblProperties.containsKey(HdfsTable.TBL_PROP_SKIP_HEADER_LINE_COUNT)) {
-      if (table != null && !(table instanceof HdfsTable)) {
-        throw new AnalysisException(String.format("Table property " +
-            "'skip.header.line.count' is only supported for HDFS tables."));
-      }
-      StringBuilder error = new StringBuilder();
-      HdfsTable.parseSkipHeaderLineCount(tblProperties, error);
-      if (error.length() > 0) throw new AnalysisException(error.toString());
-    }
-  }
-}
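
The Map-only overload of analyzeSkipHeaderLineCount() above exists for callers that have no Table at hand; only the property value itself is validated. A minimal sketch of such a call, assuming HdfsTable.TBL_PROP_SKIP_HEADER_LINE_COUNT resolves to 'skip.header.line.count' as the Javadoc suggests:

    import java.util.HashMap;
    import java.util.Map;

    import com.cloudera.impala.analysis.AlterTableSetTblProperties;
    import com.cloudera.impala.common.AnalysisException;

    public class SkipHeaderLineCountCheck {
      public static void main(String[] args) {
        Map<String, String> props = new HashMap<String, String>();
        // Key assumed to match HdfsTable.TBL_PROP_SKIP_HEADER_LINE_COUNT.
        props.put("skip.header.line.count", "2");
        try {
          // No Table argument: only the property value itself is checked.
          AlterTableSetTblProperties.analyzeSkipHeaderLineCount(props);
          System.out.println("skip.header.line.count accepted");
        } catch (AnalysisException e) {
          System.out.println("rejected: " + e.getMessage());
        }
      }
    }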

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/AlterTableStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/AlterTableStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/AlterTableStmt.java
deleted file mode 100644
index aebd009..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/AlterTableStmt.java
+++ /dev/null
@@ -1,91 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.catalog.DataSourceTable;
-import com.cloudera.impala.catalog.KuduTable;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.catalog.View;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TAlterTableParams;
-import com.cloudera.impala.thrift.TTableName;
-import com.google.common.base.Preconditions;
-
-/**
- * Base class for all ALTER TABLE statements.
- */
-public abstract class AlterTableStmt extends StatementBase {
-  protected final TableName tableName_;
-
-  // Set during analysis.
-  protected Table table_;
-
-  protected AlterTableStmt(TableName tableName) {
-    Preconditions.checkState(tableName != null && !tableName.isEmpty());
-    tableName_ = tableName;
-    table_ = null;
-  }
-
-  public String getTbl() { return tableName_.getTbl(); }
-
-  /**
-   * Can only be called after analysis; returns the parent database name of the target
-   * table for this ALTER TABLE statement.
-   */
-  public String getDb() {
-    return getTargetTable().getDb().getName();
-  }
-
-  /**
-   * Can only be called after analysis; returns the Table object of the target of this
-   * ALTER TABLE statement.
-   */
-  protected Table getTargetTable() {
-    Preconditions.checkNotNull(table_);
-    return table_;
-  }
-
-  public TAlterTableParams toThrift() {
-    TAlterTableParams params = new TAlterTableParams();
-    params.setTable_name(new TTableName(getDb(), getTbl()));
-    return params;
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    table_ = analyzer.getTable(tableName_, Privilege.ALTER);
-    if (table_ instanceof KuduTable
-        && !(this instanceof AlterTableSetTblProperties)
-        && !(this instanceof AlterTableSetColumnStats)
-        && !(this instanceof AlterTableOrViewRenameStmt)) {
-      throw new AnalysisException(String.format(
-          "ALTER TABLE not allowed on Kudu table: %s", table_.getFullName()));
-    }
-    if (table_ instanceof View) {
-      throw new AnalysisException(String.format(
-          "ALTER TABLE not allowed on a view: %s", table_.getFullName()));
-    }
-    if (table_ instanceof DataSourceTable
-        && !(this instanceof AlterTableSetColumnStats)) {
-      throw new AnalysisException(String.format(
-          "ALTER TABLE not allowed on a table produced by a data source: %s",
-          table_.getFullName()));
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/AlterViewStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/AlterViewStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/AlterViewStmt.java
deleted file mode 100644
index 640b3a1..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/AlterViewStmt.java
+++ /dev/null
@@ -1,71 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.catalog.View;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.RuntimeEnv;
-
-import com.google.common.base.Preconditions;
-
-/**
- * Represents an ALTER VIEW AS statement.
- */
-public class AlterViewStmt extends CreateOrAlterViewStmtBase {
-
-  public AlterViewStmt(TableName tableName, QueryStmt viewDefStmt) {
-    super(false, tableName, null, null, viewDefStmt);
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    // Enforce Hive column labels for view compatibility.
-    analyzer.setUseHiveColLabels(true);
-    viewDefStmt_.analyze(analyzer);
-
-    Preconditions.checkState(tableName_ != null && !tableName_.isEmpty());
-    dbName_ = analyzer.getTargetDbName(tableName_);
-    owner_ = analyzer.getUser().getName();
-
-    Table table = analyzer.getTable(tableName_, Privilege.ALTER);
-    Preconditions.checkNotNull(table);
-    if (!(table instanceof View)) {
-      throw new AnalysisException(String.format(
-          "ALTER VIEW not allowed on a table: %s.%s", dbName_, getTbl()));
-    }
-
-    createColumnAndViewDefs(analyzer);
-    if (RuntimeEnv.INSTANCE.computeLineage() || RuntimeEnv.INSTANCE.isTestEnv()) {
-      computeLineageGraph(analyzer);
-    }
-  }
-
-  @Override
-  public String toSql() {
-    StringBuilder sb = new StringBuilder();
-    sb.append("ALTER VIEW ");
-    if (tableName_.getDb() != null) {
-      sb.append(tableName_.getDb() + ".");
-    }
-    sb.append(tableName_.getTbl());
-    sb.append(" AS " + viewDefStmt_.toSql());
-    return sb.toString();
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/AnalysisContext.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/AnalysisContext.java b/fe/src/main/java/com/cloudera/impala/analysis/AnalysisContext.java
deleted file mode 100644
index 006474d..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/AnalysisContext.java
+++ /dev/null
@@ -1,544 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.io.StringReader;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.authorization.AuthorizationChecker;
-import com.cloudera.impala.authorization.AuthorizationConfig;
-import com.cloudera.impala.authorization.AuthorizeableColumn;
-import com.cloudera.impala.authorization.AuthorizeableTable;
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.authorization.PrivilegeRequest;
-import com.cloudera.impala.catalog.AuthorizationException;
-import com.cloudera.impala.catalog.Db;
-import com.cloudera.impala.catalog.ImpaladCatalog;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.common.Pair;
-import com.cloudera.impala.thrift.TAccessEvent;
-import com.cloudera.impala.thrift.TLineageGraph;
-import com.cloudera.impala.thrift.TQueryCtx;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-
-/**
- * Wrapper class for parser and analyzer.
- */
-public class AnalysisContext {
-  private final static Logger LOG = LoggerFactory.getLogger(AnalysisContext.class);
-  private final ImpaladCatalog catalog_;
-  private final TQueryCtx queryCtx_;
-  private final AuthorizationConfig authzConfig_;
-
-  // Set in analyze()
-  private AnalysisResult analysisResult_;
-
-  public AnalysisContext(ImpaladCatalog catalog, TQueryCtx queryCtx,
-      AuthorizationConfig authzConfig) {
-    catalog_ = catalog;
-    queryCtx_ = queryCtx;
-    authzConfig_ = authzConfig;
-  }
-
-  static public class AnalysisResult {
-    private StatementBase stmt_;
-    private Analyzer analyzer_;
-    private CreateTableStmt tmpCreateTableStmt_;
-
-    public boolean isAlterTableStmt() { return stmt_ instanceof AlterTableStmt; }
-    public boolean isAlterViewStmt() { return stmt_ instanceof AlterViewStmt; }
-    public boolean isComputeStatsStmt() { return stmt_ instanceof ComputeStatsStmt; }
-    public boolean isQueryStmt() { return stmt_ instanceof QueryStmt; }
-    public boolean isInsertStmt() { return stmt_ instanceof InsertStmt; }
-    public boolean isDropDbStmt() { return stmt_ instanceof DropDbStmt; }
-    public boolean isDropTableOrViewStmt() {
-      return stmt_ instanceof DropTableOrViewStmt;
-    }
-    public boolean isDropFunctionStmt() { return stmt_ instanceof DropFunctionStmt; }
-    public boolean isDropDataSrcStmt() { return stmt_ instanceof DropDataSrcStmt; }
-    public boolean isDropStatsStmt() { return stmt_ instanceof DropStatsStmt; }
-    public boolean isCreateTableLikeStmt() {
-      return stmt_ instanceof CreateTableLikeStmt;
-    }
-    public boolean isCreateViewStmt() { return stmt_ instanceof CreateViewStmt; }
-    public boolean isCreateTableAsSelectStmt() {
-      return stmt_ instanceof CreateTableAsSelectStmt;
-    }
-    public boolean isCreateTableStmt() { return stmt_ instanceof CreateTableStmt; }
-    public boolean isCreateDbStmt() { return stmt_ instanceof CreateDbStmt; }
-    public boolean isCreateUdfStmt() { return stmt_ instanceof CreateUdfStmt; }
-    public boolean isCreateUdaStmt() { return stmt_ instanceof CreateUdaStmt; }
-    public boolean isCreateDataSrcStmt() { return stmt_ instanceof CreateDataSrcStmt; }
-    public boolean isLoadDataStmt() { return stmt_ instanceof LoadDataStmt; }
-    public boolean isUseStmt() { return stmt_ instanceof UseStmt; }
-    public boolean isSetStmt() { return stmt_ instanceof SetStmt; }
-    public boolean isShowTablesStmt() { return stmt_ instanceof ShowTablesStmt; }
-    public boolean isShowDbsStmt() { return stmt_ instanceof ShowDbsStmt; }
-    public boolean isShowDataSrcsStmt() { return stmt_ instanceof ShowDataSrcsStmt; }
-    public boolean isShowStatsStmt() { return stmt_ instanceof ShowStatsStmt; }
-    public boolean isShowFunctionsStmt() { return stmt_ instanceof ShowFunctionsStmt; }
-    public boolean isShowCreateTableStmt() {
-      return stmt_ instanceof ShowCreateTableStmt;
-    }
-    public boolean isShowCreateFunctionStmt() {
-      return stmt_ instanceof ShowCreateFunctionStmt;
-    }
-    public boolean isShowFilesStmt() { return stmt_ instanceof ShowFilesStmt; }
-    public boolean isDescribeDbStmt() { return stmt_ instanceof DescribeDbStmt; }
-    public boolean isDescribeTableStmt() { return stmt_ instanceof DescribeTableStmt; }
-    public boolean isResetMetadataStmt() { return stmt_ instanceof ResetMetadataStmt; }
-    public boolean isExplainStmt() { return stmt_.isExplain(); }
-    public boolean isShowRolesStmt() { return stmt_ instanceof ShowRolesStmt; }
-    public boolean isShowGrantRoleStmt() { return stmt_ instanceof ShowGrantRoleStmt; }
-    public boolean isCreateDropRoleStmt() { return stmt_ instanceof CreateDropRoleStmt; }
-    public boolean isGrantRevokeRoleStmt() {
-      return stmt_ instanceof GrantRevokeRoleStmt;
-    }
-    public boolean isGrantRevokePrivStmt() {
-      return stmt_ instanceof GrantRevokePrivStmt;
-    }
-    public boolean isTruncateStmt() { return stmt_ instanceof TruncateStmt; }
-    public boolean isUpdateStmt() { return stmt_ instanceof UpdateStmt; }
-    public UpdateStmt getUpdateStmt() { return (UpdateStmt) stmt_; }
-    public boolean isDeleteStmt() { return stmt_ instanceof DeleteStmt; }
-    public DeleteStmt getDeleteStmt() { return (DeleteStmt) stmt_; }
-
-    public boolean isCatalogOp() {
-      return isUseStmt() || isViewMetadataStmt() || isDdlStmt();
-    }
-
-    private boolean isDdlStmt() {
-      return isCreateTableLikeStmt() || isCreateTableStmt() ||
-          isCreateViewStmt() || isCreateDbStmt() || isDropDbStmt() ||
-          isDropTableOrViewStmt() || isResetMetadataStmt() || isAlterTableStmt() ||
-          isAlterViewStmt() || isComputeStatsStmt() || isCreateUdfStmt() ||
-          isCreateUdaStmt() || isDropFunctionStmt() || isCreateTableAsSelectStmt() ||
-          isCreateDataSrcStmt() || isDropDataSrcStmt() || isDropStatsStmt() ||
-          isCreateDropRoleStmt() || isGrantRevokeStmt() || isTruncateStmt();
-    }
-
-    private boolean isViewMetadataStmt() {
-      return isShowFilesStmt() || isShowTablesStmt() || isShowDbsStmt() ||
-          isShowFunctionsStmt() || isShowRolesStmt() || isShowGrantRoleStmt() ||
-          isShowCreateTableStmt() || isShowDataSrcsStmt() || isShowStatsStmt() ||
-          isDescribeTableStmt() || isDescribeDbStmt() || isShowCreateFunctionStmt();
-    }
-
-    private boolean isGrantRevokeStmt() {
-      return isGrantRevokeRoleStmt() || isGrantRevokePrivStmt();
-    }
-
-    public boolean isDmlStmt() {
-      return isInsertStmt();
-    }
-
-    public AlterTableStmt getAlterTableStmt() {
-      Preconditions.checkState(isAlterTableStmt());
-      return (AlterTableStmt) stmt_;
-    }
-
-    public AlterViewStmt getAlterViewStmt() {
-      Preconditions.checkState(isAlterViewStmt());
-      return (AlterViewStmt) stmt_;
-    }
-
-    public ComputeStatsStmt getComputeStatsStmt() {
-      Preconditions.checkState(isComputeStatsStmt());
-      return (ComputeStatsStmt) stmt_;
-    }
-
-    public CreateTableLikeStmt getCreateTableLikeStmt() {
-      Preconditions.checkState(isCreateTableLikeStmt());
-      return (CreateTableLikeStmt) stmt_;
-    }
-
-    public CreateViewStmt getCreateViewStmt() {
-      Preconditions.checkState(isCreateViewStmt());
-      return (CreateViewStmt) stmt_;
-    }
-
-    public CreateTableAsSelectStmt getCreateTableAsSelectStmt() {
-      Preconditions.checkState(isCreateTableAsSelectStmt());
-      return (CreateTableAsSelectStmt) stmt_;
-    }
-
-    public CreateTableStmt getCreateTableStmt() {
-      Preconditions.checkState(isCreateTableStmt());
-      return (CreateTableStmt) stmt_;
-    }
-
-    public CreateTableStmt getTmpCreateTableStmt() {
-      return tmpCreateTableStmt_;
-    }
-
-    public CreateDbStmt getCreateDbStmt() {
-      Preconditions.checkState(isCreateDbStmt());
-      return (CreateDbStmt) stmt_;
-    }
-
-    public CreateUdfStmt getCreateUdfStmt() {
-      Preconditions.checkState(isCreateUdfStmt());
-      return (CreateUdfStmt) stmt_;
-    }
-
-    public CreateUdaStmt getCreateUdaStmt() {
-      Preconditions.checkState(isCreateUdfStmt());
-      return (CreateUdaStmt) stmt_;
-    }
-
-    public DropDbStmt getDropDbStmt() {
-      Preconditions.checkState(isDropDbStmt());
-      return (DropDbStmt) stmt_;
-    }
-
-    public DropTableOrViewStmt getDropTableOrViewStmt() {
-      Preconditions.checkState(isDropTableOrViewStmt());
-      return (DropTableOrViewStmt) stmt_;
-    }
-
-    public TruncateStmt getTruncateStmt() {
-      Preconditions.checkState(isTruncateStmt());
-      return (TruncateStmt) stmt_;
-    }
-
-    public DropFunctionStmt getDropFunctionStmt() {
-      Preconditions.checkState(isDropFunctionStmt());
-      return (DropFunctionStmt) stmt_;
-    }
-
-    public LoadDataStmt getLoadDataStmt() {
-      Preconditions.checkState(isLoadDataStmt());
-      return (LoadDataStmt) stmt_;
-    }
-
-    public QueryStmt getQueryStmt() {
-      Preconditions.checkState(isQueryStmt());
-      return (QueryStmt) stmt_;
-    }
-
-    public InsertStmt getInsertStmt() {
-      if (isCreateTableAsSelectStmt()) {
-        return getCreateTableAsSelectStmt().getInsertStmt();
-      } else {
-        Preconditions.checkState(isInsertStmt());
-        return (InsertStmt) stmt_;
-      }
-    }
-
-    public UseStmt getUseStmt() {
-      Preconditions.checkState(isUseStmt());
-      return (UseStmt) stmt_;
-    }
-
-    public SetStmt getSetStmt() {
-      Preconditions.checkState(isSetStmt());
-      return (SetStmt) stmt_;
-    }
-
-    public ShowTablesStmt getShowTablesStmt() {
-      Preconditions.checkState(isShowTablesStmt());
-      return (ShowTablesStmt) stmt_;
-    }
-
-    public ShowDbsStmt getShowDbsStmt() {
-      Preconditions.checkState(isShowDbsStmt());
-      return (ShowDbsStmt) stmt_;
-    }
-
-    public ShowDataSrcsStmt getShowDataSrcsStmt() {
-      Preconditions.checkState(isShowDataSrcsStmt());
-      return (ShowDataSrcsStmt) stmt_;
-    }
-
-    public ShowStatsStmt getShowStatsStmt() {
-      Preconditions.checkState(isShowStatsStmt());
-      return (ShowStatsStmt) stmt_;
-    }
-
-    public ShowFunctionsStmt getShowFunctionsStmt() {
-      Preconditions.checkState(isShowFunctionsStmt());
-      return (ShowFunctionsStmt) stmt_;
-    }
-
-    public ShowFilesStmt getShowFilesStmt() {
-      Preconditions.checkState(isShowFilesStmt());
-      return (ShowFilesStmt) stmt_;
-    }
-
-    public DescribeDbStmt getDescribeDbStmt() {
-      Preconditions.checkState(isDescribeDbStmt());
-      return (DescribeDbStmt) stmt_;
-    }
-
-    public DescribeTableStmt getDescribeTableStmt() {
-      Preconditions.checkState(isDescribeTableStmt());
-      return (DescribeTableStmt) stmt_;
-    }
-
-    public ShowCreateTableStmt getShowCreateTableStmt() {
-      Preconditions.checkState(isShowCreateTableStmt());
-      return (ShowCreateTableStmt) stmt_;
-    }
-
-    public ShowCreateFunctionStmt getShowCreateFunctionStmt() {
-      Preconditions.checkState(isShowCreateFunctionStmt());
-      return (ShowCreateFunctionStmt) stmt_;
-    }
-
-    public StatementBase getStmt() { return stmt_; }
-    public Analyzer getAnalyzer() { return analyzer_; }
-    public Set<TAccessEvent> getAccessEvents() { return analyzer_.getAccessEvents(); }
-    public boolean requiresRewrite() {
-      return analyzer_.containsSubquery() && !(stmt_ instanceof CreateViewStmt)
-          && !(stmt_ instanceof AlterViewStmt);
-    }
-    public TLineageGraph getThriftLineageGraph() {
-      return analyzer_.getThriftSerializedLineageGraph();
-    }
-  }
-
-  /**
-   * Parse and analyze 'stmt'. If 'stmt' is a nested query (i.e. query that
-   * contains subqueries), it is also rewritten by performing subquery unnesting.
-   * The transformed stmt is then re-analyzed in a new analysis context.
-   *
-   * The result of analysis can be retrieved by calling
-   * getAnalysisResult().
-   *
-   * @throws AnalysisException
-   *           On any other error, including parsing errors. Also thrown when any
-   *           missing tables are detected as a result of running analysis.
-   */
-  public void analyze(String stmt) throws AnalysisException {
-    Analyzer analyzer = new Analyzer(catalog_, queryCtx_, authzConfig_);
-    analyze(stmt, analyzer);
-  }
-
-  /**
-   * Parse and analyze 'stmt' using a specified Analyzer.
-   */
-  public void analyze(String stmt, Analyzer analyzer) throws AnalysisException {
-    SqlScanner input = new SqlScanner(new StringReader(stmt));
-    SqlParser parser = new SqlParser(input);
-    try {
-      analysisResult_ = new AnalysisResult();
-      analysisResult_.analyzer_ = analyzer;
-      if (analysisResult_.analyzer_ == null) {
-        analysisResult_.analyzer_ = new Analyzer(catalog_, queryCtx_, authzConfig_);
-      }
-      analysisResult_.stmt_ = (StatementBase) parser.parse().value;
-      if (analysisResult_.stmt_ == null) return;
-
-      // For CTAS, we copy the create statement in case we have to create a new CTAS
-      // statement after a query rewrite.
-      if (analysisResult_.stmt_ instanceof CreateTableAsSelectStmt) {
-        analysisResult_.tmpCreateTableStmt_ =
-            ((CreateTableAsSelectStmt)analysisResult_.stmt_).getCreateStmt().clone();
-      }
-
-      analysisResult_.stmt_.analyze(analysisResult_.analyzer_);
-      boolean isExplain = analysisResult_.isExplainStmt();
-
-      // Check if we need to rewrite the statement.
-      if (analysisResult_.requiresRewrite()) {
-        StatementBase rewrittenStmt = StmtRewriter.rewrite(analysisResult_);
-        // Re-analyze the rewritten statement.
-        Preconditions.checkNotNull(rewrittenStmt);
-        analysisResult_ = new AnalysisResult();
-        analysisResult_.analyzer_ = new Analyzer(catalog_, queryCtx_, authzConfig_);
-        analysisResult_.stmt_ = rewrittenStmt;
-        analysisResult_.stmt_.analyze(analysisResult_.analyzer_);
-        LOG.trace("rewrittenStmt: " + rewrittenStmt.toSql());
-        if (isExplain) analysisResult_.stmt_.setIsExplain();
-        Preconditions.checkState(!analysisResult_.requiresRewrite());
-      }
-    } catch (AnalysisException e) {
-      // Don't wrap AnalysisExceptions in another AnalysisException
-      throw e;
-    } catch (Exception e) {
-      throw new AnalysisException(parser.getErrorMsg(stmt), e);
-    }
-  }
-
-  /**
-   * Authorize an analyzed statement.
-   * analyze() must have already been called. Throws an AuthorizationException if the
-   * user doesn't have sufficient privileges to run this statement.
-   */
-  public void authorize(AuthorizationChecker authzChecker)
-      throws AuthorizationException, InternalException {
-    Preconditions.checkNotNull(analysisResult_);
-    Analyzer analyzer = getAnalyzer();
-    // Process statements for which column-level privilege requests may be registered
-    // except for DESCRIBE TABLE or REFRESH/INVALIDATE statements
-    if (analysisResult_.isQueryStmt() || analysisResult_.isInsertStmt() ||
-        analysisResult_.isUpdateStmt() || analysisResult_.isDeleteStmt() ||
-        analysisResult_.isCreateTableAsSelectStmt() ||
-        analysisResult_.isCreateViewStmt() || analysisResult_.isAlterViewStmt()) {
-      // Map of table name to a list of privilege requests associated with that table.
-      // These include both table-level and column-level privilege requests.
-      Map<String, List<PrivilegeRequest>> tablePrivReqs = Maps.newHashMap();
-      // Privilege requests that are not column or table-level.
-      List<PrivilegeRequest> otherPrivReqs = Lists.newArrayList();
-      // Group the registered privilege requests based on the table they reference.
-      for (PrivilegeRequest privReq: analyzer.getPrivilegeReqs()) {
-        String tableName = privReq.getAuthorizeable().getFullTableName();
-        if (tableName == null) {
-          otherPrivReqs.add(privReq);
-        } else {
-          List<PrivilegeRequest> requests = tablePrivReqs.get(tableName);
-          if (requests == null) {
-            requests = Lists.newArrayList();
-            tablePrivReqs.put(tableName, requests);
-          }
-          // The table-level SELECT must be the first table-level request, and it
-          // must precede all column-level privilege requests.
-          Preconditions.checkState((requests.isEmpty() ||
-              !(privReq.getAuthorizeable() instanceof AuthorizeableColumn)) ||
-              (requests.get(0).getAuthorizeable() instanceof AuthorizeableTable &&
-              requests.get(0).getPrivilege() == Privilege.SELECT));
-          requests.add(privReq);
-        }
-      }
-
-      // Check any non-table, non-column privilege requests first.
-      for (PrivilegeRequest request: otherPrivReqs) {
-        authorizePrivilegeRequest(authzChecker, request);
-      }
-
-      // Authorize table accesses, one table at a time, by considering both table and
-      // column-level privilege requests.
-      for (Map.Entry<String, List<PrivilegeRequest>> entry: tablePrivReqs.entrySet()) {
-        authorizeTableAccess(authzChecker, entry.getValue());
-      }
-    } else {
-      for (PrivilegeRequest privReq: analyzer.getPrivilegeReqs()) {
-        Preconditions.checkState(
-            !(privReq.getAuthorizeable() instanceof AuthorizeableColumn) ||
-            analysisResult_.isDescribeTableStmt() ||
-            analysisResult_.isResetMetadataStmt());
-        authorizePrivilegeRequest(authzChecker, privReq);
-      }
-    }
-
-    // Check any masked requests.
-    for (Pair<PrivilegeRequest, String> maskedReq: analyzer.getMaskedPrivilegeReqs()) {
-      if (!authzChecker.hasAccess(analyzer.getUser(), maskedReq.first)) {
-        throw new AuthorizationException(maskedReq.second);
-      }
-    }
-  }
-
-  /**
-   * Authorize a privilege request.
-   * Throws an AuthorizationException if the user doesn't have sufficient privileges for
-   * this request. Also, checks if the request references a system database.
-   */
-  private void authorizePrivilegeRequest(AuthorizationChecker authzChecker,
-    PrivilegeRequest request) throws AuthorizationException, InternalException {
-    Preconditions.checkNotNull(request);
-    String dbName = null;
-    if (request.getAuthorizeable() != null) {
-      dbName = request.getAuthorizeable().getDbName();
-    }
-    // If this is a system database, some actions should always be allowed
-    // or disabled, regardless of what is in the auth policy.
-    if (dbName != null && checkSystemDbAccess(dbName, request.getPrivilege())) {
-      return;
-    }
-    authzChecker.checkAccess(getAnalyzer().getUser(), request);
-  }
-
-  /**
-   * Authorize a list of privilege requests associated with a single table.
-   * It checks if the user has sufficient table-level privileges and if that is
-   * not the case, it falls back on checking column-level privileges, if any. This
-   * function requires 'SELECT' requests to be ordered by table and then by column
-   * privilege requests. Throws an AuthorizationException if the user doesn't have
-   * sufficient privileges.
-   */
-  private void authorizeTableAccess(AuthorizationChecker authzChecker,
-      List<PrivilegeRequest> requests)
-      throws AuthorizationException, InternalException {
-    Preconditions.checkState(!requests.isEmpty());
-    Analyzer analyzer = getAnalyzer();
-    boolean hasTableSelectPriv = true;
-    boolean hasColumnSelectPriv = false;
-    for (PrivilegeRequest request: requests) {
-      if (request.getAuthorizeable() instanceof AuthorizeableTable) {
-        try {
-          authorizePrivilegeRequest(authzChecker, request);
-        } catch (AuthorizationException e) {
-          // Authorization fails if we fail to authorize any table-level request that is
-          // not a SELECT privilege (e.g. INSERT).
-          if (request.getPrivilege() != Privilege.SELECT) throw e;
-          hasTableSelectPriv = false;
-        }
-      } else {
-        Preconditions.checkState(
-            request.getAuthorizeable() instanceof AuthorizeableColumn);
-        if (hasTableSelectPriv) continue;
-        if (authzChecker.hasAccess(analyzer.getUser(), request)) {
-          hasColumnSelectPriv = true;
-          continue;
-        }
-        // Make sure we don't reveal any column names in the error message.
-        throw new AuthorizationException(String.format("User '%s' does not have " +
-          "privileges to execute '%s' on: %s", analyzer.getUser().getName(),
-          request.getPrivilege().toString(),
-          request.getAuthorizeable().getFullTableName()));
-      }
-    }
-    if (!hasTableSelectPriv && !hasColumnSelectPriv) {
-       throw new AuthorizationException(String.format("User '%s' does not have " +
-          "privileges to execute 'SELECT' on: %s", analyzer.getUser().getName(),
-          requests.get(0).getAuthorizeable().getFullTableName()));
-    }
-  }
-
-  /**
-   * Throws an AuthorizationException if the dbName is a system db
-   * and the user is trying to modify it.
-   * Returns true if this is a system db and the action is allowed.
-   */
-  private boolean checkSystemDbAccess(String dbName, Privilege privilege)
-      throws AuthorizationException {
-    Db db = catalog_.getDb(dbName);
-    if (db != null && db.isSystemDb()) {
-      switch (privilege) {
-        case VIEW_METADATA:
-        case ANY:
-          return true;
-        default:
-          throw new AuthorizationException("Cannot modify system database.");
-      }
-    }
-    return false;
-  }
-
-  public AnalysisResult getAnalysisResult() { return analysisResult_; }
-  public Analyzer getAnalyzer() { return getAnalysisResult().getAnalyzer(); }
-}
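
[Editor's note: for orientation, a minimal sketch of how a frontend caller might drive the analyze/authorize flow documented in the removed AnalysisContext above. This is illustrative only and not part of the commit; the helper method, the variable names, the sample query string, and the construction of the AnalysisContext and AuthorizationChecker are assumptions, while the calls themselves (analyze(String), getAnalysisResult(), isQueryStmt()/getQueryStmt(), authorize(AuthorizationChecker)) follow the signatures shown in the diff.]

  // Hedged sketch -- hypothetical caller, not code from this commit.
  // Assumes 'ctx' was constructed with a catalog, query context and
  // authorization config, and 'authzChecker' is an AuthorizationChecker.
  void analyzeAndAuthorize(AnalysisContext ctx, AuthorizationChecker authzChecker)
      throws AnalysisException, AuthorizationException, InternalException {
    // Parses and analyzes the statement; nested queries are rewritten by
    // subquery unnesting and re-analyzed, as described in the Javadoc above.
    ctx.analyze("SELECT count(*) FROM functional.alltypes");
    AnalysisContext.AnalysisResult result = ctx.getAnalysisResult();
    if (result.isQueryStmt()) {
      // Typed accessor; guarded by a Preconditions.checkState in the class above.
      QueryStmt query = result.getQueryStmt();
    }
    // Throws AuthorizationException if the user lacks sufficient privileges.
    ctx.authorize(authzChecker);
  }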


[54/61] incubator-impala git commit: IMPALA-3786: Replace "cloudera" with "apache" (part 2)

Posted by kw...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/planner/ParallelPlanner.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/ParallelPlanner.java b/fe/src/main/java/org/apache/impala/planner/ParallelPlanner.java
index 905d68d..6db4ae4 100644
--- a/fe/src/main/java/org/apache/impala/planner/ParallelPlanner.java
+++ b/fe/src/main/java/org/apache/impala/planner/ParallelPlanner.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.planner;
+package org.apache.impala.planner;
 
 import java.util.ArrayList;
 import java.util.List;
@@ -24,7 +24,7 @@ import java.util.Map;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.common.IdGenerator;
+import org.apache.impala.common.IdGenerator;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/planner/PipelinedPlanNodeSet.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/PipelinedPlanNodeSet.java b/fe/src/main/java/org/apache/impala/planner/PipelinedPlanNodeSet.java
index 6714213..249987a 100644
--- a/fe/src/main/java/org/apache/impala/planner/PipelinedPlanNodeSet.java
+++ b/fe/src/main/java/org/apache/impala/planner/PipelinedPlanNodeSet.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.planner;
+package org.apache.impala.planner;
 
 import java.util.ArrayList;
 import java.util.List;
@@ -24,7 +24,7 @@ import java.util.Set;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.thrift.TQueryOptions;
+import org.apache.impala.thrift.TQueryOptions;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/planner/PlanFragment.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/PlanFragment.java b/fe/src/main/java/org/apache/impala/planner/PlanFragment.java
index 48a71dc..0e5134f 100644
--- a/fe/src/main/java/org/apache/impala/planner/PlanFragment.java
+++ b/fe/src/main/java/org/apache/impala/planner/PlanFragment.java
@@ -15,30 +15,30 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.planner;
+package org.apache.impala.planner;
 
 import java.util.List;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.BinaryPredicate;
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.analysis.JoinOperator;
-import com.cloudera.impala.analysis.SlotRef;
-import com.cloudera.impala.catalog.HdfsFileFormat;
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.common.NotImplementedException;
-import com.cloudera.impala.common.TreeNode;
-import com.cloudera.impala.planner.JoinNode.DistributionMode;
-import com.cloudera.impala.thrift.TExplainLevel;
-import com.cloudera.impala.thrift.TPartitionType;
-import com.cloudera.impala.thrift.TPlan;
-import com.cloudera.impala.thrift.TPlanFragment;
-import com.cloudera.impala.thrift.TPlanFragmentTree;
+import org.apache.impala.analysis.Analyzer;
+import org.apache.impala.analysis.BinaryPredicate;
+import org.apache.impala.analysis.Expr;
+import org.apache.impala.analysis.JoinOperator;
+import org.apache.impala.analysis.SlotRef;
+import org.apache.impala.catalog.HdfsFileFormat;
+import org.apache.impala.catalog.HdfsTable;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.common.InternalException;
+import org.apache.impala.common.NotImplementedException;
+import org.apache.impala.common.TreeNode;
+import org.apache.impala.planner.JoinNode.DistributionMode;
+import org.apache.impala.thrift.TExplainLevel;
+import org.apache.impala.thrift.TPartitionType;
+import org.apache.impala.thrift.TPlan;
+import org.apache.impala.thrift.TPlanFragment;
+import org.apache.impala.thrift.TPlanFragmentTree;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Predicates;
 import com.google.common.collect.Lists;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/planner/PlanFragmentId.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/PlanFragmentId.java b/fe/src/main/java/org/apache/impala/planner/PlanFragmentId.java
index 98b08fe..99c96e9 100644
--- a/fe/src/main/java/org/apache/impala/planner/PlanFragmentId.java
+++ b/fe/src/main/java/org/apache/impala/planner/PlanFragmentId.java
@@ -15,10 +15,10 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.planner;
+package org.apache.impala.planner;
 
-import com.cloudera.impala.common.Id;
-import com.cloudera.impala.common.IdGenerator;
+import org.apache.impala.common.Id;
+import org.apache.impala.common.IdGenerator;
 
 public class PlanFragmentId extends Id<PlanFragmentId> {
   // Construction only allowed via an IdGenerator.

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/planner/PlanId.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/PlanId.java b/fe/src/main/java/org/apache/impala/planner/PlanId.java
index 2cecbd8..72ed46f 100644
--- a/fe/src/main/java/org/apache/impala/planner/PlanId.java
+++ b/fe/src/main/java/org/apache/impala/planner/PlanId.java
@@ -15,10 +15,10 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.planner;
+package org.apache.impala.planner;
 
-import com.cloudera.impala.common.Id;
-import com.cloudera.impala.common.IdGenerator;
+import org.apache.impala.common.Id;
+import org.apache.impala.common.IdGenerator;
 
 public class PlanId extends Id<PlanId> {
   // Construction only allowed via an IdGenerator.

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/planner/PlanNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/PlanNode.java b/fe/src/main/java/org/apache/impala/planner/PlanNode.java
index d38f10a..3350364 100644
--- a/fe/src/main/java/org/apache/impala/planner/PlanNode.java
+++ b/fe/src/main/java/org/apache/impala/planner/PlanNode.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.planner;
+package org.apache.impala.planner;
 
 import java.util.ArrayList;
 import java.util.Collection;
@@ -26,21 +26,21 @@ import java.util.Set;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.analysis.ExprId;
-import com.cloudera.impala.analysis.ExprSubstitutionMap;
-import com.cloudera.impala.analysis.TupleDescriptor;
-import com.cloudera.impala.analysis.TupleId;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.common.PrintUtils;
-import com.cloudera.impala.common.TreeNode;
-import com.cloudera.impala.planner.RuntimeFilterGenerator.RuntimeFilter;
-import com.cloudera.impala.thrift.TExecStats;
-import com.cloudera.impala.thrift.TExplainLevel;
-import com.cloudera.impala.thrift.TPlan;
-import com.cloudera.impala.thrift.TPlanNode;
-import com.cloudera.impala.thrift.TQueryOptions;
+import org.apache.impala.analysis.Analyzer;
+import org.apache.impala.analysis.Expr;
+import org.apache.impala.analysis.ExprId;
+import org.apache.impala.analysis.ExprSubstitutionMap;
+import org.apache.impala.analysis.TupleDescriptor;
+import org.apache.impala.analysis.TupleId;
+import org.apache.impala.common.ImpalaException;
+import org.apache.impala.common.PrintUtils;
+import org.apache.impala.common.TreeNode;
+import org.apache.impala.planner.RuntimeFilterGenerator.RuntimeFilter;
+import org.apache.impala.thrift.TExecStats;
+import org.apache.impala.thrift.TExplainLevel;
+import org.apache.impala.thrift.TPlan;
+import org.apache.impala.thrift.TPlanNode;
+import org.apache.impala.thrift.TQueryOptions;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/planner/PlanNodeId.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/PlanNodeId.java b/fe/src/main/java/org/apache/impala/planner/PlanNodeId.java
index d161e2b..2afad37 100644
--- a/fe/src/main/java/org/apache/impala/planner/PlanNodeId.java
+++ b/fe/src/main/java/org/apache/impala/planner/PlanNodeId.java
@@ -15,10 +15,10 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.planner;
+package org.apache.impala.planner;
 
-import com.cloudera.impala.common.Id;
-import com.cloudera.impala.common.IdGenerator;
+import org.apache.impala.common.Id;
+import org.apache.impala.common.IdGenerator;
 
 public class PlanNodeId extends Id<PlanNodeId> {
   // Construction only allowed via an IdGenerator.

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/planner/Planner.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/Planner.java b/fe/src/main/java/org/apache/impala/planner/Planner.java
index df90df3..e50aca5 100644
--- a/fe/src/main/java/org/apache/impala/planner/Planner.java
+++ b/fe/src/main/java/org/apache/impala/planner/Planner.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.planner;
+package org.apache.impala.planner;
 
 import java.util.ArrayList;
 import java.util.Collections;
@@ -24,25 +24,25 @@ import java.util.List;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.analysis.AnalysisContext;
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.ColumnLineageGraph;
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.analysis.ExprSubstitutionMap;
-import com.cloudera.impala.analysis.InsertStmt;
-import com.cloudera.impala.analysis.JoinOperator;
-import com.cloudera.impala.analysis.QueryStmt;
-import com.cloudera.impala.catalog.HBaseTable;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.common.PrintUtils;
-import com.cloudera.impala.common.RuntimeEnv;
-import com.cloudera.impala.thrift.TExplainLevel;
-import com.cloudera.impala.thrift.TQueryCtx;
-import com.cloudera.impala.thrift.TQueryExecRequest;
-import com.cloudera.impala.thrift.TRuntimeFilterMode;
-import com.cloudera.impala.thrift.TTableName;
-import com.cloudera.impala.util.MaxRowsProcessedVisitor;
+import org.apache.impala.analysis.AnalysisContext;
+import org.apache.impala.analysis.Analyzer;
+import org.apache.impala.analysis.ColumnLineageGraph;
+import org.apache.impala.analysis.Expr;
+import org.apache.impala.analysis.ExprSubstitutionMap;
+import org.apache.impala.analysis.InsertStmt;
+import org.apache.impala.analysis.JoinOperator;
+import org.apache.impala.analysis.QueryStmt;
+import org.apache.impala.catalog.HBaseTable;
+import org.apache.impala.catalog.Table;
+import org.apache.impala.common.ImpalaException;
+import org.apache.impala.common.PrintUtils;
+import org.apache.impala.common.RuntimeEnv;
+import org.apache.impala.thrift.TExplainLevel;
+import org.apache.impala.thrift.TQueryCtx;
+import org.apache.impala.thrift.TQueryExecRequest;
+import org.apache.impala.thrift.TRuntimeFilterMode;
+import org.apache.impala.thrift.TTableName;
+import org.apache.impala.util.MaxRowsProcessedVisitor;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/planner/PlannerContext.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/PlannerContext.java b/fe/src/main/java/org/apache/impala/planner/PlannerContext.java
index fc11287..29cca13 100644
--- a/fe/src/main/java/org/apache/impala/planner/PlannerContext.java
+++ b/fe/src/main/java/org/apache/impala/planner/PlannerContext.java
@@ -15,16 +15,16 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.planner;
+package org.apache.impala.planner;
 
 import java.util.LinkedList;
 
-import com.cloudera.impala.analysis.AnalysisContext;
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.QueryStmt;
-import com.cloudera.impala.common.IdGenerator;
-import com.cloudera.impala.thrift.TQueryCtx;
-import com.cloudera.impala.thrift.TQueryOptions;
+import org.apache.impala.analysis.AnalysisContext;
+import org.apache.impala.analysis.Analyzer;
+import org.apache.impala.analysis.QueryStmt;
+import org.apache.impala.common.IdGenerator;
+import org.apache.impala.thrift.TQueryCtx;
+import org.apache.impala.thrift.TQueryOptions;
 import com.google.common.collect.Lists;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/planner/RuntimeFilterGenerator.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/RuntimeFilterGenerator.java b/fe/src/main/java/org/apache/impala/planner/RuntimeFilterGenerator.java
index f0cafd5..f4535a5 100644
--- a/fe/src/main/java/org/apache/impala/planner/RuntimeFilterGenerator.java
+++ b/fe/src/main/java/org/apache/impala/planner/RuntimeFilterGenerator.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.planner;
+package org.apache.impala.planner;
 
 import java.util.Collections;
 import java.util.Comparator;
@@ -24,22 +24,22 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.BinaryPredicate;
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.analysis.ExprSubstitutionMap;
-import com.cloudera.impala.analysis.Predicate;
-import com.cloudera.impala.analysis.SlotDescriptor;
-import com.cloudera.impala.analysis.SlotId;
-import com.cloudera.impala.analysis.SlotRef;
-import com.cloudera.impala.analysis.TupleDescriptor;
-import com.cloudera.impala.analysis.TupleId;
-import com.cloudera.impala.analysis.TupleIsNullPredicate;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.common.IdGenerator;
-import com.cloudera.impala.planner.PlanNode;
-import com.cloudera.impala.thrift.TRuntimeFilterDesc;
-import com.cloudera.impala.thrift.TRuntimeFilterTargetDesc;
+import org.apache.impala.analysis.Analyzer;
+import org.apache.impala.analysis.BinaryPredicate;
+import org.apache.impala.analysis.Expr;
+import org.apache.impala.analysis.ExprSubstitutionMap;
+import org.apache.impala.analysis.Predicate;
+import org.apache.impala.analysis.SlotDescriptor;
+import org.apache.impala.analysis.SlotId;
+import org.apache.impala.analysis.SlotRef;
+import org.apache.impala.analysis.TupleDescriptor;
+import org.apache.impala.analysis.TupleId;
+import org.apache.impala.analysis.TupleIsNullPredicate;
+import org.apache.impala.catalog.Table;
+import org.apache.impala.common.IdGenerator;
+import org.apache.impala.planner.PlanNode;
+import org.apache.impala.thrift.TRuntimeFilterDesc;
+import org.apache.impala.thrift.TRuntimeFilterTargetDesc;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/planner/RuntimeFilterId.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/RuntimeFilterId.java b/fe/src/main/java/org/apache/impala/planner/RuntimeFilterId.java
index 32af78c..9253207 100644
--- a/fe/src/main/java/org/apache/impala/planner/RuntimeFilterId.java
+++ b/fe/src/main/java/org/apache/impala/planner/RuntimeFilterId.java
@@ -15,10 +15,10 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.planner;
+package org.apache.impala.planner;
 
-import com.cloudera.impala.common.Id;
-import com.cloudera.impala.common.IdGenerator;
+import org.apache.impala.common.Id;
+import org.apache.impala.common.IdGenerator;
 
 public class RuntimeFilterId extends Id<RuntimeFilterId> {
   // Construction only allowed via an IdGenerator.

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/planner/ScanNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/ScanNode.java b/fe/src/main/java/org/apache/impala/planner/ScanNode.java
index b442b4a..f444ffa 100644
--- a/fe/src/main/java/org/apache/impala/planner/ScanNode.java
+++ b/fe/src/main/java/org/apache/impala/planner/ScanNode.java
@@ -15,18 +15,18 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.planner;
+package org.apache.impala.planner;
 
 import java.util.List;
 
-import com.cloudera.impala.analysis.SlotDescriptor;
-import com.cloudera.impala.analysis.TupleDescriptor;
-import com.cloudera.impala.catalog.HdfsFileFormat;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.NotImplementedException;
-import com.cloudera.impala.thrift.TExplainLevel;
-import com.cloudera.impala.thrift.TNetworkAddress;
-import com.cloudera.impala.thrift.TScanRangeLocations;
+import org.apache.impala.analysis.SlotDescriptor;
+import org.apache.impala.analysis.TupleDescriptor;
+import org.apache.impala.catalog.HdfsFileFormat;
+import org.apache.impala.catalog.Type;
+import org.apache.impala.common.NotImplementedException;
+import org.apache.impala.thrift.TExplainLevel;
+import org.apache.impala.thrift.TNetworkAddress;
+import org.apache.impala.thrift.TScanRangeLocations;
 import com.google.common.base.Joiner;
 import com.google.common.base.Objects;
 import com.google.common.base.Preconditions;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/planner/SelectNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/SelectNode.java b/fe/src/main/java/org/apache/impala/planner/SelectNode.java
index b418224..7713520 100644
--- a/fe/src/main/java/org/apache/impala/planner/SelectNode.java
+++ b/fe/src/main/java/org/apache/impala/planner/SelectNode.java
@@ -15,18 +15,18 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.planner;
+package org.apache.impala.planner;
 
 import java.util.List;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.thrift.TExplainLevel;
-import com.cloudera.impala.thrift.TPlanNode;
-import com.cloudera.impala.thrift.TPlanNodeType;
+import org.apache.impala.analysis.Analyzer;
+import org.apache.impala.analysis.Expr;
+import org.apache.impala.thrift.TExplainLevel;
+import org.apache.impala.thrift.TPlanNode;
+import org.apache.impala.thrift.TPlanNodeType;
 import com.google.common.base.Preconditions;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/planner/SingleNodePlanner.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/SingleNodePlanner.java b/fe/src/main/java/org/apache/impala/planner/SingleNodePlanner.java
index 2212d35..b686fe6 100644
--- a/fe/src/main/java/org/apache/impala/planner/SingleNodePlanner.java
+++ b/fe/src/main/java/org/apache/impala/planner/SingleNodePlanner.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.planner;
+package org.apache.impala.planner;
 
 import java.util.ArrayList;
 import java.util.Collections;
@@ -30,43 +30,43 @@ import java.util.Set;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.analysis.AggregateInfo;
-import com.cloudera.impala.analysis.AnalyticInfo;
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.BaseTableRef;
-import com.cloudera.impala.analysis.BinaryPredicate;
-import com.cloudera.impala.analysis.BinaryPredicate.Operator;
-import com.cloudera.impala.analysis.CollectionTableRef;
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.analysis.ExprId;
-import com.cloudera.impala.analysis.ExprSubstitutionMap;
-import com.cloudera.impala.analysis.InlineViewRef;
-import com.cloudera.impala.analysis.JoinOperator;
-import com.cloudera.impala.analysis.NullLiteral;
-import com.cloudera.impala.analysis.QueryStmt;
-import com.cloudera.impala.analysis.SelectStmt;
-import com.cloudera.impala.analysis.SingularRowSrcTableRef;
-import com.cloudera.impala.analysis.SlotDescriptor;
-import com.cloudera.impala.analysis.SlotId;
-import com.cloudera.impala.analysis.SlotRef;
-import com.cloudera.impala.analysis.TableRef;
-import com.cloudera.impala.analysis.TupleDescriptor;
-import com.cloudera.impala.analysis.TupleId;
-import com.cloudera.impala.analysis.TupleIsNullPredicate;
-import com.cloudera.impala.analysis.UnionStmt;
-import com.cloudera.impala.analysis.UnionStmt.UnionOperand;
-import com.cloudera.impala.catalog.ColumnStats;
-import com.cloudera.impala.catalog.DataSourceTable;
-import com.cloudera.impala.catalog.HBaseTable;
-import com.cloudera.impala.catalog.HdfsPartition;
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.catalog.KuduTable;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.common.NotImplementedException;
-import com.cloudera.impala.common.Pair;
+import org.apache.impala.analysis.AggregateInfo;
+import org.apache.impala.analysis.AnalyticInfo;
+import org.apache.impala.analysis.Analyzer;
+import org.apache.impala.analysis.BaseTableRef;
+import org.apache.impala.analysis.BinaryPredicate;
+import org.apache.impala.analysis.BinaryPredicate.Operator;
+import org.apache.impala.analysis.CollectionTableRef;
+import org.apache.impala.analysis.Expr;
+import org.apache.impala.analysis.ExprId;
+import org.apache.impala.analysis.ExprSubstitutionMap;
+import org.apache.impala.analysis.InlineViewRef;
+import org.apache.impala.analysis.JoinOperator;
+import org.apache.impala.analysis.NullLiteral;
+import org.apache.impala.analysis.QueryStmt;
+import org.apache.impala.analysis.SelectStmt;
+import org.apache.impala.analysis.SingularRowSrcTableRef;
+import org.apache.impala.analysis.SlotDescriptor;
+import org.apache.impala.analysis.SlotId;
+import org.apache.impala.analysis.SlotRef;
+import org.apache.impala.analysis.TableRef;
+import org.apache.impala.analysis.TupleDescriptor;
+import org.apache.impala.analysis.TupleId;
+import org.apache.impala.analysis.TupleIsNullPredicate;
+import org.apache.impala.analysis.UnionStmt;
+import org.apache.impala.analysis.UnionStmt.UnionOperand;
+import org.apache.impala.catalog.ColumnStats;
+import org.apache.impala.catalog.DataSourceTable;
+import org.apache.impala.catalog.HBaseTable;
+import org.apache.impala.catalog.HdfsPartition;
+import org.apache.impala.catalog.HdfsTable;
+import org.apache.impala.catalog.KuduTable;
+import org.apache.impala.catalog.Table;
+import org.apache.impala.catalog.Type;
+import org.apache.impala.common.ImpalaException;
+import org.apache.impala.common.InternalException;
+import org.apache.impala.common.NotImplementedException;
+import org.apache.impala.common.Pair;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Predicate;
 import com.google.common.collect.Iterables;
@@ -1126,7 +1126,7 @@ public class SingleNodePlanner {
     Predicate<Expr> isIdentityPredicate = new Predicate<Expr>() {
       @Override
       public boolean apply(Expr expr) {
-        return com.cloudera.impala.analysis.Predicate.isEquivalencePredicate(expr)
+        return org.apache.impala.analysis.Predicate.isEquivalencePredicate(expr)
             && ((BinaryPredicate) expr).isInferred()
             && expr.getChild(0).equals(expr.getChild(1));
       }

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/planner/SingularRowSrcNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/SingularRowSrcNode.java b/fe/src/main/java/org/apache/impala/planner/SingularRowSrcNode.java
index 88b3d7d..5b66d18 100644
--- a/fe/src/main/java/org/apache/impala/planner/SingularRowSrcNode.java
+++ b/fe/src/main/java/org/apache/impala/planner/SingularRowSrcNode.java
@@ -15,13 +15,13 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.planner;
+package org.apache.impala.planner;
 
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.thrift.TExplainLevel;
-import com.cloudera.impala.thrift.TPlanNode;
-import com.cloudera.impala.thrift.TPlanNodeType;
+import org.apache.impala.analysis.Analyzer;
+import org.apache.impala.common.ImpalaException;
+import org.apache.impala.thrift.TExplainLevel;
+import org.apache.impala.thrift.TPlanNode;
+import org.apache.impala.thrift.TPlanNodeType;
 import com.google.common.base.Preconditions;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/planner/SortNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/SortNode.java b/fe/src/main/java/org/apache/impala/planner/SortNode.java
index 02f0df6..3a71f8c 100644
--- a/fe/src/main/java/org/apache/impala/planner/SortNode.java
+++ b/fe/src/main/java/org/apache/impala/planner/SortNode.java
@@ -15,27 +15,27 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.planner;
+package org.apache.impala.planner;
 
 import java.util.List;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.analysis.ExprSubstitutionMap;
-import com.cloudera.impala.analysis.SlotDescriptor;
-import com.cloudera.impala.analysis.SlotRef;
-import com.cloudera.impala.analysis.SortInfo;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.service.BackendConfig;
-import com.cloudera.impala.thrift.TExplainLevel;
-import com.cloudera.impala.thrift.TPlanNode;
-import com.cloudera.impala.thrift.TPlanNodeType;
-import com.cloudera.impala.thrift.TQueryOptions;
-import com.cloudera.impala.thrift.TSortInfo;
-import com.cloudera.impala.thrift.TSortNode;
+import org.apache.impala.analysis.Analyzer;
+import org.apache.impala.analysis.Expr;
+import org.apache.impala.analysis.ExprSubstitutionMap;
+import org.apache.impala.analysis.SlotDescriptor;
+import org.apache.impala.analysis.SlotRef;
+import org.apache.impala.analysis.SortInfo;
+import org.apache.impala.common.InternalException;
+import org.apache.impala.service.BackendConfig;
+import org.apache.impala.thrift.TExplainLevel;
+import org.apache.impala.thrift.TPlanNode;
+import org.apache.impala.thrift.TPlanNodeType;
+import org.apache.impala.thrift.TQueryOptions;
+import org.apache.impala.thrift.TSortInfo;
+import org.apache.impala.thrift.TSortNode;
 import com.google.common.base.Joiner;
 import com.google.common.base.Objects;
 import com.google.common.base.Preconditions;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/planner/SubplanNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/SubplanNode.java b/fe/src/main/java/org/apache/impala/planner/SubplanNode.java
index 4512268..6143255 100644
--- a/fe/src/main/java/org/apache/impala/planner/SubplanNode.java
+++ b/fe/src/main/java/org/apache/impala/planner/SubplanNode.java
@@ -15,13 +15,13 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.planner;
+package org.apache.impala.planner;
 
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.thrift.TExplainLevel;
-import com.cloudera.impala.thrift.TPlanNode;
-import com.cloudera.impala.thrift.TPlanNodeType;
+import org.apache.impala.analysis.Analyzer;
+import org.apache.impala.common.InternalException;
+import org.apache.impala.thrift.TExplainLevel;
+import org.apache.impala.thrift.TPlanNode;
+import org.apache.impala.thrift.TPlanNodeType;
 import com.google.common.base.Preconditions;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/planner/TableSink.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/TableSink.java b/fe/src/main/java/org/apache/impala/planner/TableSink.java
index ae73b54..788bb50 100644
--- a/fe/src/main/java/org/apache/impala/planner/TableSink.java
+++ b/fe/src/main/java/org/apache/impala/planner/TableSink.java
@@ -15,14 +15,14 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.planner;
+package org.apache.impala.planner;
 
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.catalog.HBaseTable;
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.catalog.KuduTable;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.thrift.TSinkAction;
+import org.apache.impala.analysis.Expr;
+import org.apache.impala.catalog.HBaseTable;
+import org.apache.impala.catalog.HdfsTable;
+import org.apache.impala.catalog.KuduTable;
+import org.apache.impala.catalog.Table;
+import org.apache.impala.thrift.TSinkAction;
 import com.google.common.base.Preconditions;
 
 import java.util.List;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/planner/UnionNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/UnionNode.java b/fe/src/main/java/org/apache/impala/planner/UnionNode.java
index ef67277..a451ba0 100644
--- a/fe/src/main/java/org/apache/impala/planner/UnionNode.java
+++ b/fe/src/main/java/org/apache/impala/planner/UnionNode.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.planner;
+package org.apache.impala.planner;
 
 import java.util.ArrayList;
 import java.util.Collections;
@@ -25,16 +25,16 @@ import java.util.List;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.analysis.SlotDescriptor;
-import com.cloudera.impala.analysis.TupleId;
-import com.cloudera.impala.common.Pair;
-import com.cloudera.impala.thrift.TExplainLevel;
-import com.cloudera.impala.thrift.TExpr;
-import com.cloudera.impala.thrift.TPlanNode;
-import com.cloudera.impala.thrift.TPlanNodeType;
-import com.cloudera.impala.thrift.TUnionNode;
+import org.apache.impala.analysis.Analyzer;
+import org.apache.impala.analysis.Expr;
+import org.apache.impala.analysis.SlotDescriptor;
+import org.apache.impala.analysis.TupleId;
+import org.apache.impala.common.Pair;
+import org.apache.impala.thrift.TExplainLevel;
+import org.apache.impala.thrift.TExpr;
+import org.apache.impala.thrift.TPlanNode;
+import org.apache.impala.thrift.TPlanNodeType;
+import org.apache.impala.thrift.TUnionNode;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/planner/UnnestNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/UnnestNode.java b/fe/src/main/java/org/apache/impala/planner/UnnestNode.java
index 3a45882..35abc55 100644
--- a/fe/src/main/java/org/apache/impala/planner/UnnestNode.java
+++ b/fe/src/main/java/org/apache/impala/planner/UnnestNode.java
@@ -15,16 +15,16 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.planner;
+package org.apache.impala.planner;
 
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.CollectionTableRef;
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.thrift.TExplainLevel;
-import com.cloudera.impala.thrift.TPlanNode;
-import com.cloudera.impala.thrift.TPlanNodeType;
-import com.cloudera.impala.thrift.TUnnestNode;
+import org.apache.impala.analysis.Analyzer;
+import org.apache.impala.analysis.CollectionTableRef;
+import org.apache.impala.analysis.Expr;
+import org.apache.impala.common.ImpalaException;
+import org.apache.impala.thrift.TExplainLevel;
+import org.apache.impala.thrift.TPlanNode;
+import org.apache.impala.thrift.TPlanNodeType;
+import org.apache.impala.thrift.TUnnestNode;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/planner/ValueRange.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/ValueRange.java b/fe/src/main/java/org/apache/impala/planner/ValueRange.java
index e76da9d..492dd3d 100644
--- a/fe/src/main/java/org/apache/impala/planner/ValueRange.java
+++ b/fe/src/main/java/org/apache/impala/planner/ValueRange.java
@@ -15,19 +15,19 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.planner;
+package org.apache.impala.planner;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.BinaryPredicate;
-import com.cloudera.impala.analysis.CompoundPredicate;
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.analysis.Predicate;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.service.FeSupport;
+import org.apache.impala.analysis.Analyzer;
+import org.apache.impala.analysis.BinaryPredicate;
+import org.apache.impala.analysis.CompoundPredicate;
+import org.apache.impala.analysis.Expr;
+import org.apache.impala.analysis.Predicate;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.common.InternalException;
+import org.apache.impala.service.FeSupport;
 import com.google.common.base.Preconditions;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/service/BackendConfig.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/service/BackendConfig.java b/fe/src/main/java/org/apache/impala/service/BackendConfig.java
index e98ab571..75dd1f7 100644
--- a/fe/src/main/java/org/apache/impala/service/BackendConfig.java
+++ b/fe/src/main/java/org/apache/impala/service/BackendConfig.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.service;
+package org.apache.impala.service;
 
 /**
  * This class is meant to provide the FE with impalad backend configuration parameters,

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java b/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
index 4814503..780bf13 100644
--- a/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
+++ b/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.service;
+package org.apache.impala.service;
 
 import java.util.ArrayList;
 import java.util.HashMap;
@@ -52,103 +52,103 @@ import org.apache.hadoop.hive.metastore.api.StringColumnStatsData;
 import org.apache.log4j.Logger;
 import org.apache.thrift.TException;
 
-import com.cloudera.impala.analysis.FunctionName;
-import com.cloudera.impala.analysis.TableName;
-import com.cloudera.impala.authorization.User;
-import com.cloudera.impala.catalog.Catalog;
-import com.cloudera.impala.catalog.CatalogException;
-import com.cloudera.impala.catalog.CatalogServiceCatalog;
-import com.cloudera.impala.catalog.Column;
-import com.cloudera.impala.catalog.ColumnNotFoundException;
-import com.cloudera.impala.catalog.DataSource;
-import com.cloudera.impala.catalog.DatabaseNotFoundException;
-import com.cloudera.impala.catalog.Db;
-import com.cloudera.impala.catalog.Function;
-import com.cloudera.impala.catalog.HBaseTable;
-import com.cloudera.impala.catalog.HdfsFileFormat;
-import com.cloudera.impala.catalog.HdfsPartition;
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.catalog.HiveStorageDescriptorFactory;
-import com.cloudera.impala.catalog.IncompleteTable;
-import com.cloudera.impala.catalog.MetaStoreClientPool.MetaStoreClient;
-import com.cloudera.impala.catalog.PartitionNotFoundException;
-import com.cloudera.impala.catalog.PartitionStatsUtil;
-import com.cloudera.impala.catalog.Role;
-import com.cloudera.impala.catalog.RolePrivilege;
-import com.cloudera.impala.catalog.RowFormat;
-import com.cloudera.impala.catalog.ScalarFunction;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.catalog.TableLoadingException;
-import com.cloudera.impala.catalog.TableNotFoundException;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.catalog.View;
-import com.cloudera.impala.catalog.delegates.DdlDelegate;
-import com.cloudera.impala.catalog.delegates.KuduDdlDelegate;
-import com.cloudera.impala.catalog.delegates.UnsupportedOpDelegate;
-import com.cloudera.impala.common.FileSystemUtil;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.common.ImpalaRuntimeException;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.common.Pair;
-import com.cloudera.impala.thrift.ImpalaInternalServiceConstants;
-import com.cloudera.impala.thrift.JniCatalogConstants;
-import com.cloudera.impala.thrift.TAlterTableAddPartitionParams;
-import com.cloudera.impala.thrift.TAlterTableAddReplaceColsParams;
-import com.cloudera.impala.thrift.TAlterTableChangeColParams;
-import com.cloudera.impala.thrift.TAlterTableDropColParams;
-import com.cloudera.impala.thrift.TAlterTableDropPartitionParams;
-import com.cloudera.impala.thrift.TAlterTableParams;
-import com.cloudera.impala.thrift.TAlterTableSetCachedParams;
-import com.cloudera.impala.thrift.TAlterTableSetFileFormatParams;
-import com.cloudera.impala.thrift.TAlterTableSetLocationParams;
-import com.cloudera.impala.thrift.TAlterTableSetTblPropertiesParams;
-import com.cloudera.impala.thrift.TAlterTableType;
-import com.cloudera.impala.thrift.TAlterTableUpdateStatsParams;
-import com.cloudera.impala.thrift.TCatalogObject;
-import com.cloudera.impala.thrift.TCatalogObjectType;
-import com.cloudera.impala.thrift.TCatalogUpdateResult;
-import com.cloudera.impala.thrift.TColumn;
-import com.cloudera.impala.thrift.TColumnStats;
-import com.cloudera.impala.thrift.TColumnType;
-import com.cloudera.impala.thrift.TColumnValue;
-import com.cloudera.impala.thrift.TCreateDataSourceParams;
-import com.cloudera.impala.thrift.TCreateDbParams;
-import com.cloudera.impala.thrift.TCreateDropRoleParams;
-import com.cloudera.impala.thrift.TCreateFunctionParams;
-import com.cloudera.impala.thrift.TCreateOrAlterViewParams;
-import com.cloudera.impala.thrift.TCreateTableLikeParams;
-import com.cloudera.impala.thrift.TCreateTableParams;
-import com.cloudera.impala.thrift.TDatabase;
-import com.cloudera.impala.thrift.TDdlExecRequest;
-import com.cloudera.impala.thrift.TDdlExecResponse;
-import com.cloudera.impala.thrift.TDistributeParam;
-import com.cloudera.impala.thrift.TDropDataSourceParams;
-import com.cloudera.impala.thrift.TDropDbParams;
-import com.cloudera.impala.thrift.TDropFunctionParams;
-import com.cloudera.impala.thrift.TDropStatsParams;
-import com.cloudera.impala.thrift.TDropTableOrViewParams;
-import com.cloudera.impala.thrift.TErrorCode;
-import com.cloudera.impala.thrift.TFunctionBinaryType;
-import com.cloudera.impala.thrift.TGrantRevokePrivParams;
-import com.cloudera.impala.thrift.TGrantRevokeRoleParams;
-import com.cloudera.impala.thrift.THdfsCachingOp;
-import com.cloudera.impala.thrift.THdfsFileFormat;
-import com.cloudera.impala.thrift.TPartitionKeyValue;
-import com.cloudera.impala.thrift.TPartitionStats;
-import com.cloudera.impala.thrift.TPrivilege;
-import com.cloudera.impala.thrift.TResetMetadataRequest;
-import com.cloudera.impala.thrift.TResetMetadataResponse;
-import com.cloudera.impala.thrift.TResultRow;
-import com.cloudera.impala.thrift.TResultSet;
-import com.cloudera.impala.thrift.TResultSetMetadata;
-import com.cloudera.impala.thrift.TStatus;
-import com.cloudera.impala.thrift.TTable;
-import com.cloudera.impala.thrift.TTableName;
-import com.cloudera.impala.thrift.TTableStats;
-import com.cloudera.impala.thrift.TTruncateParams;
-import com.cloudera.impala.thrift.TUpdateCatalogRequest;
-import com.cloudera.impala.thrift.TUpdateCatalogResponse;
-import com.cloudera.impala.util.HdfsCachingUtil;
+import org.apache.impala.analysis.FunctionName;
+import org.apache.impala.analysis.TableName;
+import org.apache.impala.authorization.User;
+import org.apache.impala.catalog.Catalog;
+import org.apache.impala.catalog.CatalogException;
+import org.apache.impala.catalog.CatalogServiceCatalog;
+import org.apache.impala.catalog.Column;
+import org.apache.impala.catalog.ColumnNotFoundException;
+import org.apache.impala.catalog.DataSource;
+import org.apache.impala.catalog.DatabaseNotFoundException;
+import org.apache.impala.catalog.Db;
+import org.apache.impala.catalog.Function;
+import org.apache.impala.catalog.HBaseTable;
+import org.apache.impala.catalog.HdfsFileFormat;
+import org.apache.impala.catalog.HdfsPartition;
+import org.apache.impala.catalog.HdfsTable;
+import org.apache.impala.catalog.HiveStorageDescriptorFactory;
+import org.apache.impala.catalog.IncompleteTable;
+import org.apache.impala.catalog.MetaStoreClientPool.MetaStoreClient;
+import org.apache.impala.catalog.PartitionNotFoundException;
+import org.apache.impala.catalog.PartitionStatsUtil;
+import org.apache.impala.catalog.Role;
+import org.apache.impala.catalog.RolePrivilege;
+import org.apache.impala.catalog.RowFormat;
+import org.apache.impala.catalog.ScalarFunction;
+import org.apache.impala.catalog.Table;
+import org.apache.impala.catalog.TableLoadingException;
+import org.apache.impala.catalog.TableNotFoundException;
+import org.apache.impala.catalog.Type;
+import org.apache.impala.catalog.View;
+import org.apache.impala.catalog.delegates.DdlDelegate;
+import org.apache.impala.catalog.delegates.KuduDdlDelegate;
+import org.apache.impala.catalog.delegates.UnsupportedOpDelegate;
+import org.apache.impala.common.FileSystemUtil;
+import org.apache.impala.common.ImpalaException;
+import org.apache.impala.common.ImpalaRuntimeException;
+import org.apache.impala.common.InternalException;
+import org.apache.impala.common.Pair;
+import org.apache.impala.thrift.ImpalaInternalServiceConstants;
+import org.apache.impala.thrift.JniCatalogConstants;
+import org.apache.impala.thrift.TAlterTableAddPartitionParams;
+import org.apache.impala.thrift.TAlterTableAddReplaceColsParams;
+import org.apache.impala.thrift.TAlterTableChangeColParams;
+import org.apache.impala.thrift.TAlterTableDropColParams;
+import org.apache.impala.thrift.TAlterTableDropPartitionParams;
+import org.apache.impala.thrift.TAlterTableParams;
+import org.apache.impala.thrift.TAlterTableSetCachedParams;
+import org.apache.impala.thrift.TAlterTableSetFileFormatParams;
+import org.apache.impala.thrift.TAlterTableSetLocationParams;
+import org.apache.impala.thrift.TAlterTableSetTblPropertiesParams;
+import org.apache.impala.thrift.TAlterTableType;
+import org.apache.impala.thrift.TAlterTableUpdateStatsParams;
+import org.apache.impala.thrift.TCatalogObject;
+import org.apache.impala.thrift.TCatalogObjectType;
+import org.apache.impala.thrift.TCatalogUpdateResult;
+import org.apache.impala.thrift.TColumn;
+import org.apache.impala.thrift.TColumnStats;
+import org.apache.impala.thrift.TColumnType;
+import org.apache.impala.thrift.TColumnValue;
+import org.apache.impala.thrift.TCreateDataSourceParams;
+import org.apache.impala.thrift.TCreateDbParams;
+import org.apache.impala.thrift.TCreateDropRoleParams;
+import org.apache.impala.thrift.TCreateFunctionParams;
+import org.apache.impala.thrift.TCreateOrAlterViewParams;
+import org.apache.impala.thrift.TCreateTableLikeParams;
+import org.apache.impala.thrift.TCreateTableParams;
+import org.apache.impala.thrift.TDatabase;
+import org.apache.impala.thrift.TDdlExecRequest;
+import org.apache.impala.thrift.TDdlExecResponse;
+import org.apache.impala.thrift.TDistributeParam;
+import org.apache.impala.thrift.TDropDataSourceParams;
+import org.apache.impala.thrift.TDropDbParams;
+import org.apache.impala.thrift.TDropFunctionParams;
+import org.apache.impala.thrift.TDropStatsParams;
+import org.apache.impala.thrift.TDropTableOrViewParams;
+import org.apache.impala.thrift.TErrorCode;
+import org.apache.impala.thrift.TFunctionBinaryType;
+import org.apache.impala.thrift.TGrantRevokePrivParams;
+import org.apache.impala.thrift.TGrantRevokeRoleParams;
+import org.apache.impala.thrift.THdfsCachingOp;
+import org.apache.impala.thrift.THdfsFileFormat;
+import org.apache.impala.thrift.TPartitionKeyValue;
+import org.apache.impala.thrift.TPartitionStats;
+import org.apache.impala.thrift.TPrivilege;
+import org.apache.impala.thrift.TResetMetadataRequest;
+import org.apache.impala.thrift.TResetMetadataResponse;
+import org.apache.impala.thrift.TResultRow;
+import org.apache.impala.thrift.TResultSet;
+import org.apache.impala.thrift.TResultSetMetadata;
+import org.apache.impala.thrift.TStatus;
+import org.apache.impala.thrift.TTable;
+import org.apache.impala.thrift.TTableName;
+import org.apache.impala.thrift.TTableStats;
+import org.apache.impala.thrift.TTruncateParams;
+import org.apache.impala.thrift.TUpdateCatalogRequest;
+import org.apache.impala.thrift.TUpdateCatalogResponse;
+import org.apache.impala.util.HdfsCachingUtil;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/service/DescribeResultFactory.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/service/DescribeResultFactory.java b/fe/src/main/java/org/apache/impala/service/DescribeResultFactory.java
index c1a9557..eb917a6 100644
--- a/fe/src/main/java/org/apache/impala/service/DescribeResultFactory.java
+++ b/fe/src/main/java/org/apache/impala/service/DescribeResultFactory.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.service;
+package org.apache.impala.service;
 
 import java.util.List;
 import java.util.Map;
@@ -26,15 +26,15 @@ import org.apache.hadoop.hive.metastore.api.PrincipalType;
 import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo;
 import org.apache.hadoop.hive.ql.metadata.formatting.MetaDataFormatUtils;
 
-import com.cloudera.impala.catalog.Column;
-import com.cloudera.impala.catalog.Db;
-import com.cloudera.impala.catalog.StructField;
-import com.cloudera.impala.catalog.StructType;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.thrift.TColumnValue;
-import com.cloudera.impala.thrift.TDescribeOutputStyle;
-import com.cloudera.impala.thrift.TDescribeResult;
-import com.cloudera.impala.thrift.TResultRow;
+import org.apache.impala.catalog.Column;
+import org.apache.impala.catalog.Db;
+import org.apache.impala.catalog.StructField;
+import org.apache.impala.catalog.StructType;
+import org.apache.impala.catalog.Table;
+import org.apache.impala.thrift.TColumnValue;
+import org.apache.impala.thrift.TDescribeOutputStyle;
+import org.apache.impala.thrift.TDescribeResult;
+import org.apache.impala.thrift.TResultRow;
 import com.google.common.collect.Lists;
 
 /*

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/service/FeSupport.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/service/FeSupport.java b/fe/src/main/java/org/apache/impala/service/FeSupport.java
index 4014129..f982df3 100644
--- a/fe/src/main/java/org/apache/impala/service/FeSupport.java
+++ b/fe/src/main/java/org/apache/impala/service/FeSupport.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.service;
+package org.apache.impala.service;
 
 import java.util.ArrayList;
 import java.util.List;
@@ -28,28 +28,28 @@ import org.apache.thrift.protocol.TBinaryProtocol;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.analysis.BoolLiteral;
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.analysis.NullLiteral;
-import com.cloudera.impala.analysis.TableName;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.thrift.TCacheJarParams;
-import com.cloudera.impala.thrift.TCacheJarResult;
-import com.cloudera.impala.thrift.TCatalogObject;
-import com.cloudera.impala.thrift.TCatalogObjectType;
-import com.cloudera.impala.thrift.TCatalogServiceRequestHeader;
-import com.cloudera.impala.thrift.TColumnValue;
-import com.cloudera.impala.thrift.TExprBatch;
-import com.cloudera.impala.thrift.TPrioritizeLoadRequest;
-import com.cloudera.impala.thrift.TPrioritizeLoadResponse;
-import com.cloudera.impala.thrift.TQueryCtx;
-import com.cloudera.impala.thrift.TResultRow;
-import com.cloudera.impala.thrift.TStatus;
-import com.cloudera.impala.thrift.TStartupOptions;
-import com.cloudera.impala.thrift.TSymbolLookupParams;
-import com.cloudera.impala.thrift.TSymbolLookupResult;
-import com.cloudera.impala.thrift.TTable;
-import com.cloudera.impala.util.NativeLibUtil;
+import org.apache.impala.analysis.BoolLiteral;
+import org.apache.impala.analysis.Expr;
+import org.apache.impala.analysis.NullLiteral;
+import org.apache.impala.analysis.TableName;
+import org.apache.impala.common.InternalException;
+import org.apache.impala.thrift.TCacheJarParams;
+import org.apache.impala.thrift.TCacheJarResult;
+import org.apache.impala.thrift.TCatalogObject;
+import org.apache.impala.thrift.TCatalogObjectType;
+import org.apache.impala.thrift.TCatalogServiceRequestHeader;
+import org.apache.impala.thrift.TColumnValue;
+import org.apache.impala.thrift.TExprBatch;
+import org.apache.impala.thrift.TPrioritizeLoadRequest;
+import org.apache.impala.thrift.TPrioritizeLoadResponse;
+import org.apache.impala.thrift.TQueryCtx;
+import org.apache.impala.thrift.TResultRow;
+import org.apache.impala.thrift.TStatus;
+import org.apache.impala.thrift.TStartupOptions;
+import org.apache.impala.thrift.TSymbolLookupParams;
+import org.apache.impala.thrift.TSymbolLookupResult;
+import org.apache.impala.thrift.TTable;
+import org.apache.impala.util.NativeLibUtil;
 import com.google.common.base.Preconditions;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/service/Frontend.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/service/Frontend.java b/fe/src/main/java/org/apache/impala/service/Frontend.java
index 2d38396..fe1f8f1 100644
--- a/fe/src/main/java/org/apache/impala/service/Frontend.java
+++ b/fe/src/main/java/org/apache/impala/service/Frontend.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.service;
+package org.apache.impala.service;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -35,7 +35,7 @@ import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicReference;
 
-import com.cloudera.impala.catalog.KuduTable;
+import org.apache.impala.catalog.KuduTable;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hive.service.cli.thrift.TGetColumnsReq;
@@ -45,101 +45,101 @@ import org.apache.hive.service.cli.thrift.TGetTablesReq;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.analysis.AnalysisContext;
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.CreateDataSrcStmt;
-import com.cloudera.impala.analysis.CreateDropRoleStmt;
-import com.cloudera.impala.analysis.CreateUdaStmt;
-import com.cloudera.impala.analysis.CreateUdfStmt;
-import com.cloudera.impala.analysis.DropDataSrcStmt;
-import com.cloudera.impala.analysis.DropFunctionStmt;
-import com.cloudera.impala.analysis.DropStatsStmt;
-import com.cloudera.impala.analysis.DropTableOrViewStmt;
-import com.cloudera.impala.analysis.GrantRevokePrivStmt;
-import com.cloudera.impala.analysis.GrantRevokeRoleStmt;
-import com.cloudera.impala.analysis.InsertStmt;
-import com.cloudera.impala.analysis.QueryStmt;
-import com.cloudera.impala.analysis.ResetMetadataStmt;
-import com.cloudera.impala.analysis.ShowFunctionsStmt;
-import com.cloudera.impala.analysis.ShowGrantRoleStmt;
-import com.cloudera.impala.analysis.ShowRolesStmt;
-import com.cloudera.impala.analysis.TableName;
-import com.cloudera.impala.analysis.TruncateStmt;
-import com.cloudera.impala.analysis.TupleDescriptor;
-import com.cloudera.impala.authorization.AuthorizationChecker;
-import com.cloudera.impala.authorization.AuthorizationConfig;
-import com.cloudera.impala.authorization.ImpalaInternalAdminUser;
-import com.cloudera.impala.authorization.PrivilegeRequest;
-import com.cloudera.impala.authorization.PrivilegeRequestBuilder;
-import com.cloudera.impala.authorization.User;
-import com.cloudera.impala.catalog.AuthorizationException;
-import com.cloudera.impala.catalog.Catalog;
-import com.cloudera.impala.catalog.CatalogException;
-import com.cloudera.impala.catalog.Column;
-import com.cloudera.impala.catalog.DataSource;
-import com.cloudera.impala.catalog.DataSourceTable;
-import com.cloudera.impala.catalog.DatabaseNotFoundException;
-import com.cloudera.impala.catalog.Db;
-import com.cloudera.impala.catalog.Function;
-import com.cloudera.impala.catalog.HBaseTable;
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.catalog.ImpaladCatalog;
-import com.cloudera.impala.catalog.StructType;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.catalog.TableId;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.FileSystemUtil;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.common.NotImplementedException;
-import com.cloudera.impala.common.RuntimeEnv;
-import com.cloudera.impala.planner.PlanFragment;
-import com.cloudera.impala.planner.Planner;
-import com.cloudera.impala.planner.ScanNode;
-import com.cloudera.impala.thrift.TCatalogOpRequest;
-import com.cloudera.impala.thrift.TCatalogOpType;
-import com.cloudera.impala.thrift.TCatalogServiceRequestHeader;
-import com.cloudera.impala.thrift.TColumn;
-import com.cloudera.impala.thrift.TColumnType;
-import com.cloudera.impala.thrift.TColumnValue;
-import com.cloudera.impala.thrift.TCreateDropRoleParams;
-import com.cloudera.impala.thrift.TDdlExecRequest;
-import com.cloudera.impala.thrift.TDdlType;
-import com.cloudera.impala.thrift.TDescribeOutputStyle;
-import com.cloudera.impala.thrift.TDescribeResult;
-import com.cloudera.impala.thrift.TErrorCode;
-import com.cloudera.impala.thrift.TExecRequest;
-import com.cloudera.impala.thrift.TExplainLevel;
-import com.cloudera.impala.thrift.TExplainResult;
-import com.cloudera.impala.thrift.TFinalizeParams;
-import com.cloudera.impala.thrift.TFunctionCategory;
-import com.cloudera.impala.thrift.TGrantRevokePrivParams;
-import com.cloudera.impala.thrift.TGrantRevokeRoleParams;
-import com.cloudera.impala.thrift.TLineageGraph;
-import com.cloudera.impala.thrift.TLoadDataReq;
-import com.cloudera.impala.thrift.TLoadDataResp;
-import com.cloudera.impala.thrift.TMetadataOpRequest;
-import com.cloudera.impala.thrift.TPlanFragment;
-import com.cloudera.impala.thrift.TPlanFragmentTree;
-import com.cloudera.impala.thrift.TQueryCtx;
-import com.cloudera.impala.thrift.TQueryExecRequest;
-import com.cloudera.impala.thrift.TResetMetadataRequest;
-import com.cloudera.impala.thrift.TResultRow;
-import com.cloudera.impala.thrift.TResultSet;
-import com.cloudera.impala.thrift.TResultSetMetadata;
-import com.cloudera.impala.thrift.TShowFilesParams;
-import com.cloudera.impala.thrift.TStatus;
-import com.cloudera.impala.thrift.TStmtType;
-import com.cloudera.impala.thrift.TTableName;
-import com.cloudera.impala.thrift.TUpdateCatalogCacheRequest;
-import com.cloudera.impala.thrift.TUpdateCatalogCacheResponse;
-import com.cloudera.impala.thrift.TUpdateMembershipRequest;
-import com.cloudera.impala.util.EventSequence;
-import com.cloudera.impala.util.MembershipSnapshot;
-import com.cloudera.impala.util.PatternMatcher;
-import com.cloudera.impala.util.TResultRowBuilder;
-import com.cloudera.impala.util.TSessionStateUtil;
+import org.apache.impala.analysis.AnalysisContext;
+import org.apache.impala.analysis.Analyzer;
+import org.apache.impala.analysis.CreateDataSrcStmt;
+import org.apache.impala.analysis.CreateDropRoleStmt;
+import org.apache.impala.analysis.CreateUdaStmt;
+import org.apache.impala.analysis.CreateUdfStmt;
+import org.apache.impala.analysis.DropDataSrcStmt;
+import org.apache.impala.analysis.DropFunctionStmt;
+import org.apache.impala.analysis.DropStatsStmt;
+import org.apache.impala.analysis.DropTableOrViewStmt;
+import org.apache.impala.analysis.GrantRevokePrivStmt;
+import org.apache.impala.analysis.GrantRevokeRoleStmt;
+import org.apache.impala.analysis.InsertStmt;
+import org.apache.impala.analysis.QueryStmt;
+import org.apache.impala.analysis.ResetMetadataStmt;
+import org.apache.impala.analysis.ShowFunctionsStmt;
+import org.apache.impala.analysis.ShowGrantRoleStmt;
+import org.apache.impala.analysis.ShowRolesStmt;
+import org.apache.impala.analysis.TableName;
+import org.apache.impala.analysis.TruncateStmt;
+import org.apache.impala.analysis.TupleDescriptor;
+import org.apache.impala.authorization.AuthorizationChecker;
+import org.apache.impala.authorization.AuthorizationConfig;
+import org.apache.impala.authorization.ImpalaInternalAdminUser;
+import org.apache.impala.authorization.PrivilegeRequest;
+import org.apache.impala.authorization.PrivilegeRequestBuilder;
+import org.apache.impala.authorization.User;
+import org.apache.impala.catalog.AuthorizationException;
+import org.apache.impala.catalog.Catalog;
+import org.apache.impala.catalog.CatalogException;
+import org.apache.impala.catalog.Column;
+import org.apache.impala.catalog.DataSource;
+import org.apache.impala.catalog.DataSourceTable;
+import org.apache.impala.catalog.DatabaseNotFoundException;
+import org.apache.impala.catalog.Db;
+import org.apache.impala.catalog.Function;
+import org.apache.impala.catalog.HBaseTable;
+import org.apache.impala.catalog.HdfsTable;
+import org.apache.impala.catalog.ImpaladCatalog;
+import org.apache.impala.catalog.StructType;
+import org.apache.impala.catalog.Table;
+import org.apache.impala.catalog.TableId;
+import org.apache.impala.catalog.Type;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.common.FileSystemUtil;
+import org.apache.impala.common.ImpalaException;
+import org.apache.impala.common.InternalException;
+import org.apache.impala.common.NotImplementedException;
+import org.apache.impala.common.RuntimeEnv;
+import org.apache.impala.planner.PlanFragment;
+import org.apache.impala.planner.Planner;
+import org.apache.impala.planner.ScanNode;
+import org.apache.impala.thrift.TCatalogOpRequest;
+import org.apache.impala.thrift.TCatalogOpType;
+import org.apache.impala.thrift.TCatalogServiceRequestHeader;
+import org.apache.impala.thrift.TColumn;
+import org.apache.impala.thrift.TColumnType;
+import org.apache.impala.thrift.TColumnValue;
+import org.apache.impala.thrift.TCreateDropRoleParams;
+import org.apache.impala.thrift.TDdlExecRequest;
+import org.apache.impala.thrift.TDdlType;
+import org.apache.impala.thrift.TDescribeOutputStyle;
+import org.apache.impala.thrift.TDescribeResult;
+import org.apache.impala.thrift.TErrorCode;
+import org.apache.impala.thrift.TExecRequest;
+import org.apache.impala.thrift.TExplainLevel;
+import org.apache.impala.thrift.TExplainResult;
+import org.apache.impala.thrift.TFinalizeParams;
+import org.apache.impala.thrift.TFunctionCategory;
+import org.apache.impala.thrift.TGrantRevokePrivParams;
+import org.apache.impala.thrift.TGrantRevokeRoleParams;
+import org.apache.impala.thrift.TLineageGraph;
+import org.apache.impala.thrift.TLoadDataReq;
+import org.apache.impala.thrift.TLoadDataResp;
+import org.apache.impala.thrift.TMetadataOpRequest;
+import org.apache.impala.thrift.TPlanFragment;
+import org.apache.impala.thrift.TPlanFragmentTree;
+import org.apache.impala.thrift.TQueryCtx;
+import org.apache.impala.thrift.TQueryExecRequest;
+import org.apache.impala.thrift.TResetMetadataRequest;
+import org.apache.impala.thrift.TResultRow;
+import org.apache.impala.thrift.TResultSet;
+import org.apache.impala.thrift.TResultSetMetadata;
+import org.apache.impala.thrift.TShowFilesParams;
+import org.apache.impala.thrift.TStatus;
+import org.apache.impala.thrift.TStmtType;
+import org.apache.impala.thrift.TTableName;
+import org.apache.impala.thrift.TUpdateCatalogCacheRequest;
+import org.apache.impala.thrift.TUpdateCatalogCacheResponse;
+import org.apache.impala.thrift.TUpdateMembershipRequest;
+import org.apache.impala.util.EventSequence;
+import org.apache.impala.util.MembershipSnapshot;
+import org.apache.impala.util.PatternMatcher;
+import org.apache.impala.util.TResultRowBuilder;
+import org.apache.impala.util.TSessionStateUtil;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Predicates;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/service/JniCatalog.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/service/JniCatalog.java b/fe/src/main/java/org/apache/impala/service/JniCatalog.java
index bc71a49..fc8deaf 100644
--- a/fe/src/main/java/org/apache/impala/service/JniCatalog.java
+++ b/fe/src/main/java/org/apache/impala/service/JniCatalog.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.service;
+package org.apache.impala.service;
 
 import java.util.ArrayList;
 import java.util.List;
@@ -27,35 +27,35 @@ import org.apache.thrift.protocol.TBinaryProtocol;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.authorization.SentryConfig;
-import com.cloudera.impala.authorization.User;
-import com.cloudera.impala.catalog.CatalogException;
-import com.cloudera.impala.catalog.CatalogServiceCatalog;
-import com.cloudera.impala.catalog.Db;
-import com.cloudera.impala.catalog.Function;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.common.JniUtil;
-import com.cloudera.impala.service.BackendConfig;
-import com.cloudera.impala.thrift.TCatalogObject;
-import com.cloudera.impala.thrift.TDatabase;
-import com.cloudera.impala.thrift.TDdlExecRequest;
-import com.cloudera.impala.thrift.TFunction;
-import com.cloudera.impala.thrift.TGetAllCatalogObjectsResponse;
-import com.cloudera.impala.thrift.TGetDbsParams;
-import com.cloudera.impala.thrift.TGetDbsResult;
-import com.cloudera.impala.thrift.TGetFunctionsRequest;
-import com.cloudera.impala.thrift.TGetFunctionsResponse;
-import com.cloudera.impala.thrift.TGetTablesParams;
-import com.cloudera.impala.thrift.TGetTablesResult;
-import com.cloudera.impala.thrift.TLogLevel;
-import com.cloudera.impala.thrift.TPrioritizeLoadRequest;
-import com.cloudera.impala.thrift.TResetMetadataRequest;
-import com.cloudera.impala.thrift.TSentryAdminCheckRequest;
-import com.cloudera.impala.thrift.TUniqueId;
-import com.cloudera.impala.thrift.TUpdateCatalogRequest;
-import com.cloudera.impala.util.GlogAppender;
-import com.cloudera.impala.util.PatternMatcher;
+import org.apache.impala.authorization.SentryConfig;
+import org.apache.impala.authorization.User;
+import org.apache.impala.catalog.CatalogException;
+import org.apache.impala.catalog.CatalogServiceCatalog;
+import org.apache.impala.catalog.Db;
+import org.apache.impala.catalog.Function;
+import org.apache.impala.common.ImpalaException;
+import org.apache.impala.common.InternalException;
+import org.apache.impala.common.JniUtil;
+import org.apache.impala.service.BackendConfig;
+import org.apache.impala.thrift.TCatalogObject;
+import org.apache.impala.thrift.TDatabase;
+import org.apache.impala.thrift.TDdlExecRequest;
+import org.apache.impala.thrift.TFunction;
+import org.apache.impala.thrift.TGetAllCatalogObjectsResponse;
+import org.apache.impala.thrift.TGetDbsParams;
+import org.apache.impala.thrift.TGetDbsResult;
+import org.apache.impala.thrift.TGetFunctionsRequest;
+import org.apache.impala.thrift.TGetFunctionsResponse;
+import org.apache.impala.thrift.TGetTablesParams;
+import org.apache.impala.thrift.TGetTablesResult;
+import org.apache.impala.thrift.TLogLevel;
+import org.apache.impala.thrift.TPrioritizeLoadRequest;
+import org.apache.impala.thrift.TResetMetadataRequest;
+import org.apache.impala.thrift.TSentryAdminCheckRequest;
+import org.apache.impala.thrift.TUniqueId;
+import org.apache.impala.thrift.TUpdateCatalogRequest;
+import org.apache.impala.util.GlogAppender;
+import org.apache.impala.util.PatternMatcher;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Strings;
 import com.google.common.collect.Lists;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/service/JniFrontend.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/service/JniFrontend.java b/fe/src/main/java/org/apache/impala/service/JniFrontend.java
index af9c00c..0b4ce8f 100644
--- a/fe/src/main/java/org/apache/impala/service/JniFrontend.java
+++ b/fe/src/main/java/org/apache/impala/service/JniFrontend.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.service;
+package org.apache.impala.service;
 
 import java.io.File;
 import java.io.IOException;
@@ -41,55 +41,55 @@ import org.apache.thrift.protocol.TBinaryProtocol;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.analysis.ToSqlUtils;
-import com.cloudera.impala.authorization.AuthorizationConfig;
-import com.cloudera.impala.authorization.ImpalaInternalAdminUser;
-import com.cloudera.impala.authorization.User;
-import com.cloudera.impala.catalog.DataSource;
-import com.cloudera.impala.catalog.Db;
-import com.cloudera.impala.catalog.Function;
-import com.cloudera.impala.catalog.Role;
-import com.cloudera.impala.common.FileSystemUtil;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.common.JniUtil;
-import com.cloudera.impala.service.BackendConfig;
-import com.cloudera.impala.thrift.TCatalogObject;
-import com.cloudera.impala.thrift.TDatabase;
-import com.cloudera.impala.thrift.TDescribeDbParams;
-import com.cloudera.impala.thrift.TDescribeResult;
-import com.cloudera.impala.thrift.TDescribeTableParams;
-import com.cloudera.impala.thrift.TExecRequest;
-import com.cloudera.impala.thrift.TFunctionCategory;
-import com.cloudera.impala.thrift.TGetAllHadoopConfigsResponse;
-import com.cloudera.impala.thrift.TGetDataSrcsParams;
-import com.cloudera.impala.thrift.TGetDataSrcsResult;
-import com.cloudera.impala.thrift.TGetDbsParams;
-import com.cloudera.impala.thrift.TGetDbsResult;
-import com.cloudera.impala.thrift.TGetFunctionsParams;
-import com.cloudera.impala.thrift.TGetFunctionsResult;
-import com.cloudera.impala.thrift.TGetHadoopConfigRequest;
-import com.cloudera.impala.thrift.TGetHadoopConfigResponse;
-import com.cloudera.impala.thrift.TGetTablesParams;
-import com.cloudera.impala.thrift.TGetTablesResult;
-import com.cloudera.impala.thrift.TLoadDataReq;
-import com.cloudera.impala.thrift.TLoadDataResp;
-import com.cloudera.impala.thrift.TLogLevel;
-import com.cloudera.impala.thrift.TMetadataOpRequest;
-import com.cloudera.impala.thrift.TQueryCtx;
-import com.cloudera.impala.thrift.TResultSet;
-import com.cloudera.impala.thrift.TShowFilesParams;
-import com.cloudera.impala.thrift.TShowGrantRoleParams;
-import com.cloudera.impala.thrift.TShowRolesParams;
-import com.cloudera.impala.thrift.TShowRolesResult;
-import com.cloudera.impala.thrift.TShowStatsParams;
-import com.cloudera.impala.thrift.TTableName;
-import com.cloudera.impala.thrift.TUniqueId;
-import com.cloudera.impala.thrift.TUpdateCatalogCacheRequest;
-import com.cloudera.impala.thrift.TUpdateMembershipRequest;
-import com.cloudera.impala.util.GlogAppender;
-import com.cloudera.impala.util.PatternMatcher;
-import com.cloudera.impala.util.TSessionStateUtil;
+import org.apache.impala.analysis.ToSqlUtils;
+import org.apache.impala.authorization.AuthorizationConfig;
+import org.apache.impala.authorization.ImpalaInternalAdminUser;
+import org.apache.impala.authorization.User;
+import org.apache.impala.catalog.DataSource;
+import org.apache.impala.catalog.Db;
+import org.apache.impala.catalog.Function;
+import org.apache.impala.catalog.Role;
+import org.apache.impala.common.FileSystemUtil;
+import org.apache.impala.common.ImpalaException;
+import org.apache.impala.common.InternalException;
+import org.apache.impala.common.JniUtil;
+import org.apache.impala.service.BackendConfig;
+import org.apache.impala.thrift.TCatalogObject;
+import org.apache.impala.thrift.TDatabase;
+import org.apache.impala.thrift.TDescribeDbParams;
+import org.apache.impala.thrift.TDescribeResult;
+import org.apache.impala.thrift.TDescribeTableParams;
+import org.apache.impala.thrift.TExecRequest;
+import org.apache.impala.thrift.TFunctionCategory;
+import org.apache.impala.thrift.TGetAllHadoopConfigsResponse;
+import org.apache.impala.thrift.TGetDataSrcsParams;
+import org.apache.impala.thrift.TGetDataSrcsResult;
+import org.apache.impala.thrift.TGetDbsParams;
+import org.apache.impala.thrift.TGetDbsResult;
+import org.apache.impala.thrift.TGetFunctionsParams;
+import org.apache.impala.thrift.TGetFunctionsResult;
+import org.apache.impala.thrift.TGetHadoopConfigRequest;
+import org.apache.impala.thrift.TGetHadoopConfigResponse;
+import org.apache.impala.thrift.TGetTablesParams;
+import org.apache.impala.thrift.TGetTablesResult;
+import org.apache.impala.thrift.TLoadDataReq;
+import org.apache.impala.thrift.TLoadDataResp;
+import org.apache.impala.thrift.TLogLevel;
+import org.apache.impala.thrift.TMetadataOpRequest;
+import org.apache.impala.thrift.TQueryCtx;
+import org.apache.impala.thrift.TResultSet;
+import org.apache.impala.thrift.TShowFilesParams;
+import org.apache.impala.thrift.TShowGrantRoleParams;
+import org.apache.impala.thrift.TShowRolesParams;
+import org.apache.impala.thrift.TShowRolesResult;
+import org.apache.impala.thrift.TShowStatsParams;
+import org.apache.impala.thrift.TTableName;
+import org.apache.impala.thrift.TUniqueId;
+import org.apache.impala.thrift.TUpdateCatalogCacheRequest;
+import org.apache.impala.thrift.TUpdateMembershipRequest;
+import org.apache.impala.util.GlogAppender;
+import org.apache.impala.util.PatternMatcher;
+import org.apache.impala.util.TSessionStateUtil;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/service/MetadataOp.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/service/MetadataOp.java b/fe/src/main/java/org/apache/impala/service/MetadataOp.java
index ebfd984..c24f153 100644
--- a/fe/src/main/java/org/apache/impala/service/MetadataOp.java
+++ b/fe/src/main/java/org/apache/impala/service/MetadataOp.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.service;
+package org.apache.impala.service;
 
 import java.sql.DatabaseMetaData;
 import java.util.HashSet;
@@ -25,24 +25,24 @@ import java.util.Set;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.analysis.TableName;
-import com.cloudera.impala.authorization.User;
-import com.cloudera.impala.catalog.Column;
-import com.cloudera.impala.catalog.Db;
-import com.cloudera.impala.catalog.Function;
-import com.cloudera.impala.catalog.ImpaladCatalog;
-import com.cloudera.impala.catalog.PrimitiveType;
-import com.cloudera.impala.catalog.ScalarType;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.catalog.TableLoadingException;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.thrift.TColumn;
-import com.cloudera.impala.thrift.TColumnValue;
-import com.cloudera.impala.thrift.TResultRow;
-import com.cloudera.impala.thrift.TResultSet;
-import com.cloudera.impala.thrift.TResultSetMetadata;
-import com.cloudera.impala.util.PatternMatcher;
+import org.apache.impala.analysis.TableName;
+import org.apache.impala.authorization.User;
+import org.apache.impala.catalog.Column;
+import org.apache.impala.catalog.Db;
+import org.apache.impala.catalog.Function;
+import org.apache.impala.catalog.ImpaladCatalog;
+import org.apache.impala.catalog.PrimitiveType;
+import org.apache.impala.catalog.ScalarType;
+import org.apache.impala.catalog.Table;
+import org.apache.impala.catalog.TableLoadingException;
+import org.apache.impala.catalog.Type;
+import org.apache.impala.common.ImpalaException;
+import org.apache.impala.thrift.TColumn;
+import org.apache.impala.thrift.TColumnValue;
+import org.apache.impala.thrift.TResultRow;
+import org.apache.impala.thrift.TResultSet;
+import org.apache.impala.thrift.TResultSetMetadata;
+import org.apache.impala.util.PatternMatcher;
 import com.google.common.collect.Lists;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/util/AvroSchemaConverter.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/util/AvroSchemaConverter.java b/fe/src/main/java/org/apache/impala/util/AvroSchemaConverter.java
index 0feb1c1..667da34 100644
--- a/fe/src/main/java/org/apache/impala/util/AvroSchemaConverter.java
+++ b/fe/src/main/java/org/apache/impala/util/AvroSchemaConverter.java
@@ -14,7 +14,7 @@
 // KIND, either express or implied.  See the License for the
 // specific language governing permissions and limitations
 // under the License.
-package com.cloudera.impala.util;
+package org.apache.impala.util;
 
 import java.util.Arrays;
 import java.util.List;
@@ -25,15 +25,15 @@ import org.codehaus.jackson.JsonNode;
 import org.codehaus.jackson.node.IntNode;
 import org.codehaus.jackson.node.JsonNodeFactory;
 
-import com.cloudera.impala.analysis.ColumnDef;
-import com.cloudera.impala.catalog.ArrayType;
-import com.cloudera.impala.catalog.Column;
-import com.cloudera.impala.catalog.MapType;
-import com.cloudera.impala.catalog.ScalarType;
-import com.cloudera.impala.catalog.StructField;
-import com.cloudera.impala.catalog.StructType;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.catalog.Type;
+import org.apache.impala.analysis.ColumnDef;
+import org.apache.impala.catalog.ArrayType;
+import org.apache.impala.catalog.Column;
+import org.apache.impala.catalog.MapType;
+import org.apache.impala.catalog.ScalarType;
+import org.apache.impala.catalog.StructField;
+import org.apache.impala.catalog.StructType;
+import org.apache.impala.catalog.Table;
+import org.apache.impala.catalog.Type;
 import com.google.common.collect.Lists;
 
 /**
@@ -206,4 +206,4 @@ public class AvroSchemaConverter {
     structSchema.setFields(schemaFields);
     return structSchema;
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/util/AvroSchemaParser.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/util/AvroSchemaParser.java b/fe/src/main/java/org/apache/impala/util/AvroSchemaParser.java
index 60b0c7a..36a586b 100644
--- a/fe/src/main/java/org/apache/impala/util/AvroSchemaParser.java
+++ b/fe/src/main/java/org/apache/impala/util/AvroSchemaParser.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.util;
+package org.apache.impala.util;
 
 import static org.apache.avro.Schema.Type.BOOLEAN;
 import static org.apache.avro.Schema.Type.DOUBLE;
@@ -33,15 +33,15 @@ import org.apache.avro.Schema;
 import org.apache.avro.SchemaParseException;
 import org.codehaus.jackson.JsonNode;
 
-import com.cloudera.impala.analysis.ColumnDef;
-import com.cloudera.impala.analysis.TypeDef;
-import com.cloudera.impala.catalog.ArrayType;
-import com.cloudera.impala.catalog.MapType;
-import com.cloudera.impala.catalog.ScalarType;
-import com.cloudera.impala.catalog.StructField;
-import com.cloudera.impala.catalog.StructType;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
+import org.apache.impala.analysis.ColumnDef;
+import org.apache.impala.analysis.TypeDef;
+import org.apache.impala.catalog.ArrayType;
+import org.apache.impala.catalog.MapType;
+import org.apache.impala.catalog.ScalarType;
+import org.apache.impala.catalog.StructField;
+import org.apache.impala.catalog.StructType;
+import org.apache.impala.catalog.Type;
+import org.apache.impala.common.AnalysisException;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/util/AvroSchemaUtils.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/util/AvroSchemaUtils.java b/fe/src/main/java/org/apache/impala/util/AvroSchemaUtils.java
index f86c347..f5b3bb4 100644
--- a/fe/src/main/java/org/apache/impala/util/AvroSchemaUtils.java
+++ b/fe/src/main/java/org/apache/impala/util/AvroSchemaUtils.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.util;
+package org.apache.impala.util;
 
 import java.io.IOException;
 import java.io.InputStream;
@@ -28,10 +28,10 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.serde2.avro.AvroSerdeUtils;
 
-import com.cloudera.impala.analysis.ColumnDef;
-import com.cloudera.impala.catalog.PrimitiveType;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.FileSystemUtil;
+import org.apache.impala.analysis.ColumnDef;
+import org.apache.impala.catalog.PrimitiveType;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.common.FileSystemUtil;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Strings;
 import com.google.common.collect.Lists;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/util/DisjointSet.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/util/DisjointSet.java b/fe/src/main/java/org/apache/impala/util/DisjointSet.java
index bce214e..d5543b1 100644
--- a/fe/src/main/java/org/apache/impala/util/DisjointSet.java
+++ b/fe/src/main/java/org/apache/impala/util/DisjointSet.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.util;
+package org.apache.impala.util;
 
 import java.util.Collection;
 import java.util.Iterator;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/util/EventSequence.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/util/EventSequence.java b/fe/src/main/java/org/apache/impala/util/EventSequence.java
index 6b12c2e..8ce286a 100644
--- a/fe/src/main/java/org/apache/impala/util/EventSequence.java
+++ b/fe/src/main/java/org/apache/impala/util/EventSequence.java
@@ -15,11 +15,11 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.util;
+package org.apache.impala.util;
 
 import java.util.List;
 
-import com.cloudera.impala.thrift.TEventSequence;
+import org.apache.impala.thrift.TEventSequence;
 
 import com.google.common.collect.Lists;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/util/FileWatchService.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/util/FileWatchService.java b/fe/src/main/java/org/apache/impala/util/FileWatchService.java
index 88a456d..b3d9483 100644
--- a/fe/src/main/java/org/apache/impala/util/FileWatchService.java
+++ b/fe/src/main/java/org/apache/impala/util/FileWatchService.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.util;
+package org.apache.impala.util;
 
 import java.io.File;
 import java.util.concurrent.Executors;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/util/FsPermissionChecker.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/util/FsPermissionChecker.java b/fe/src/main/java/org/apache/impala/util/FsPermissionChecker.java
index 7523cc8..8c7b1cc 100644
--- a/fe/src/main/java/org/apache/impala/util/FsPermissionChecker.java
+++ b/fe/src/main/java/org/apache/impala/util/FsPermissionChecker.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.util;
+package org.apache.impala.util;
 
 import java.io.IOException;
 import java.util.Arrays;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/util/GlogAppender.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/util/GlogAppender.java b/fe/src/main/java/org/apache/impala/util/GlogAppender.java
index a5e1eb1..edefd0f 100644
--- a/fe/src/main/java/org/apache/impala/util/GlogAppender.java
+++ b/fe/src/main/java/org/apache/impala/util/GlogAppender.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.util;
+package org.apache.impala.util;
 
 import java.util.Properties;
 
@@ -25,8 +25,8 @@ import org.apache.log4j.Logger;
 import org.apache.log4j.PropertyConfigurator;
 import org.apache.log4j.spi.LoggingEvent;
 
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.thrift.TLogLevel;
+import org.apache.impala.common.InternalException;
+import org.apache.impala.thrift.TLogLevel;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 
@@ -96,7 +96,7 @@ public class GlogAppender extends AppenderSkeleton {
    * Manually override Log4j root logger configuration. Any values in log4j.properties
    * not overridden (that is, anything but the root logger and its default level) will
    * continue to have effect.
-   *  - impalaLogLevel - the maximum log level for com.cloudera.impala.* classes
+   *  - impalaLogLevel - the maximum log level for org.apache.impala.* classes
    *  - otherLogLevel - the maximum log level for all other classes
    */
   public static void Install(TLogLevel impalaLogLevel, TLogLevel otherLogLevel)
@@ -116,11 +116,11 @@ public class GlogAppender extends AppenderSkeleton {
     //
     // Finally we can configure per-package logging which overrides the rootLogger
     // setting. In order to control Impala's logging independently of the rest of the
-    // world, we set the log level for com.cloudera.impala.
+    // world, we set the log level for org.apache.impala.
     properties.setProperty("log4j.rootLogger",
         log4jLevelForTLogLevel(otherLogLevel) + ",glog");
     properties.setProperty("log4j.appender.glog.Threshold", "TRACE");
-    properties.setProperty("log4j.logger.com.cloudera.impala",
+    properties.setProperty("log4j.logger.org.apache.impala",
         log4jLevelForTLogLevel(impalaLogLevel));
     PropertyConfigurator.configure(properties);
     Logger.getLogger(GlogAppender.class).info(String.format("Logging initialized. " +
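
The Install() comments in the hunk above describe a layered log4j setup: the root logger carries the level for everything outside Impala, the appender threshold is left wide open, and a per-package entry overrides the level for org.apache.impala. The sketch below reproduces only that layering as a standalone program; the class name LoggingLayeringSketch, the WARN/INFO levels, and the use of a stock ConsoleAppender in place of Impala's custom glog appender are illustrative assumptions, not part of the commit.

import java.util.Properties;

import org.apache.log4j.Logger;
import org.apache.log4j.PropertyConfigurator;

// Minimal sketch of the layered log4j configuration described in GlogAppender.Install().
// ConsoleAppender stands in for Impala's custom glog appender.
public class LoggingLayeringSketch {
  public static void main(String[] args) {
    Properties props = new Properties();
    // Everything outside Impala logs at WARN through the console appender.
    props.setProperty("log4j.rootLogger", "WARN,console");
    props.setProperty("log4j.appender.console", "org.apache.log4j.ConsoleAppender");
    props.setProperty("log4j.appender.console.layout", "org.apache.log4j.SimpleLayout");
    // The appender passes everything through; levels are controlled per logger, as in Install().
    props.setProperty("log4j.appender.console.Threshold", "TRACE");
    // Impala's own classes get their own level, independent of the root logger.
    props.setProperty("log4j.logger.org.apache.impala", "INFO");
    PropertyConfigurator.configure(props);

    Logger.getLogger("org.apache.impala.service.Frontend").info("emitted: package override is INFO");
    Logger.getLogger("org.apache.hadoop.fs.FileSystem").info("suppressed: root logger is WARN");
  }
}

With these properties an INFO message from an org.apache.impala logger is printed while the same call from any other package is filtered by the root logger, which is the independence the renamed log4j.logger.org.apache.impala key preserves.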



[37/61] [partial] incubator-impala git commit: IMPALA-3786: Replace "cloudera" with "apache" (part 1)

Posted by kw...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/ShowDataSrcsStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/ShowDataSrcsStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/ShowDataSrcsStmt.java
deleted file mode 100644
index 291c467..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/ShowDataSrcsStmt.java
+++ /dev/null
@@ -1,72 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TShowDataSrcsParams;
-
-/**
- * Representation of a SHOW DATA SOURCES [pattern] statement.
- * Acceptable syntax:
- *
- * SHOW DATA SOURCES
- * SHOW DATA SOURCES LIKE 'pattern'
- * TODO: Refactor Show*Stmt to remove code duplication
- */
-public class ShowDataSrcsStmt extends StatementBase {
-  // Pattern to match tables against. | denotes choice, * matches all strings
-  private final String pattern_;
-
-  /**
-   * Default constructor, which creates a show statement which returns all
-   * data sources.
-   */
-  public ShowDataSrcsStmt() {
-    this(null);
-  }
-
-  /**
-   * Constructs a show statement which matches all data sources against the
-   * supplied pattern.
-   */
-  public ShowDataSrcsStmt(String pattern) {
-    this.pattern_ = pattern;
-  }
-
-  public String getPattern() { return pattern_; }
-
-  @Override
-  public String toSql() {
-    if (pattern_ == null) {
-      return "SHOW DATA SOURCES";
-    } else {
-      return "SHOW DATA SOURCES LIKE '" + pattern_ + "'";
-    }
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    // Nothing to do here
-  }
-
-  public TShowDataSrcsParams toThrift() {
-    TShowDataSrcsParams params = new TShowDataSrcsParams();
-    params.setShow_pattern(getPattern());
-    return params;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/ShowDbsStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/ShowDbsStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/ShowDbsStmt.java
deleted file mode 100644
index 40c3cc8..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/ShowDbsStmt.java
+++ /dev/null
@@ -1,74 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TShowDbsParams;
-
-/**
- * Representation of a SHOW DATABASES [pattern] statement.
- * Acceptable syntax:
- *
- * SHOW DATABASES
- * SHOW SCHEMAS
- * SHOW DATABASES LIKE 'pattern'
- * SHOW SCHEMAS LIKE 'pattern'
- *
- */
-public class ShowDbsStmt extends StatementBase {
-  // Pattern to match tables against. | denotes choice, * matches all strings
-  private final String pattern_;
-
-  /**
-   * Default constructor, which creates a show statement which returns all
-   * databases.
-   */
-  public ShowDbsStmt() {
-    this(null);
-  }
-
-  /**
-   * Constructs a show statement which matches all databases against the
-   * supplied pattern.
-   */
-  public ShowDbsStmt(String pattern) {
-    this.pattern_ = pattern;
-  }
-
-  public String getPattern() { return pattern_; }
-
-  @Override
-  public String toSql() {
-    if (pattern_ == null) {
-      return "SHOW DATABASES";
-    } else {
-      return "SHOW DATABASES LIKE '" + pattern_ + "'";
-    }
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    // Nothing to do here
-  }
-
-  public TShowDbsParams toThrift() {
-    TShowDbsParams params = new TShowDbsParams();
-    params.setShow_pattern(getPattern());
-    return params;
-  }
-}
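
The deleted ShowDbsStmt above is a small value class whose behavior is fully visible in the hunk: the pattern is optional, toSql() reproduces the statement text, and toThrift() packs the pattern into TShowDbsParams. The sketch below exercises just the constructor, getPattern(), and toSql(); the wrapper class ShowDbsStmtSketch and the sample pattern are illustrative, it assumes the frontend classes (under the org.apache.impala packages this commit introduces) are on the classpath, and analyze() is not called since it is a no-op here.

import org.apache.impala.analysis.ShowDbsStmt;

// Illustrative use of the constructor and accessors shown in the hunk above.
public class ShowDbsStmtSketch {
  public static void main(String[] args) {
    ShowDbsStmt all = new ShowDbsStmt();                      // matches every database
    ShowDbsStmt filtered = new ShowDbsStmt("tpch*|tpcds*");   // '|' is choice, '*' matches any string

    System.out.println(all.toSql());        // SHOW DATABASES
    System.out.println(filtered.toSql());   // SHOW DATABASES LIKE 'tpch*|tpcds*'
    System.out.println(filtered.getPattern());
  }
}

The other pattern-driven statements removed in this part of the commit (ShowDataSrcsStmt, ShowFunctionsStmt) follow the same shape, which is what the TODO in ShowDataSrcsStmt about refactoring Show*Stmt refers to.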

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/ShowFilesStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/ShowFilesStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/ShowFilesStmt.java
deleted file mode 100644
index 0bfc46b..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/ShowFilesStmt.java
+++ /dev/null
@@ -1,90 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.List;
-
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TShowFilesParams;
-import com.cloudera.impala.thrift.TPartitionKeyValue;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.thrift.TTableName;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Joiner;
-import com.google.common.collect.Lists;
-
-/**
- * Representation of a SHOW FILES statement.
- * Acceptable syntax:
- *
- * SHOW FILES IN [dbName.]tableName [PARTITION(key=value,...)]
- *
- */
-public class ShowFilesStmt extends StatementBase {
-  private TableName tableName_;
-
-  // Show files for all the partitions if this is null.
-  private final PartitionSpec partitionSpec_;
-
-  // Set during analysis.
-  protected Table table_;
-
-  public ShowFilesStmt(TableName tableName, PartitionSpec partitionSpec) {
-    this.tableName_ = tableName;
-    this.partitionSpec_ = partitionSpec;
-  }
-
-  @Override
-  public String toSql() {
-    StringBuilder strBuilder = new StringBuilder();
-    strBuilder.append("SHOW FILES IN " + tableName_.toString());
-    if (partitionSpec_ != null) strBuilder.append(" " + partitionSpec_.toSql());
-    return strBuilder.toString();
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    if (!tableName_.isFullyQualified()) {
-      tableName_ = new TableName(analyzer.getDefaultDb(), tableName_.getTbl());
-    }
-    table_ = analyzer.getTable(tableName_, Privilege.VIEW_METADATA);
-    if (!(table_ instanceof HdfsTable)) {
-      throw new AnalysisException(String.format(
-          "SHOW FILES not applicable to a non hdfs table: %s", table_.getFullName()));
-    }
-
-    // Analyze the partition spec, if one was specified.
-    if (partitionSpec_ != null) {
-      partitionSpec_.setTableName(tableName_);
-      partitionSpec_.setPartitionShouldExist();
-      partitionSpec_.setPrivilegeRequirement(Privilege.VIEW_METADATA);
-      partitionSpec_.analyze(analyzer);
-    }
-  }
-
-  public TShowFilesParams toThrift() {
-    TShowFilesParams params = new TShowFilesParams();
-    params.setTable_name(new TTableName(tableName_.getDb(), tableName_.getTbl()));
-    if (partitionSpec_ != null) {
-      params.setPartition_spec(partitionSpec_.toThrift());
-    }
-    return params;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/ShowFunctionsStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/ShowFunctionsStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/ShowFunctionsStmt.java
deleted file mode 100644
index 33129d2..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/ShowFunctionsStmt.java
+++ /dev/null
@@ -1,94 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TFunctionCategory;
-import com.cloudera.impala.thrift.TShowFunctionsParams;
-import com.google.common.base.Preconditions;
-
-/**
- * Representation of a SHOW FUNCTIONS [pattern] statement.
- * Acceptable syntax:
- *
- * SHOW FUNCTIONS
- * SHOW FUNCTIONS LIKE 'pattern'
- *
- */
-public class ShowFunctionsStmt extends StatementBase {
-  // Pattern to match tables against. | denotes choice, * matches all strings
-  private final String pattern_;
-
-  // DB (if any) as seen by the parser
-  private final String parsedDb_;
-
-  // Category of functions to be shown. Always set.
-  private final TFunctionCategory fnCategory_;
-
-  // Set during analysis
-  private String postAnalysisDb_;
-
-  /**
-   * Constructs a show statement which matches all functions against the
-   * supplied pattern.
-   */
-  public ShowFunctionsStmt(String db, String pattern, TFunctionCategory fnCategory) {
-    Preconditions.checkNotNull(fnCategory);
-    parsedDb_ = db;
-    pattern_ = pattern;
-    fnCategory_ = fnCategory;
-  }
-
-  /**
-   * Can only be called after analysis, returns the name of the database that
-   * this show will search against.
-   */
-  public String getDb() {
-    Preconditions.checkNotNull(postAnalysisDb_);
-    return postAnalysisDb_;
-  }
-
-  public String getPattern() { return pattern_; }
-
-  @Override
-  public String toSql() {
-    String fnCategory = (fnCategory_ == null) ? "" : fnCategory_.toString() + " ";
-    if (pattern_ == null) {
-      return "SHOW " + fnCategory + "FUNCTIONS";
-    } else {
-      return "SHOW " + fnCategory + "FUNCTIONS LIKE '" + pattern_ + "'";
-    }
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    postAnalysisDb_ = (parsedDb_ == null ? analyzer.getDefaultDb() : parsedDb_);
-    if (analyzer.getDb(postAnalysisDb_, Privilege.VIEW_METADATA) == null) {
-      throw new AnalysisException(Analyzer.DB_DOES_NOT_EXIST_ERROR_MSG + postAnalysisDb_);
-    }
-  }
-
-  public TShowFunctionsParams toThrift() {
-    TShowFunctionsParams params = new TShowFunctionsParams();
-    params.setCategory(fnCategory_);
-    params.setDb(getDb());
-    params.setShow_pattern(getPattern());
-    return params;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/ShowGrantRoleStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/ShowGrantRoleStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/ShowGrantRoleStmt.java
deleted file mode 100644
index b8fe737..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/ShowGrantRoleStmt.java
+++ /dev/null
@@ -1,77 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import com.cloudera.impala.catalog.Role;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.thrift.TShowGrantRoleParams;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Strings;
-
-/**
- * Represents a "SHOW GRANT ROLE <role> [ON <privilegeSpec>]" statement.
- */
-public class ShowGrantRoleStmt extends AuthorizationStmt {
-  private final PrivilegeSpec privilegeSpec_;
-  private final String roleName_;
-
-  // Set/modified during analysis
-  private Role role_;
-
-  public ShowGrantRoleStmt(String roleName, PrivilegeSpec privilegeSpec) {
-    Preconditions.checkNotNull(roleName);
-    roleName_ = roleName;
-    privilegeSpec_ = privilegeSpec;
-  }
-
-  public TShowGrantRoleParams toThrift() throws InternalException {
-    TShowGrantRoleParams params = new TShowGrantRoleParams();
-    params.setRole_name(roleName_);
-    params.setRequesting_user(requestingUser_.getShortName());
-    if (privilegeSpec_ != null) {
-      params.setPrivilege(privilegeSpec_.toThrift().get(0));
-      params.getPrivilege().setRole_id(role_.getId());
-    }
-    return params;
-  }
-
-  @Override
-  public String toSql() {
-    StringBuilder sb = new StringBuilder("SHOW GRANT ROLE ");
-    sb.append(roleName_);
-    if (privilegeSpec_ != null) sb.append(" " + privilegeSpec_.toSql());
-    return sb.toString();
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    super.analyze(analyzer);
-    if (Strings.isNullOrEmpty(roleName_)) {
-      throw new AnalysisException("Role name in SHOW GRANT ROLE cannot be " +
-          "empty.");
-    }
-    role_ = analyzer.getCatalog().getAuthPolicy().getRole(roleName_);
-    if (role_ == null) {
-      throw new AnalysisException(String.format("Role '%s' does not exist.", roleName_));
-    }
-    if (privilegeSpec_ != null) privilegeSpec_.analyze(analyzer);
-  }
-
-  public Role getRole() { return role_; }
-}
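
A brief sketch of the removed ShowGrantRoleStmt, derived from toSql() and analyze()
above; the role name is a hypothetical example:

    ShowGrantRoleStmt stmt = new ShowGrantRoleStmt("analyst_role", null);
    // stmt.toSql() -> "SHOW GRANT ROLE analyst_role"
    // analyze() rejects an empty role name and a role unknown to the authorization
    // policy; toThrift() adds the requesting user and, when a privilege spec was
    // given, the resolved role id.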

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/ShowPartitionsStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/ShowPartitionsStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/ShowPartitionsStmt.java
deleted file mode 100644
index 69cd7a1..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/ShowPartitionsStmt.java
+++ /dev/null
@@ -1,55 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.common.AnalysisException;
-import com.google.common.base.Preconditions;
-
-/**
- * Representation of a SHOW PARTITIONS statement for displaying
- * partition information on a given table.
- */
-public class ShowPartitionsStmt extends ShowStatsStmt {
-
-  public ShowPartitionsStmt(TableName tableName) {
-    super(tableName, false);
-  }
-
-  @Override
-  public String toSql() {
-    return getSqlPrefix() + " " + tableName_.toString();
-  }
-
-  @Override
-  protected String getSqlPrefix() { return "SHOW PARTITIONS"; }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    super.analyze(analyzer);
-    Preconditions.checkNotNull(table_);
-    if (!(table_ instanceof HdfsTable)) {
-      throw new AnalysisException(getSqlPrefix() + " must target an HDFS table: " +
-          table_.getFullName());
-    }
-    if (table_.getNumClusteringCols() == 0) {
-      throw new AnalysisException(String.format(
-          "Table is not partitioned: %s", table_.getFullName()));
-    }
-  }
-}
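
A short sketch of the analysis contract of the removed ShowPartitionsStmt; the table
name is illustrative, and the rendered SQL assumes TableName.toString() prints the
familiar db.table form:

    ShowPartitionsStmt stmt =
        new ShowPartitionsStmt(new TableName("functional", "alltypes"));
    // stmt.toSql() -> "SHOW PARTITIONS functional.alltypes"
    // analyze() inherits table resolution from ShowStatsStmt and then fails with an
    // AnalysisException if the target is not an HdfsTable or has no partition columns.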

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/ShowRolesStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/ShowRolesStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/ShowRolesStmt.java
deleted file mode 100644
index 516edfe..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/ShowRolesStmt.java
+++ /dev/null
@@ -1,72 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import com.cloudera.impala.authorization.User;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.thrift.TShowRolesParams;
-import com.google.common.base.Preconditions;
-
-/**
- * Represents "SHOW [CURRENT] ROLES" and "SHOW ROLE GRANT GROUP <groupName>"
- * statements.
- */
-public class ShowRolesStmt extends AuthorizationStmt {
-  // If null, all roles will be shown. Otherwise only roles granted to this
-  // group will be shown.
-  private final String groupName_;
-  private final boolean isShowCurrentRoles_;
-
-  // Set during analysis.
-  private User requestingUser_;
-
-  public ShowRolesStmt(boolean isShowCurrentRoles, String groupName) {
-    // An empty group name should never be possible since group name is an identifier
-    // and Impala does not allow empty identifiers.
-    Preconditions.checkState(!isShowCurrentRoles ||
-        (groupName == null || !groupName.isEmpty()));
-    groupName_ = groupName;
-    isShowCurrentRoles_ = isShowCurrentRoles;
-  }
-
-  @Override
-  public String toSql() {
-    if (groupName_ == null) {
-      return isShowCurrentRoles_ ? "SHOW CURRENT ROLES" : "SHOW ROLES";
-    } else {
-      return "SHOW ROLE GRANT GROUP " + groupName_;
-    }
-  }
-
-  public TShowRolesParams toThrift() throws InternalException {
-    TShowRolesParams params = new TShowRolesParams();
-    params.setRequesting_user(requestingUser_.getShortName());
-    params.setIs_show_current_roles(isShowCurrentRoles_);
-    if (groupName_ != null) params.setGrant_group(groupName_);
-    // Users should always be able to execute SHOW CURRENT ROLES.
-    params.setIs_admin_op(!isShowCurrentRoles_);
-    return params;
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    super.analyze(analyzer);
-    requestingUser_ = analyzer.getUser();
-  }
-}
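
The removed ShowRolesStmt covers three SQL forms; a minimal sketch of each, based on
the constructor and toSql() above (the group name is a hypothetical example):

    new ShowRolesStmt(true, null).toSql();    // "SHOW CURRENT ROLES"
    new ShowRolesStmt(false, null).toSql();   // "SHOW ROLES"
    new ShowRolesStmt(false, "dev").toSql();  // "SHOW ROLE GRANT GROUP dev"
    // toThrift() marks only the first form as a non-admin operation, so any user can
    // run SHOW CURRENT ROLES; the other two require admin privileges.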

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/ShowStatsStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/ShowStatsStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/ShowStatsStmt.java
deleted file mode 100644
index a421b71..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/ShowStatsStmt.java
+++ /dev/null
@@ -1,65 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.catalog.View;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TShowStatsParams;
-
-/**
- * Representation of a SHOW TABLE/COLUMN STATS statement for
- * displaying column and table/partition statistics for a given table.
- */
-public class ShowStatsStmt extends StatementBase {
-  protected final boolean isShowColStats_;
-  protected final TableName tableName_;
-
-  // Set during analysis.
-  protected Table table_;
-
-  public ShowStatsStmt(TableName tableName, boolean isShowColStats) {
-    this.tableName_ = tableName;
-    this.isShowColStats_ = isShowColStats;
-  }
-
-  @Override
-  public String toSql() {
-    return getSqlPrefix() + " " + tableName_.toString();
-  }
-
-  protected String getSqlPrefix() {
-    return "SHOW " + ((isShowColStats_) ? "COLUMN" : "TABLE") + " STATS";
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    table_ = analyzer.getTable(tableName_, Privilege.VIEW_METADATA);
-    if (table_ instanceof View) {
-      throw new AnalysisException(String.format(
-          "%s not applicable to a view: %s", getSqlPrefix(), table_.getFullName()));
-    }
-  }
-
-  public TShowStatsParams toThrift() {
-    // Ensure the DB is set in the table_name field by using table and not tableName.
-    return new TShowStatsParams(isShowColStats_,
-        new TableName(table_.getDb().getName(), table_.getName()).toThrift());
-  }
-}
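
A small sketch of the two statements the removed ShowStatsStmt represents; the table
name is illustrative and assumes the usual db.table rendering of TableName:

    TableName tbl = new TableName("functional", "alltypes");
    new ShowStatsStmt(tbl, false).toSql();  // "SHOW TABLE STATS functional.alltypes"
    new ShowStatsStmt(tbl, true).toSql();   // "SHOW COLUMN STATS functional.alltypes"
    // analyze() resolves the table with the VIEW_METADATA privilege and rejects views.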

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/ShowTablesStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/ShowTablesStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/ShowTablesStmt.java
deleted file mode 100644
index 22767b5..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/ShowTablesStmt.java
+++ /dev/null
@@ -1,120 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TShowTablesParams;
-import com.google.common.base.Preconditions;
-
-/**
- * Representation of a SHOW TABLES [pattern] statement.
- * Acceptable syntax:
- *
- * SHOW TABLES
- * SHOW TABLES "pattern"
- * SHOW TABLES LIKE "pattern"
- * SHOW TABLES IN database
- * SHOW TABLES IN database "pattern"
- * SHOW TABLES IN database LIKE "pattern"
- *
- * In Hive, the 'LIKE' is optional. Also SHOW TABLES unquotedpattern is accepted
- * by the parser but returns no results. We don't support that syntax.
- */
-public class ShowTablesStmt extends StatementBase {
-  // Pattern to match tables against. | denotes choice, * matches all strings
-  private final String pattern_;
-
-  // DB (if any) as seen by the parser
-  private final String parsedDb_;
-
-  // Set during analysis
-  private String postAnalysisDb_;
-
-  /**
-   * Default constructor, which creates a show statement with the default
-   * database and no pattern (which returns all tables in the default database).
-   */
-  public ShowTablesStmt() {
-    this(null, null);
-  }
-
-  /**
-   * Constructs a show statement against the default database using the supplied
-   * pattern.
-   */
-  public ShowTablesStmt(String pattern) {
-    this(null, pattern);
-  }
-
-  /**
-   * General purpose constructor which builds a show statement that matches
-   * table names against a given pattern in the supplied database.
-   *
-   * If pattern is null, all tables in the supplied database match.
-   * If database is null, the default database is searched.
-   */
-  public ShowTablesStmt(String database, String pattern) {
-    this.parsedDb_ = database;
-    this.pattern_ = pattern;
-    this.postAnalysisDb_ = null;
-  }
-
-  public String getPattern() { return pattern_; }
-
-  /**
-   * Can only be called after analysis, returns the name of the database that
-   * this show will search against.
-   */
-  public String getDb() {
-    Preconditions.checkNotNull(postAnalysisDb_);
-    return postAnalysisDb_;
-  }
-
-  @Override
-  public String toSql() {
-    if (pattern_ == null) {
-      if (parsedDb_ == null) {
-        return "SHOW TABLES";
-      } else {
-        return "SHOW TABLES IN " + parsedDb_;
-      }
-    } else {
-      if (parsedDb_ == null) {
-        return "SHOW TABLES LIKE '" + pattern_ + "'";
-      } else {
-        return "SHOW TABLES IN " + parsedDb_ + " LIKE '" + pattern_ + "'";
-      }
-    }
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    postAnalysisDb_ = (parsedDb_ == null ? analyzer.getDefaultDb() : parsedDb_);
-    if (analyzer.getDb(postAnalysisDb_, Privilege.ANY) == null) {
-      throw new AnalysisException(Analyzer.DB_DOES_NOT_EXIST_ERROR_MSG + postAnalysisDb_);
-    }
-  }
-
-  public TShowTablesParams toThrift() {
-    TShowTablesParams params = new TShowTablesParams();
-    params.setShow_pattern(getPattern());
-    params.setDb(getDb());
-    return params;
-  }
-}
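
The removed ShowTablesStmt accepts the syntax variants listed in its class comment;
a minimal sketch of the four toSql() shapes, with illustrative names:

    new ShowTablesStmt().toSql();                           // "SHOW TABLES"
    new ShowTablesStmt("alltypes*").toSql();                // "SHOW TABLES LIKE 'alltypes*'"
    new ShowTablesStmt("functional", null).toSql();         // "SHOW TABLES IN functional"
    new ShowTablesStmt("functional", "alltypes*").toSql();  // "SHOW TABLES IN functional LIKE 'alltypes*'"
    // getDb() is only valid after analyze(), which substitutes the default database
    // when none was parsed and verifies that the database exists.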

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/SingularRowSrcTableRef.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/SingularRowSrcTableRef.java b/fe/src/main/java/com/cloudera/impala/analysis/SingularRowSrcTableRef.java
deleted file mode 100644
index 0a5d331..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/SingularRowSrcTableRef.java
+++ /dev/null
@@ -1,64 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.List;
-
-import com.cloudera.impala.planner.PlanNode;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-/**
- * Dummy table ref that is used in plan generation for adding a SingularRowSrcNode
- * inside a SubplanNode's plan tree (second child).
- */
-public class SingularRowSrcTableRef extends TableRef {
-  private final List<TupleId> tblRefIds_;
-  private final List<TupleId> tupleIds_;
-
-  public SingularRowSrcTableRef(PlanNode subplanInput) {
-    super(null, "singular-row-src-tblref");
-    Preconditions.checkNotNull(subplanInput);
-    desc_ = null;
-    isAnalyzed_ = true;
-    tblRefIds_ = Lists.newArrayList(subplanInput.getTblRefIds());
-    tupleIds_ = Lists.newArrayList(subplanInput.getTupleIds());
-  }
-
-  /**
-   * This override is needed to support join inversion where the original lhs
-   * is a SingularRowSrcTableRef.
-   */
-  @Override
-  public void setLeftTblRef(TableRef leftTblRef) {
-    super.setLeftTblRef(leftTblRef);
-    tblRefIds_.clear();
-    tupleIds_.clear();
-    tblRefIds_.addAll(leftTblRef_.getAllTableRefIds());
-    tupleIds_.addAll(leftTblRef_.getAllMaterializedTupleIds());
-  }
-
-  @Override
-  public TupleId getId() { return tblRefIds_.get(tblRefIds_.size() - 1); }
-
-  @Override
-  public List<TupleId> getAllTableRefIds() { return tblRefIds_; }
-
-  @Override
-  public List<TupleId> getAllMaterializedTupleIds() { return tupleIds_; }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/SlotDescriptor.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/SlotDescriptor.java b/fe/src/main/java/com/cloudera/impala/analysis/SlotDescriptor.java
deleted file mode 100644
index 7850a0e..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/SlotDescriptor.java
+++ /dev/null
@@ -1,261 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.Collections;
-import java.util.List;
-
-import com.cloudera.impala.catalog.Column;
-import com.cloudera.impala.catalog.ColumnStats;
-import com.cloudera.impala.catalog.KuduColumn;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.thrift.TSlotDescriptor;
-import com.google.common.base.Joiner;
-import com.google.common.base.Objects;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-public class SlotDescriptor {
-  private final SlotId id_;
-  private final TupleDescriptor parent_;
-
-  // Resolved path to the column/field corresponding to this slot descriptor, if any.
-  // Only set for slots that represent a column/field materialized in a scan.
-  private Path path_;
-  private Type type_;
-
-  // Tuple descriptor for collection items. Only set if type_ is an array or map.
-  private TupleDescriptor itemTupleDesc_;
-
-  // for SlotRef.toSql() in the absence of a path
-  private String label_;
-
-  // Expr(s) materialized into this slot; multiple exprs for unions. Should be empty if
-  // path_ is set.
-  private List<Expr> sourceExprs_ = Lists.newArrayList();
-
-  // if false, this slot doesn't need to be materialized in parent tuple
-  // (and physical layout parameters are invalid)
-  private boolean isMaterialized_ = false;
-
-  // if false, this slot cannot be NULL
-  private boolean isNullable_ = true;
-
-  // physical layout parameters
-  private int byteSize_;
-  private int byteOffset_;  // within tuple
-  private int nullIndicatorByte_;  // index into byte array
-  private int nullIndicatorBit_; // index within byte
-  private int slotIdx_;          // index within tuple struct
-
-  private ColumnStats stats_;  // only set if 'column' isn't set
-
-  SlotDescriptor(SlotId id, TupleDescriptor parent) {
-    id_ = id;
-    parent_ = parent;
-    byteOffset_ = -1;  // invalid
-  }
-
-  SlotDescriptor(SlotId id, TupleDescriptor parent, SlotDescriptor src) {
-    id_ = id;
-    parent_ = parent;
-    type_ = src.type_;
-    itemTupleDesc_ = src.itemTupleDesc_;
-    path_ = src.path_;
-    label_ = src.label_;
-    sourceExprs_ = src.sourceExprs_;
-    isMaterialized_ = src.isMaterialized_;
-    isNullable_ = src.isNullable_;
-    byteSize_ = src.byteSize_;
-    byteOffset_ = src.byteOffset_;
-    nullIndicatorByte_ = src.nullIndicatorByte_;
-    nullIndicatorBit_ = src.nullIndicatorBit_;
-    slotIdx_ = src.slotIdx_;
-    stats_ = src.stats_;
-  }
-
-  public int getNullIndicatorByte() { return nullIndicatorByte_; }
-  public void setNullIndicatorByte(int nullIndicatorByte) {
-    this.nullIndicatorByte_ = nullIndicatorByte;
-  }
-  public int getNullIndicatorBit() { return nullIndicatorBit_; }
-  public void setNullIndicatorBit(int nullIndicatorBit) {
-    this.nullIndicatorBit_ = nullIndicatorBit;
-  }
-  public SlotId getId() { return id_; }
-  public TupleDescriptor getParent() { return parent_; }
-  public Type getType() { return type_; }
-  public void setType(Type type) { type_ = type; }
-  public TupleDescriptor getItemTupleDesc() { return itemTupleDesc_; }
-  public void setItemTupleDesc(TupleDescriptor t) {
-    Preconditions.checkState(
-        itemTupleDesc_ == null, "Item tuple descriptor already set.");
-    itemTupleDesc_ = t;
-  }
-  public boolean isMaterialized() { return isMaterialized_; }
-  public void setIsMaterialized(boolean value) { isMaterialized_ = value; }
-  public boolean getIsNullable() { return isNullable_; }
-  public void setIsNullable(boolean value) { isNullable_ = value; }
-  public int getByteSize() { return byteSize_; }
-  public void setByteSize(int byteSize) { this.byteSize_ = byteSize; }
-  public int getByteOffset() { return byteOffset_; }
-  public void setByteOffset(int byteOffset) { this.byteOffset_ = byteOffset; }
-  public void setSlotIdx(int slotIdx) { this.slotIdx_ = slotIdx; }
-  public String getLabel() { return label_; }
-  public void setLabel(String label) { label_ = label; }
-  public void setSourceExprs(List<Expr> exprs) { sourceExprs_ = exprs; }
-  public void setSourceExpr(Expr expr) { sourceExprs_ = Collections.singletonList(expr); }
-  public void addSourceExpr(Expr expr) { sourceExprs_.add(expr); }
-  public List<Expr> getSourceExprs() { return sourceExprs_; }
-  public void setStats(ColumnStats stats) { this.stats_ = stats; }
-
-  public void setPath(Path path) {
-    Preconditions.checkNotNull(path);
-    Preconditions.checkState(path.isRootedAtTuple());
-    Preconditions.checkState(path.getRootDesc() == parent_);
-    path_ = path;
-    type_ = path_.destType();
-    label_ = Joiner.on(".").join(path.getRawPath());
-
-    // Set nullability, if this refers to a KuduColumn.
-    if (path_.destColumn() instanceof KuduColumn) {
-      KuduColumn kuduColumn = (KuduColumn)path_.destColumn();
-      isNullable_ = kuduColumn.isNullable();
-    }
-  }
-
-  public Path getPath() { return path_; }
-
-  public boolean isScanSlot() { return path_ != null && path_.isRootedAtTable(); }
-
-  public Column getColumn() { return !isScanSlot() ? null : path_.destColumn(); }
-
-  public ColumnStats getStats() {
-    if (stats_ == null) {
-      Column c = getColumn();
-      if (c != null) {
-        stats_ = c.getStats();
-      } else {
-        stats_ = new ColumnStats(type_);
-      }
-    }
-    return stats_;
-  }
-
-  /**
-   * Assembles the absolute materialized path to this slot starting from the schema
-   * root. The materialized path points to the first non-struct schema element along the
-   * path starting from the parent's tuple path to this slot's path.
-   *
-   * The materialized path is used to determine when a new tuple (containing a new
-   * instance of this slot) should be created. A tuple is emitted for every data item
-   * pointed to by the materialized path. For scalar slots this trivially means that every
-   * data item goes into a different tuple. For collection slots, the materialized path
-   * specifies how many data items go into a single collection value.
-   *
-   * For scalar slots, the materialized path is the same as its path. However, for
-   * collection slots, the materialized path may be different than path_. This happens
-   * when the query materializes a "flattened" collection composed of concatenated nested
-   * collections.
-   *
-   * For example, given the table:
-   *   CREATE TABLE tbl (id bigint, outer_array array<array<int>>);
-   *
-   * And the query:
-   *   select id, inner_array.item from tbl t, t.outer_array.item inner_array
-   *
-   * The path 't.outer_array.item' corresponds to the absolute path [1,0]. However, the
-   * 'inner_array' slot appears in the table-level tuple, with tuplePath [] (i.e. one
-   * tuple materialized per table row). There is a single array materialized per
-   * 'outer_array', not per 'inner_array'. Thus the materializedPath for this slot will be
-   * [1], not [1,0].
-   */
-  public List<Integer> getMaterializedPath() {
-    Preconditions.checkNotNull(parent_);
-    // A slot descriptor typically only has a path if the parent also has one.
-    // However, we sometimes materialize inline-view tuples when generating plan trees
-    // with EmptySetNode portions. In that case, a slot descriptor could have a non-empty
-    // path pointing into the inline-view tuple (which has no path).
-    if (!isScanSlot() || parent_.getPath() == null) return Collections.emptyList();
-    Preconditions.checkState(path_.isResolved());
-
-    List<Integer> materializedPath = Lists.newArrayList(path_.getAbsolutePath());
-    // For scalar types, the materialized path is the same as path_
-    if (type_.isScalarType()) return materializedPath;
-    Preconditions.checkState(type_.isCollectionType());
-    Preconditions.checkState(path_.getFirstCollectionIndex() != -1);
-    // Truncate materializedPath after first collection element
-    // 'offset' adjusts for the index returned by path_.getFirstCollectionIndex() being
-    // relative to path_.getRootDesc()
-    int offset = !path_.isRootedAtTuple() ? 0 :
-        path_.getRootDesc().getPath().getAbsolutePath().size();
-    materializedPath.subList(
-        offset + path_.getFirstCollectionIndex() + 1, materializedPath.size()).clear();
-    return materializedPath;
-  }
-
-  /**
-   * Initializes a slot by setting its source expression information
-   */
-  public void initFromExpr(Expr expr) {
-    setLabel(expr.toSql());
-    Preconditions.checkState(sourceExprs_.isEmpty());
-    setSourceExpr(expr);
-    setStats(ColumnStats.fromExpr(expr));
-    Preconditions.checkState(expr.getType().isValid());
-    setType(expr.getType());
-  }
-
-  public TSlotDescriptor toThrift() {
-    Preconditions.checkState(isMaterialized_);
-    List<Integer> materializedPath = getMaterializedPath();
-    TSlotDescriptor result = new TSlotDescriptor(
-        id_.asInt(), parent_.getId().asInt(), type_.toThrift(),
-        materializedPath, byteOffset_, nullIndicatorByte_, nullIndicatorBit_,
-        slotIdx_);
-    if (itemTupleDesc_ != null) {
-      // Check for recursive or otherwise invalid item tuple descriptors. Since we assign
-      // tuple ids globally in increasing order, the id of an item tuple descriptor must
-      // always have been generated after the parent tuple id if the tuple/slot belong
-      // to a base table. For example, tuples/slots introduced during planning do not
-      // have such a guarantee.
-      Preconditions.checkState(!isScanSlot() ||
-          itemTupleDesc_.getId().asInt() > parent_.getId().asInt());
-      result.setItemTupleId(itemTupleDesc_.getId().asInt());
-    }
-    return result;
-  }
-
-  public String debugString() {
-    String pathStr = (path_ == null) ? "null" : path_.toString();
-    String typeStr = (type_ == null ? "null" : type_.toString());
-    return Objects.toStringHelper(this)
-        .add("id", id_.asInt())
-        .add("path", pathStr)
-        .add("type", typeStr)
-        .add("materialized", isMaterialized_)
-        .add("byteSize", byteSize_)
-        .add("byteOffset", byteOffset_)
-        .add("nullable", isNullable_)
-        .add("nullIndicatorByte", nullIndicatorByte_)
-        .add("nullIndicatorBit", nullIndicatorBit_)
-        .add("slotIdx", slotIdx_)
-        .add("stats", stats_)
-        .toString();
-  }
-}
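
The getMaterializedPath() comment above walks through the outer_array/inner_array
example; a self-contained sketch of the same truncation arithmetic, using
java.util.List and Guava's Lists as imported above (the local variables and
hard-coded indices are illustrative, not part of SlotDescriptor):

    // Absolute path of 't.outer_array.item' from the Javadoc example: [1, 0].
    List<Integer> materializedPath = Lists.newArrayList(1, 0);
    int firstCollectionIndex = 0;  // 'outer_array' is the first collection on the path
    int offset = 0;                // the path is rooted at the table, not at a tuple
    // Keep everything up to and including the first collection element, drop the rest.
    materializedPath.subList(
        offset + firstCollectionIndex + 1, materializedPath.size()).clear();
    // materializedPath is now [1]: one tuple per outer_array value, not per inner_array.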

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/SlotId.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/SlotId.java b/fe/src/main/java/com/cloudera/impala/analysis/SlotId.java
deleted file mode 100644
index 36e88e7..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/SlotId.java
+++ /dev/null
@@ -1,37 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import com.cloudera.impala.common.Id;
-import com.cloudera.impala.common.IdGenerator;
-
-public class SlotId extends Id<SlotId> {
-  // Construction only allowed via an IdGenerator.
-  protected SlotId(int id) {
-    super(id);
-  }
-
-  public static IdGenerator<SlotId> createGenerator() {
-    return new IdGenerator<SlotId>() {
-      @Override
-      public SlotId getNextId() { return new SlotId(nextId_++); }
-      @Override
-      public SlotId getMaxId() { return new SlotId(nextId_ - 1); }
-    };
-  }
-}
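
SlotId values can only be minted through the generator returned by createGenerator();
a minimal usage sketch, assuming the shared IdGenerator starts numbering at 0:

    IdGenerator<SlotId> gen = SlotId.createGenerator();
    SlotId first = gen.getNextId();   // id 0
    SlotId second = gen.getNextId();  // id 1
    SlotId highest = gen.getMaxId();  // the last id handed out, i.e. 1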

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/SlotRef.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/SlotRef.java b/fe/src/main/java/com/cloudera/impala/analysis/SlotRef.java
deleted file mode 100644
index 8a544d0..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/SlotRef.java
+++ /dev/null
@@ -1,243 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Set;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.analysis.Path.PathType;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.catalog.TableLoadingException;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TExprNode;
-import com.cloudera.impala.thrift.TExprNodeType;
-import com.cloudera.impala.thrift.TSlotRef;
-import com.google.common.base.Joiner;
-import com.google.common.base.Objects;
-import com.google.common.base.Preconditions;
-
-public class SlotRef extends Expr {
-  private final static Logger LOG = LoggerFactory.getLogger(SlotRef.class);
-
-  private final List<String> rawPath_;
-  private final String label_;  // printed in toSql()
-
-  // Results of analysis.
-  private SlotDescriptor desc_;
-
-  public SlotRef(ArrayList<String> rawPath) {
-    super();
-    rawPath_ = rawPath;
-    label_ = ToSqlUtils.getPathSql(rawPath_);
-  }
-
-  /**
-   * C'tor for a "dummy" SlotRef used in substitution maps.
-   */
-  public SlotRef(String alias) {
-    super();
-    rawPath_ = null;
-    // Relies on the label_ being compared in equals().
-    label_ = ToSqlUtils.getIdentSql(alias.toLowerCase());
-  }
-
-  /**
-   * C'tor for a "pre-analyzed" ref to a slot.
-   */
-  public SlotRef(SlotDescriptor desc) {
-    super();
-    if (desc.isScanSlot()) {
-      rawPath_ = desc.getPath().getRawPath();
-    } else {
-      rawPath_ = null;
-    }
-    isAnalyzed_ = true;
-    desc_ = desc;
-    type_ = desc.getType();
-    evalCost_ = SLOT_REF_COST;
-    String alias = desc.getParent().getAlias();
-    label_ = (alias != null ? alias + "." : "") + desc.getLabel();
-    numDistinctValues_ = desc.getStats().getNumDistinctValues();
-  }
-
-  /**
-   * C'tor for cloning.
-   */
-  private SlotRef(SlotRef other) {
-    super(other);
-    rawPath_ = other.rawPath_;
-    label_ = other.label_;
-    desc_ = other.desc_;
-    type_ = other.type_;
-    isAnalyzed_ = other.isAnalyzed_;
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    if (isAnalyzed_) return;
-    super.analyze(analyzer);
-    Path resolvedPath = null;
-    try {
-      resolvedPath = analyzer.resolvePath(rawPath_, PathType.SLOT_REF);
-    } catch (TableLoadingException e) {
-      // Should never happen because we only check registered table aliases.
-      Preconditions.checkState(false);
-    }
-    Preconditions.checkNotNull(resolvedPath);
-    desc_ = analyzer.registerSlotRef(resolvedPath);
-    type_ = desc_.getType();
-    if (!type_.isSupported()) {
-      throw new AnalysisException("Unsupported type '"
-          + type_.toSql() + "' in '" + toSql() + "'.");
-    }
-    if (type_.isInvalid()) {
-      // In this case, the metastore contained a string we can't parse at all
-      // e.g. map. We could report a better error if we stored the original
-      // HMS string.
-      throw new AnalysisException("Unsupported type in '" + toSql() + "'.");
-    }
-    evalCost_ = SLOT_REF_COST;
-
-    numDistinctValues_ = desc_.getStats().getNumDistinctValues();
-    Table rootTable = resolvedPath.getRootTable();
-    if (rootTable != null && rootTable.getNumRows() > 0) {
-      // The NDV cannot exceed the #rows in the table.
-      numDistinctValues_ = Math.min(numDistinctValues_, rootTable.getNumRows());
-    }
-    isAnalyzed_ = true;
-  }
-
-  @Override
-  public boolean isConstant() { return false; }
-
-  public SlotDescriptor getDesc() {
-    Preconditions.checkState(isAnalyzed_);
-    Preconditions.checkNotNull(desc_);
-    return desc_;
-  }
-
-  public SlotId getSlotId() {
-    Preconditions.checkState(isAnalyzed_);
-    Preconditions.checkNotNull(desc_);
-    return desc_.getId();
-  }
-
-  public Path getResolvedPath() {
-    Preconditions.checkState(isAnalyzed_);
-    return desc_.getPath();
-  }
-
-  @Override
-  public String toSqlImpl() {
-    if (label_ != null) return label_;
-    if (rawPath_ != null) return ToSqlUtils.getPathSql(rawPath_);
-    return "<slot " + Integer.toString(desc_.getId().asInt()) + ">";
-  }
-
-  @Override
-  protected void toThrift(TExprNode msg) {
-    msg.node_type = TExprNodeType.SLOT_REF;
-    msg.slot_ref = new TSlotRef(desc_.getId().asInt());
-    // we shouldn't be sending exprs over non-materialized slots
-    Preconditions.checkState(desc_.isMaterialized(), String.format(
-        "Illegal reference to non-materialized slot: tid=%s sid=%s",
-        desc_.getParent().getId(), desc_.getId()));
-    // check that the tuples associated with this slot are executable
-    desc_.getParent().checkIsExecutable();
-    if (desc_.getItemTupleDesc() != null) desc_.getItemTupleDesc().checkIsExecutable();
-  }
-
-  @Override
-  public String debugString() {
-    Objects.ToStringHelper toStrHelper = Objects.toStringHelper(this);
-    if (rawPath_ != null) toStrHelper.add("path", Joiner.on('.').join(rawPath_));
-    toStrHelper.add("type", type_.toSql());
-    String idStr = (desc_ == null ? "null" : Integer.toString(desc_.getId().asInt()));
-    toStrHelper.add("id", idStr);
-    return toStrHelper.toString();
-  }
-
-  @Override
-  public int hashCode() {
-    if (desc_ != null) return desc_.getId().hashCode();
-    return Objects.hashCode(Joiner.on('.').join(rawPath_).toLowerCase());
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (!super.equals(obj)) return false;
-    SlotRef other = (SlotRef) obj;
-    // check slot ids first; if they're both set we only need to compare those
-    // (regardless of how the ref was constructed)
-    if (desc_ != null && other.desc_ != null) {
-      return desc_.getId().equals(other.desc_.getId());
-    }
-    if ((label_ == null) != (other.label_ == null)) return false;
-    if (!label_.equalsIgnoreCase(other.label_)) return false;
-    return true;
-  }
-
-  @Override
-  public boolean isBoundByTupleIds(List<TupleId> tids) {
-    Preconditions.checkState(desc_ != null);
-    for (TupleId tid: tids) {
-      if (tid.equals(desc_.getParent().getId())) return true;
-    }
-    return false;
-  }
-
-  @Override
-  public boolean isBoundBySlotIds(List<SlotId> slotIds) {
-    Preconditions.checkState(isAnalyzed_);
-    return slotIds.contains(desc_.getId());
-  }
-
-  @Override
-  public void getIdsHelper(Set<TupleId> tupleIds, Set<SlotId> slotIds) {
-    Preconditions.checkState(type_.isValid());
-    Preconditions.checkState(desc_ != null);
-    if (slotIds != null) slotIds.add(desc_.getId());
-    if (tupleIds != null) tupleIds.add(desc_.getParent().getId());
-  }
-
-  @Override
-  public Expr clone() { return new SlotRef(this); }
-
-  @Override
-  public String toString() {
-    if (desc_ != null) {
-      return "tid=" + desc_.getParent().getId() + " sid=" + desc_.getId();
-    }
-    return "no desc set";
-  }
-
-  @Override
-  protected Expr uncheckedCastTo(Type targetType) throws AnalysisException {
-    if (type_.isNull()) {
-      // Hack to prevent null SlotRefs in the BE
-      return NullLiteral.create(targetType);
-    } else {
-      return super.uncheckedCastTo(targetType);
-    }
-  }
-}
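
The removed SlotRef has three construction modes: a raw path to resolve, an
alias-only "dummy" for substitution maps, and a pre-analyzed ref built from a
SlotDescriptor. A brief sketch of the first two, with illustrative identifiers:

    // Path-based ref: the path is resolved against the analyzer in analyze().
    SlotRef byPath = new SlotRef(Lists.newArrayList("t", "id"));  // label "t.id"
    // Dummy ref: only its label is compared in equals(); it is never analyzed.
    SlotRef dummy = new SlotRef("total_col");
    // Until analyze() succeeds, toSql() just echoes the label; afterwards getDesc(),
    // getSlotId() and getResolvedPath() become legal to call.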

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/SortInfo.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/SortInfo.java b/fe/src/main/java/com/cloudera/impala/analysis/SortInfo.java
deleted file mode 100644
index 469f1e6..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/SortInfo.java
+++ /dev/null
@@ -1,131 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.List;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-/**
- * Encapsulates all the information needed to compute ORDER BY.
- * This doesn't contain aliases or positional exprs.
- * TODO: reorganize this completely, this doesn't really encapsulate anything; this
- * should move into planner/ and encapsulate the implementation of the sort of a
- * particular input row (materialize all row slots)
- */
-public class SortInfo {
-  private List<Expr> orderingExprs_;
-  private final List<Boolean> isAscOrder_;
-  // True if "NULLS FIRST", false if "NULLS LAST", null if not specified.
-  private final List<Boolean> nullsFirstParams_;
-  // The single tuple that is materialized, sorted and output by a sort operator
-  // (i.e. SortNode or TopNNode)
-  private TupleDescriptor sortTupleDesc_;
-  // Input expressions materialized into sortTupleDesc_. One expr per slot in
-  // sortTupleDesc_.
-  private List<Expr> sortTupleSlotExprs_;
-
-  public SortInfo(List<Expr> orderingExprs, List<Boolean> isAscOrder,
-      List<Boolean> nullsFirstParams) {
-    Preconditions.checkArgument(orderingExprs.size() == isAscOrder.size());
-    Preconditions.checkArgument(orderingExprs.size() == nullsFirstParams.size());
-    orderingExprs_ = orderingExprs;
-    isAscOrder_ = isAscOrder;
-    nullsFirstParams_ = nullsFirstParams;
-  }
-
-  /**
-   * C'tor for cloning.
-   */
-  private SortInfo(SortInfo other) {
-    orderingExprs_ = Expr.cloneList(other.orderingExprs_);
-    isAscOrder_ = Lists.newArrayList(other.isAscOrder_);
-    nullsFirstParams_ = Lists.newArrayList(other.nullsFirstParams_);
-    sortTupleDesc_ = other.sortTupleDesc_;
-    if (other.sortTupleSlotExprs_ != null) {
-      sortTupleSlotExprs_ = Expr.cloneList(other.sortTupleSlotExprs_);
-    }
-  }
-
-  public void setMaterializedTupleInfo(
-      TupleDescriptor tupleDesc, List<Expr> tupleSlotExprs) {
-    sortTupleDesc_ = tupleDesc;
-    sortTupleSlotExprs_ = tupleSlotExprs;
-    for (int i = 0; i < sortTupleDesc_.getSlots().size(); ++i) {
-      SlotDescriptor slotDesc = sortTupleDesc_.getSlots().get(i);
-      slotDesc.setSourceExpr(sortTupleSlotExprs_.get(i));
-    }
-  }
-  public List<Expr> getOrderingExprs() { return orderingExprs_; }
-  public List<Boolean> getIsAscOrder() { return isAscOrder_; }
-  public List<Boolean> getNullsFirstParams() { return nullsFirstParams_; }
-  public List<Expr> getSortTupleSlotExprs() { return sortTupleSlotExprs_; }
-  public TupleDescriptor getSortTupleDescriptor() { return sortTupleDesc_; }
-
-  /**
-   * Gets the list of booleans indicating whether nulls come first or last, independent
-   * of asc/desc.
-   */
-  public List<Boolean> getNullsFirst() {
-    List<Boolean> nullsFirst = Lists.newArrayList();
-    for (int i = 0; i < orderingExprs_.size(); ++i) {
-      nullsFirst.add(OrderByElement.nullsFirst(nullsFirstParams_.get(i),
-          isAscOrder_.get(i)));
-    }
-    return nullsFirst;
-  }
-
-  /**
-   * Materializes the slots in sortTupleDesc_ referenced in the ordering exprs.
-   * Materializes the slots referenced by the corresponding sortTupleSlotExpr after
-   * applying the 'smap'.
-   */
-  public void materializeRequiredSlots(Analyzer analyzer, ExprSubstitutionMap smap) {
-    Preconditions.checkNotNull(sortTupleDesc_);
-    Preconditions.checkNotNull(sortTupleSlotExprs_);
-    Preconditions.checkState(sortTupleDesc_.isMaterialized());
-    analyzer.materializeSlots(orderingExprs_);
-    List<SlotDescriptor> sortTupleSlotDescs = sortTupleDesc_.getSlots();
-    List<Expr> materializedExprs = Lists.newArrayList();
-    for (int i = 0; i < sortTupleSlotDescs.size(); ++i) {
-      if (sortTupleSlotDescs.get(i).isMaterialized()) {
-        materializedExprs.add(sortTupleSlotExprs_.get(i));
-      }
-    }
-    List<Expr> substMaterializedExprs =
-        Expr.substituteList(materializedExprs, smap, analyzer, false);
-    analyzer.materializeSlots(substMaterializedExprs);
-  }
-
-  public void substituteOrderingExprs(ExprSubstitutionMap smap, Analyzer analyzer) {
-    orderingExprs_ = Expr.substituteList(orderingExprs_, smap, analyzer, false);
-  }
-
-  /**
-   * Asserts that all ordering exprs are bound by the sort tuple.
-   */
-  public void checkConsistency() {
-    for (Expr orderingExpr: orderingExprs_) {
-      Preconditions.checkState(orderingExpr.isBound(sortTupleDesc_.getId()));
-    }
-  }
-
-  @Override
-  public SortInfo clone() { return new SortInfo(this); }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/StatementBase.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/StatementBase.java b/fe/src/main/java/com/cloudera/impala/analysis/StatementBase.java
deleted file mode 100644
index 9a6cb1b..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/StatementBase.java
+++ /dev/null
@@ -1,141 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import org.apache.commons.lang.NotImplementedException;
-
-import com.cloudera.impala.catalog.Column;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-
-/**
- * Base class for all Impala SQL statements.
- */
-abstract class StatementBase implements ParseNode {
-
-  // True if this Stmt is the top level of an explain stmt.
-  protected boolean isExplain_ = false;
-
-  /////////////////////////////////////////
-  // BEGIN: Members that need to be reset()
-
-  // Analyzer that was used to analyze this statement.
-  protected Analyzer analyzer_;
-
-  // END: Members that need to be reset()
-  /////////////////////////////////////////
-
-  protected StatementBase() { }
-
-  /**
-   * C'tor for cloning.
-   */
-  protected StatementBase(StatementBase other) {
-    analyzer_ = other.analyzer_;
-    isExplain_ = other.isExplain_;
-  }
-
-  /**
-   * Analyzes the statement and throws an AnalysisException if analysis fails. A failure
-   * could be due to a problem with the statement or because one or more tables/views
-   * were missing from the catalog.
-   * It is up to the analysis() implementation to ensure the maximum number of missing
-   * tables/views get collected in the Analyzer before failing analyze().
-   */
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    if (isAnalyzed()) return;
-    if (isExplain_) analyzer.setIsExplain();
-    analyzer_ = analyzer;
-  }
-
-  public Analyzer getAnalyzer() { return analyzer_; }
-  public boolean isAnalyzed() { return analyzer_ != null; }
-
-  public String toSql() { return ""; }
-  public void setIsExplain() { isExplain_ = true; }
-  public boolean isExplain() { return isExplain_; }
-
-  /**
-   * Returns a deep copy of this node including its analysis state. Some members such as
-   * tuple and slot descriptors are generally not deep copied to avoid potential
-   * confusion of having multiple descriptor instances with the same id, although
-   * they should be unique in the descriptor table.
-   * TODO for 2.3: Consider also cloning table and slot descriptors for clarity,
-   * or otherwise make changes to provide more clearly defined clone() semantics.
-   */
-  @Override
-  public StatementBase clone() {
-    throw new NotImplementedException(
-        "Clone() not implemented for " + getClass().getSimpleName());
-  }
-
-  /**
-   * Resets the internal analysis state of this node.
-   * For easier maintenance, class members that need to be reset are grouped into
-   * a 'section' clearly indicated by comments as follows:
-   *
-   * class SomeStmt extends StatementBase {
-   *   ...
-   *   /////////////////////////////////////////
-   *   // BEGIN: Members that need to be reset()
-   *
-   *   <member declarations>
-   *
-   *   // END: Members that need to be reset()
-   *   /////////////////////////////////////////
-   *   ...
-   * }
-   *
-   * In general, members that are set or modified during analyze() must be reset().
-   * TODO: Introduce this same convention for Exprs, possibly by moving clone()/reset()
-   * into the ParseNode interface for clarity.
-   */
-  public void reset() { analyzer_ = null; }
-
-  /**
-   * Checks that 'srcExpr' is type compatible with 'dstCol' and returns a type compatible
-   * expression by applying a CAST() if needed. Throws an AnalysisException if the types
-   * are incompatible. 'dstTableName' is only used when constructing an AnalysisException
-   * message.
-   */
-  protected Expr checkTypeCompatibility(String dstTableName, Column dstCol, Expr srcExpr)
-      throws AnalysisException {
-    Type dstColType = dstCol.getType();
-    Type srcExprType = srcExpr.getType();
-
-    // Trivially compatible, unless the type is complex.
-    if (dstColType.equals(srcExprType) && !dstColType.isComplexType()) return srcExpr;
-
-    Type compatType = Type.getAssignmentCompatibleType(dstColType, srcExprType, false);
-    if (!compatType.isValid()) {
-      throw new AnalysisException(String.format(
-          "Target table '%s' is incompatible with source expressions.\nExpression '%s' " +
-              "(type: %s) is not compatible with column '%s' (type: %s)",
-          dstTableName, srcExpr.toSql(), srcExprType.toSql(), dstCol.getName(),
-          dstColType.toSql()));
-    }
-    if (!compatType.equals(dstColType) && !compatType.isNull()) {
-      throw new AnalysisException(String.format(
-          "Possible loss of precision for target table '%s'.\nExpression '%s' (type: " +
-              "%s) would need to be cast to %s for column '%s'",
-          dstTableName, srcExpr.toSql(), srcExprType.toSql(), dstColType.toSql(),
-          dstCol.getName()));
-    }
-    return srcExpr.castTo(compatType);
-  }
-}
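
checkTypeCompatibility() above has three outcomes; a hedged sketch of how a caller
such as an INSERT analysis path would use it, where dstCol and srcExpr are
hypothetical placeholders:

    // Returns srcExpr unchanged when the types already match, otherwise srcExpr cast
    // to the common assignment-compatible type.
    Expr compatible = checkTypeCompatibility("functional.alltypes", dstCol, srcExpr);
    // If no assignment-compatible type exists, the "incompatible with source
    // expressions" AnalysisException above is thrown; if the compatible type is wider
    // than the column type, the "Possible loss of precision" error is thrown instead.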


[41/61] [partial] incubator-impala git commit: IMPALA-3786: Replace "cloudera" with "apache" (part 1)

Posted by kw...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/FunctionCallExpr.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/FunctionCallExpr.java b/fe/src/main/java/com/cloudera/impala/analysis/FunctionCallExpr.java
deleted file mode 100644
index d53aa9e..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/FunctionCallExpr.java
+++ /dev/null
@@ -1,516 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.List;
-
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.catalog.AggregateFunction;
-import com.cloudera.impala.catalog.Catalog;
-import com.cloudera.impala.catalog.Db;
-import com.cloudera.impala.catalog.Function;
-import com.cloudera.impala.catalog.ScalarFunction;
-import com.cloudera.impala.catalog.ScalarType;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.TreeNode;
-import com.cloudera.impala.thrift.TAggregateExpr;
-import com.cloudera.impala.thrift.TExprNode;
-import com.cloudera.impala.thrift.TExprNodeType;
-import com.cloudera.impala.thrift.TFunctionBinaryType;
-import com.google.common.base.Joiner;
-import com.google.common.base.Objects;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-public class FunctionCallExpr extends Expr {
-  private final FunctionName fnName_;
-  private final FunctionParams params_;
-  private boolean isAnalyticFnCall_ = false;
-  private boolean isInternalFnCall_ = false;
-
-  // Indicates whether this is a merge aggregation function that should use the merge
-  // instead of the update symbol. This flag also affects the behavior of
-  // resetAnalysisState() which is used during expr substitution.
-  private final boolean isMergeAggFn_;
-
-  // Printed in toSqlImpl(), if set. Used for merge agg fns.
-  private String label_;
-
-  public FunctionCallExpr(String functionName, List<Expr> params) {
-    this(new FunctionName(functionName), new FunctionParams(false, params));
-  }
-
-  public FunctionCallExpr(FunctionName fnName, List<Expr> params) {
-    this(fnName, new FunctionParams(false, params));
-  }
-
-  public FunctionCallExpr(FunctionName fnName, FunctionParams params) {
-    this(fnName, params, false);
-  }
-
-  private FunctionCallExpr(
-      FunctionName fnName, FunctionParams params, boolean isMergeAggFn) {
-    super();
-    fnName_ = fnName;
-    params_ = params;
-    isMergeAggFn_ = isMergeAggFn;
-    if (params.exprs() != null) children_ = Lists.newArrayList(params_.exprs());
-  }
-
-  /**
-   * Returns an Expr that evaluates the function call <fnName>(<params>). The returned
-   * Expr is not necessarily a FunctionCallExpr (example: DECODE())
-   */
-  public static Expr createExpr(FunctionName fnName, FunctionParams params) {
-    FunctionCallExpr functionCallExpr = new FunctionCallExpr(fnName, params);
-    if (fnName.getFnNamePath().size() == 1
-            && fnName.getFnNamePath().get(0).equalsIgnoreCase("decode")
-        || fnName.getFnNamePath().size() == 2
-            && fnName.getFnNamePath().get(0).equalsIgnoreCase(Catalog.BUILTINS_DB)
-            && fnName.getFnNamePath().get(1).equalsIgnoreCase("decode")) {
-      return new CaseExpr(functionCallExpr);
-    }
-    return functionCallExpr;
-  }
-
-  /**
-   * Returns a new function call expr on the given params for performing the merge()
-   * step of the given aggregate function.
-   */
-  public static FunctionCallExpr createMergeAggCall(
-      FunctionCallExpr agg, List<Expr> params) {
-    Preconditions.checkState(agg.isAnalyzed_);
-    Preconditions.checkState(agg.isAggregateFunction());
-    FunctionCallExpr result = new FunctionCallExpr(
-        agg.fnName_, new FunctionParams(false, params), true);
-    // Inherit the function object from 'agg'.
-    result.fn_ = agg.fn_;
-    result.type_ = agg.type_;
-    // Set an explicit label based on the input agg.
-    if (agg.isMergeAggFn_) {
-      result.label_ = agg.label_;
-    } else {
-      // fn(input) becomes fn:merge(input).
-      result.label_ = agg.toSql().replaceFirst(agg.fnName_.toString(),
-          agg.fnName_.toString() + ":merge");
-    }
-    Preconditions.checkState(!result.type_.isWildcardDecimal());
-    return result;
-  }
-
-  /**
-   * Copy c'tor used in clone().
-   */
-  protected FunctionCallExpr(FunctionCallExpr other) {
-    super(other);
-    fnName_ = other.fnName_;
-    isAnalyticFnCall_ = other.isAnalyticFnCall_;
-    isInternalFnCall_ = other.isInternalFnCall_;
-    isMergeAggFn_ = other.isMergeAggFn_;
-    // Clone the params in a way that keeps the children_ and the params.exprs()
-    // in sync. The children have already been cloned in the super c'tor.
-    if (other.params_.isStar()) {
-      Preconditions.checkState(children_.isEmpty());
-      params_ = FunctionParams.createStarParam();
-    } else {
-      params_ = new FunctionParams(other.params_.isDistinct(), children_);
-    }
-    label_ = other.label_;
-  }
-
-  public boolean isMergeAggFn() { return isMergeAggFn_; }
-
-  @Override
-  public void resetAnalysisState() {
-    isAnalyzed_ = false;
-    // Resolving merge agg functions after substitution may fail e.g., if the
-    // intermediate agg type is not the same as the output type. Preserve the original
-    // fn_ such that analyze() hits the special-case code for merge agg fns that
-    // handles this case.
-    if (!isMergeAggFn_) fn_ = null;
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (!super.equals(obj)) return false;
-    FunctionCallExpr o = (FunctionCallExpr)obj;
-    return fnName_.equals(o.fnName_) &&
-           params_.isDistinct() == o.params_.isDistinct() &&
-           params_.isStar() == o.params_.isStar();
-  }
-
-  @Override
-  public String toSqlImpl() {
-    if (label_ != null) return label_;
-    // Merge agg fns should have an explicit label.
-    Preconditions.checkState(!isMergeAggFn_);
-    StringBuilder sb = new StringBuilder();
-    sb.append(fnName_).append("(");
-    if (params_.isStar()) sb.append("*");
-    if (params_.isDistinct()) sb.append("DISTINCT ");
-    sb.append(Joiner.on(", ").join(childrenToSql())).append(")");
-    return sb.toString();
-  }
-
-  @Override
-  public String debugString() {
-    return Objects.toStringHelper(this)
-        .add("name", fnName_)
-        .add("isStar", params_.isStar())
-        .add("isDistinct", params_.isDistinct())
-        .addValue(super.debugString())
-        .toString();
-  }
-
-  public FunctionParams getParams() { return params_; }
-  public boolean isScalarFunction() {
-    Preconditions.checkNotNull(fn_);
-    return fn_ instanceof ScalarFunction;
-  }
-
-  public Type getReturnType() {
-    Preconditions.checkNotNull(fn_);
-    return fn_.getReturnType();
-  }
-
-  /**
-   * Returns true if this is a call to a non-analytic aggregate function.
-   */
-  public boolean isAggregateFunction() {
-    Preconditions.checkNotNull(fn_);
-    return fn_ instanceof AggregateFunction && !isAnalyticFnCall_;
-  }
-
-  /**
-   * Returns true if this is a call to an aggregate function that returns
-   * non-null on an empty input (e.g. count).
-   */
-  public boolean returnsNonNullOnEmpty() {
-    Preconditions.checkNotNull(fn_);
-    return fn_ instanceof AggregateFunction &&
-        ((AggregateFunction)fn_).returnsNonNullOnEmpty();
-  }
-
-  public boolean isDistinct() {
-    Preconditions.checkState(isAggregateFunction());
-    return params_.isDistinct();
-  }
-
-  public boolean ignoresDistinct() {
-    Preconditions.checkState(isAggregateFunction());
-    return ((AggregateFunction)fn_).ignoresDistinct();
-  }
-
-  public FunctionName getFnName() { return fnName_; }
-  public void setIsAnalyticFnCall(boolean v) { isAnalyticFnCall_ = v; }
-  public void setIsInternalFnCall(boolean v) { isInternalFnCall_ = v; }
-
-  @Override
-  protected void toThrift(TExprNode msg) {
-    if (isAggregateFunction() || isAnalyticFnCall_) {
-      msg.node_type = TExprNodeType.AGGREGATE_EXPR;
-      if (!isAnalyticFnCall_) msg.setAgg_expr(new TAggregateExpr(isMergeAggFn_));
-    } else {
-      msg.node_type = TExprNodeType.FUNCTION_CALL;
-    }
-  }
-
-  /**
-   * Aggregate functions are never constant.
-   */
-  @Override
-  public boolean isConstant() {
-    if (fn_ != null && fn_ instanceof AggregateFunction) return false;
-    return super.isConstant();
-  }
-
-  // Provides a better error message for some aggregate builtins. These can be
-  // a bit more user-friendly than a generic "function not found" error.
-  // TODO: should we bother to do this? We could also improve the general
-  // error messages, for example by listing the alternatives.
-  protected String getFunctionNotFoundError(Type[] argTypes) {
-    if (fnName_.isBuiltin()) {
-      // Some custom error message for builtins
-      if (params_.isStar()) {
-        return "'*' can only be used in conjunction with COUNT";
-      }
-      if (fnName_.getFunction().equalsIgnoreCase("count")) {
-        if (!params_.isDistinct() && argTypes.length > 1) {
-          return "COUNT must have DISTINCT for multiple arguments: " + toSql();
-        }
-      }
-      if (fnName_.getFunction().equalsIgnoreCase("sum")) {
-        return "SUM requires a numeric parameter: " + toSql();
-      }
-      if (fnName_.getFunction().equalsIgnoreCase("avg")) {
-        return "AVG requires a numeric or timestamp parameter: " + toSql();
-      }
-    }
-
-    String[] argTypesSql = new String[argTypes.length];
-    for (int i = 0; i < argTypes.length; ++i) {
-      argTypesSql[i] = argTypes[i].toSql();
-    }
-    return String.format(
-        "No matching function with signature: %s(%s).",
-        fnName_, params_.isStar() ? "*" : Joiner.on(", ").join(argTypesSql));
-  }
-
-  /**
-   * Builtins that return decimals are specified as the wildcard decimal(decimal(*,*))
-   * and the specific decimal can only be determined based on the inputs. We currently
-   * don't have a mechanism to specify this with the UDF interface. Until we add
-   * that (i.e. allowing UDFs to participate in the planning phase), we will
-   * manually resolve the wildcard types for the few functions that need it.
-   * This can only be called for functions that return wildcard decimals and the first
-   * argument is a wildcard decimal.
-   * TODO: this prevents UDFs from using wildcard decimals and is in general not scalable.
-   * We should add a prepare_fn() to UDFs for doing this.
-   */
-  private Type resolveDecimalReturnType(Analyzer analyzer) throws AnalysisException {
-    Preconditions.checkState(type_.isWildcardDecimal());
-    Preconditions.checkState(fn_.getBinaryType() == TFunctionBinaryType.BUILTIN);
-    Preconditions.checkState(children_.size() > 0);
-
-    // Find first decimal input (some functions, such as if(), begin with non-decimal
-    // arguments).
-    ScalarType childType = null;
-    for (Expr child : children_) {
-      if (child.type_.isDecimal()) {
-        childType = (ScalarType) child.type_;
-        break;
-      }
-    }
-    Preconditions.checkState(childType != null && !childType.isWildcardDecimal());
-    Type returnType = childType;
-
-    if (fnName_.getFunction().equalsIgnoreCase("sum")) {
-      return childType.getMaxResolutionType();
-    }
-
-    int digitsBefore = childType.decimalPrecision() - childType.decimalScale();
-    int digitsAfter = childType.decimalScale();
-    if (fnName_.getFunction().equalsIgnoreCase("ceil") ||
-               fnName_.getFunction().equalsIgnoreCase("ceiling") ||
-               fnName_.getFunction().equals("floor") ||
-               fnName_.getFunction().equals("dfloor")) {
-      // These functions just return with scale 0 but can trigger rounding. We need
-      // to increase the precision by 1 to handle that.
-      ++digitsBefore;
-      digitsAfter = 0;
-    } else if (fnName_.getFunction().equalsIgnoreCase("truncate") ||
-               fnName_.getFunction().equalsIgnoreCase("dtrunc") ||
-               fnName_.getFunction().equalsIgnoreCase("round") ||
-               fnName_.getFunction().equalsIgnoreCase("dround")) {
-      if (children_.size() > 1) {
-        // The second argument to these functions is the desired scale, otherwise
-        // the default is 0.
-        Preconditions.checkState(children_.size() == 2);
-        if (children_.get(1).isNullLiteral()) {
-          throw new AnalysisException(fnName_.getFunction() +
-              "() cannot be called with a NULL second argument.");
-        }
-
-        if (!children_.get(1).isConstant()) {
-          // We don't allow calling truncate or round with a non-constant second
-          // (desired scale) argument. e.g. select round(col1, col2). This would
-          // mean we don't know the scale of the resulting type and would need some
-          // kind of dynamic type handling which is not yet possible. This seems like
-          // a reasonable restriction.
-          throw new AnalysisException(fnName_.getFunction() +
-              "() must be called with a constant second argument.");
-        }
-        NumericLiteral scaleLiteral = (NumericLiteral) LiteralExpr.create(
-            children_.get(1), analyzer.getQueryCtx());
-        digitsAfter = (int)scaleLiteral.getLongValue();
-        if (Math.abs(digitsAfter) > ScalarType.MAX_SCALE) {
-          throw new AnalysisException("Cannot round/truncate to scales greater than " +
-              ScalarType.MAX_SCALE + ".");
-        }
-        // Round/Truncate to a negative scale means to round to the digit before
-        // the decimal e.g. round(1234.56, -2) would be 1200.
-        // The resulting scale is always 0.
-        digitsAfter = Math.max(digitsAfter, 0);
-      } else {
-        // Round()/Truncate() with no second argument.
-        digitsAfter = 0;
-      }
-
-      if ((fnName_.getFunction().equalsIgnoreCase("round") ||
-           fnName_.getFunction().equalsIgnoreCase("dround")) &&
-          digitsAfter < childType.decimalScale()) {
-        // If we are rounding to fewer decimal places, it's possible we need another
-        // digit before the decimal.
-        ++digitsBefore;
-      }
-    }
-    Preconditions.checkState(returnType.isDecimal() && !returnType.isWildcardDecimal());
-    return ScalarType.createDecimalTypeInternal(digitsBefore + digitsAfter, digitsAfter);
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    if (isAnalyzed_) return;
-    super.analyze(analyzer);
-    fnName_.analyze(analyzer);
-
-    if (isMergeAggFn_) {
-      // This is the function call expr after splitting up to a merge aggregation.
-      // The function has already been analyzed so just do the minimal sanity
-      // check here.
-      AggregateFunction aggFn = (AggregateFunction)fn_;
-      Preconditions.checkNotNull(aggFn);
-      Type intermediateType = aggFn.getIntermediateType();
-      if (intermediateType == null) intermediateType = type_;
-      Preconditions.checkState(!type_.isWildcardDecimal());
-      return;
-    }
-
-    Type[] argTypes = collectChildReturnTypes();
-
-    // User needs DB access.
-    Db db = analyzer.getDb(fnName_.getDb(), Privilege.VIEW_METADATA, true);
-    if (!db.containsFunction(fnName_.getFunction())) {
-      throw new AnalysisException(fnName_ + "() unknown");
-    }
-
-    if (fnName_.getFunction().equals("count") && params_.isDistinct()) {
-      // Treat COUNT(DISTINCT ...) special because of how we do the rewrite.
-      // There is no version of COUNT() that takes more than 1 argument but after
-      // the rewrite, we only need count(*).
-      // TODO: fix how we rewrite count distinct.
-      argTypes = new Type[0];
-      Function searchDesc = new Function(fnName_, argTypes, Type.INVALID, false);
-      fn_ = db.getFunction(searchDesc, Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
-      type_ = fn_.getReturnType();
-      // Make sure BE doesn't see any TYPE_NULL exprs
-      for (int i = 0; i < children_.size(); ++i) {
-        if (getChild(i).getType().isNull()) {
-          uncheckedCastChild(ScalarType.BOOLEAN, i);
-        }
-      }
-      return;
-    }
-
-    // TODO: We allow an implicit cast from string->timestamp but only
-    // support avg(timestamp). Under our casting rules this means avg(string_col)
-    // would be accepted, which is not right.
-    // We need to revisit where implicit casts from string to timestamp
-    // are allowed.
-    if (fnName_.getFunction().equalsIgnoreCase("avg") &&
-      children_.size() == 1 && children_.get(0).getType().isStringType()) {
-      throw new AnalysisException(
-          "AVG requires a numeric or timestamp parameter: " + toSql());
-    }
-
-    Function searchDesc = new Function(fnName_, argTypes, Type.INVALID, false);
-    fn_ = db.getFunction(searchDesc, Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
-    if (fn_ == null || (!isInternalFnCall_ && !fn_.userVisible())) {
-      throw new AnalysisException(getFunctionNotFoundError(argTypes));
-    }
-
-    if (isAggregateFunction()) {
-      // subexprs must not contain aggregates
-      if (TreeNode.contains(children_, Expr.isAggregatePredicate())) {
-        throw new AnalysisException(
-            "aggregate function must not contain aggregate parameters: " + this.toSql());
-      }
-
-      // .. or analytic exprs
-      if (Expr.contains(children_, AnalyticExpr.class)) {
-        throw new AnalysisException(
-            "aggregate function must not contain analytic parameters: " + this.toSql());
-      }
-
-      // The catalog contains count() with no arguments to handle count(*) but don't
-      // accept count().
-      // TODO: can this be handled more cleanly? It does seem like a special case since
-      // no other aggregate functions (currently) can accept '*'.
-      if (fnName_.getFunction().equalsIgnoreCase("count") &&
-          !params_.isStar() && children_.size() == 0) {
-        throw new AnalysisException("count() is not allowed.");
-      }
-
-      // TODO: the distinct rewrite does not handle this but why?
-      if (params_.isDistinct()) {
-        // The second argument in group_concat(distinct) must be a constant expr that
-        // returns a string.
-        if (fnName_.getFunction().equalsIgnoreCase("group_concat")
-            && getChildren().size() == 2
-            && !getChild(1).isConstant()) {
-            throw new AnalysisException("Second parameter in GROUP_CONCAT(DISTINCT)" +
-                " must be a constant expression that returns a string.");
-        }
-        if (fn_.getBinaryType() != TFunctionBinaryType.BUILTIN) {
-          throw new AnalysisException("User defined aggregates do not support DISTINCT.");
-        }
-      }
-
-      AggregateFunction aggFn = (AggregateFunction)fn_;
-      if (aggFn.ignoresDistinct()) params_.setIsDistinct(false);
-    }
-
-    if (params_.isIgnoreNulls() && !isAnalyticFnCall_) {
-      throw new AnalysisException("Function " + fnName_.getFunction().toUpperCase()
-          + " does not accept the keyword IGNORE NULLS.");
-    }
-
-    if (isScalarFunction()) validateScalarFnParams(params_);
-    if (fn_ instanceof AggregateFunction
-        && ((AggregateFunction) fn_).isAnalyticFn()
-        && !((AggregateFunction) fn_).isAggregateFn()
-        && !isAnalyticFnCall_) {
-      throw new AnalysisException(
-          "Analytic function requires an OVER clause: " + toSql());
-    }
-
-    castForFunctionCall(false);
-    type_ = fn_.getReturnType();
-    if (type_.isDecimal() && type_.isWildcardDecimal()) {
-      type_ = resolveDecimalReturnType(analyzer);
-    }
-
-    // We do not allow any function to return type CHAR or VARCHAR.
-    // TODO: add support for CHAR(N) and VARCHAR(N) return values post 2.0;
-    // support for this was not added to the backend in 2.0.
-    if (type_.isWildcardChar() || type_.isWildcardVarchar()) {
-      type_ = ScalarType.STRING;
-    }
-
-    // TODO(tmarshall): Differentiate based on the specific function.
-    if (hasChildCosts()) evalCost_ = getChildCosts() + FUNCTION_CALL_COST;
-  }
-
-  /**
-   * Checks that no special aggregate params are included in 'params' that would be
-   * invalid for a scalar function. Analysis of the param exprs is not done.
-   */
-  static void validateScalarFnParams(FunctionParams params)
-      throws AnalysisException {
-    if (params.isStar()) {
-      throw new AnalysisException("Cannot pass '*' to scalar function.");
-    }
-    if (params.isDistinct()) {
-      throw new AnalysisException("Cannot pass 'DISTINCT' to scalar function.");
-    }
-  }
-
-  @Override
-  public Expr clone() { return new FunctionCallExpr(this); }
-}
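
Editor's note: the wildcard-decimal handling in resolveDecimalReturnType() above is easy to lose inside the analyzer plumbing. The standalone sketch below mirrors only the precision/scale arithmetic used for round()/dround(); the class and method names are illustrative and are not Impala frontend API. (sum() is handled differently above: it widens to the type's max resolution instead of adjusting digits.)

    // Minimal sketch (not Impala code): precision/scale of round(DECIMAL(p,s), d),
    // following the rules implemented above for round()/dround().
    public class DecimalRoundTypeSketch {
      static final int MAX_SCALE = 38;  // assumed to match ScalarType.MAX_SCALE

      /** Returns {precision, scale} of round(DECIMAL(precision, scale), targetScale). */
      static int[] roundReturnType(int precision, int scale, int targetScale) {
        if (Math.abs(targetScale) > MAX_SCALE) {
          throw new IllegalArgumentException("Cannot round/truncate to scales greater than 38.");
        }
        int digitsBefore = precision - scale;
        // Rounding to a negative scale (round(1234.56, -2) -> 1200) yields scale 0.
        int digitsAfter = Math.max(targetScale, 0);
        // Rounding to fewer decimal places may need one extra digit before the point,
        // e.g. round(9.99, 1) -> 10.0.
        if (digitsAfter < scale) ++digitsBefore;
        return new int[] { digitsBefore + digitsAfter, digitsAfter };
      }

      public static void main(String[] args) {
        int[] t = roundReturnType(6, 2, 1);  // round(DECIMAL(6,2) col, 1)
        System.out.println("DECIMAL(" + t[0] + "," + t[1] + ")");  // DECIMAL(6,1)
      }
    }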

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/FunctionName.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/FunctionName.java b/fe/src/main/java/com/cloudera/impala/analysis/FunctionName.java
deleted file mode 100644
index 5609578..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/FunctionName.java
+++ /dev/null
@@ -1,148 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.ArrayList;
-
-import com.cloudera.impala.catalog.Catalog;
-import com.cloudera.impala.catalog.Db;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TFunctionName;
-import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
-
-/**
- * Class to represent a function name. Function names are specified as
- * db.function_name.
- */
-public class FunctionName {
-  // Only set for parsed function names.
-  private final ArrayList<String> fnNamePath_;
-
-  // Set/validated during analysis.
-  private String db_;
-  private String fn_;
-  private boolean isBuiltin_ = false;
-  private boolean isAnalyzed_ = false;
-
-  /**
-   * C'tor for parsed function names. The function names could be invalid. The validity
-   * is checked during analysis.
-   */
-  public FunctionName(ArrayList<String> fnNamePath) {
-    fnNamePath_ = fnNamePath;
-  }
-
-  public FunctionName(String dbName, String fn) {
-    db_ = (dbName != null) ? dbName.toLowerCase() : null;
-    fn_ = fn.toLowerCase();
-    fnNamePath_ = null;
-  }
-
-  public FunctionName(String fn) {
-    this(null, fn);
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (!(obj instanceof FunctionName)) return false;
-    FunctionName o = (FunctionName)obj;
-    if ((db_ == null || o.db_ == null) && (db_ != o.db_)) {
-      if (db_ == null && o.db_ != null) return false;
-      if (db_ != null && o.db_ == null) return false;
-      if (!db_.equalsIgnoreCase(o.db_)) return false;
-    }
-    return fn_.equalsIgnoreCase(o.fn_);
-  }
-
-  public String getDb() { return db_; }
-  public String getFunction() { return fn_; }
-  public boolean isFullyQualified() { return db_ != null; }
-  public boolean isBuiltin() { return isBuiltin_; }
-  public ArrayList<String> getFnNamePath() { return fnNamePath_; }
-
-  @Override
-  public String toString() {
-    // The fnNamePath_ is not always set.
-    if (!isAnalyzed_ && fnNamePath_ != null) return Joiner.on(".").join(fnNamePath_);
-    if (db_ == null || isBuiltin_) return fn_;
-    return db_ + "." + fn_;
-  }
-
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    if (isAnalyzed_) return;
-    analyzeFnNamePath();
-    if (fn_.isEmpty()) throw new AnalysisException("Function name cannot be empty.");
-    for (int i = 0; i < fn_.length(); ++i) {
-      if (!isValidCharacter(fn_.charAt(i))) {
-        throw new AnalysisException(
-            "Function names must be all alphanumeric or underscore. " +
-            "Invalid name: " + fn_);
-      }
-    }
-    if (Character.isDigit(fn_.charAt(0))) {
-      throw new AnalysisException("Function cannot start with a digit: " + fn_);
-    }
-
-    // Resolve the database for this function.
-    if (!isFullyQualified()) {
-      Db builtinDb = analyzer.getCatalog().getBuiltinsDb();
-      if (builtinDb.containsFunction(fn_)) {
-        // If it isn't fully qualified and is the same name as a builtin, use
-        // the builtin.
-        db_ = Catalog.BUILTINS_DB;
-        isBuiltin_ = true;
-      } else {
-        db_ = analyzer.getDefaultDb();
-        isBuiltin_ = false;
-      }
-    } else {
-      isBuiltin_ = db_.equals(Catalog.BUILTINS_DB);
-    }
-    isAnalyzed_ = true;
-  }
-
-  private void analyzeFnNamePath() throws AnalysisException {
-    if (fnNamePath_ == null) return;
-    if (fnNamePath_.size() > 2 || fnNamePath_.isEmpty()) {
-      throw new AnalysisException(
-          String.format("Invalid function name: '%s'. Expected [dbname].funcname.",
-              Joiner.on(".").join(fnNamePath_)));
-    } else if (fnNamePath_.size() > 1) {
-      db_ = fnNamePath_.get(0);
-      fn_ = fnNamePath_.get(1).toLowerCase();
-    } else {
-      Preconditions.checkState(fnNamePath_.size() == 1);
-      fn_ = fnNamePath_.get(0).toLowerCase();
-    }
-  }
-
-  private boolean isValidCharacter(char c) {
-    return Character.isLetterOrDigit(c) || c == '_';
-  }
-
-  public TFunctionName toThrift() {
-    TFunctionName name = new TFunctionName(fn_);
-    name.setDb_name(db_);
-    return name;
-  }
-
-  public static FunctionName fromThrift(TFunctionName fnName) {
-    return new FunctionName(fnName.getDb_name(), fnName.getFunction_name());
-  }
-}
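
Editor's note: the name-path handling in FunctionName.analyze()/analyzeFnNamePath() above reduces to accepting one or two path components ([dbname].funcname), lowercasing them, and restricting the function part to alphanumerics and underscores that do not start with a digit. A self-contained sketch of just those rules, with illustrative names only; database resolution (builtins db vs. the session's default db) is the analyzer's job and is omitted here.

    import java.util.Arrays;
    import java.util.List;

    // Minimal sketch (not Impala code) of the [dbname].funcname parsing rules above.
    public class FunctionNameSketch {
      /** Returns {db, fn}; db is null when the name is not fully qualified. */
      static String[] parse(List<String> path) {
        if (path.isEmpty() || path.size() > 2) {
          throw new IllegalArgumentException("Invalid function name: '"
              + String.join(".", path) + "'. Expected [dbname].funcname.");
        }
        String db = path.size() == 2 ? path.get(0).toLowerCase() : null;
        String fn = path.get(path.size() - 1).toLowerCase();
        if (fn.isEmpty()) throw new IllegalArgumentException("Function name cannot be empty.");
        for (char c : fn.toCharArray()) {
          if (!Character.isLetterOrDigit(c) && c != '_') {
            throw new IllegalArgumentException(
                "Function names must be all alphanumeric or underscore. Invalid name: " + fn);
          }
        }
        if (Character.isDigit(fn.charAt(0))) {
          throw new IllegalArgumentException("Function cannot start with a digit: " + fn);
        }
        return new String[] { db, fn };
      }

      public static void main(String[] args) {
        System.out.println(Arrays.toString(parse(Arrays.asList("mydb", "My_Udf"))));  // [mydb, my_udf]
      }
    }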

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/FunctionParams.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/FunctionParams.java b/fe/src/main/java/com/cloudera/impala/analysis/FunctionParams.java
deleted file mode 100644
index e6854a0..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/FunctionParams.java
+++ /dev/null
@@ -1,68 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.List;
-
-/**
- * Return value of the grammar production that parses function
- * parameters. These parameters can be for scalar or aggregate functions.
- */
-class FunctionParams implements Cloneable {
-  private final boolean isStar_;
-  private boolean isDistinct_;
-  private boolean isIgnoreNulls_;
-  private final List<Expr> exprs_;
-
-  // c'tor for non-star params
-  public FunctionParams(boolean isDistinct, boolean isIgnoreNulls, List<Expr> exprs) {
-    this.isStar_ = false;
-    this.isDistinct_ = isDistinct;
-    this.isIgnoreNulls_ = isIgnoreNulls;
-    this.exprs_ = exprs;
-  }
-
-  // c'tor for non-star, non-ignore-nulls params
-  public FunctionParams(boolean isDistinct, List<Expr> exprs) {
-    this(isDistinct, false, exprs);
-  }
-
-  // c'tor for non-star, non-distinct, non-ignore-nulls params
-  public FunctionParams(List<Expr> exprs) {
-    this(false, false, exprs);
-  }
-
-  static public FunctionParams createStarParam() {
-    return new FunctionParams();
-  }
-
-  public boolean isStar() { return isStar_; }
-  public boolean isDistinct() { return isDistinct_; }
-  public boolean isIgnoreNulls() { return isIgnoreNulls_; }
-  public List<Expr> exprs() { return exprs_; }
-  public void setIsDistinct(boolean v) { isDistinct_ = v; }
-  public int size() { return exprs_ == null ? 0 : exprs_.size(); }
-
-  // c'tor for <agg>(*)
-  private FunctionParams() {
-    exprs_ = null;
-    isStar_ = true;
-    isDistinct_ = false;
-    isIgnoreNulls_ = false;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/GrantRevokePrivStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/GrantRevokePrivStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/GrantRevokePrivStmt.java
deleted file mode 100644
index edaf22a..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/GrantRevokePrivStmt.java
+++ /dev/null
@@ -1,94 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.List;
-
-import com.cloudera.impala.catalog.Role;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TGrantRevokePrivParams;
-import com.cloudera.impala.thrift.TPrivilege;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Strings;
-
-/**
- * Represents a "GRANT/REVOKE PRIVILEGE" statement.
- * All privilege checks on catalog objects are skipped when executing
- * GRANT/REVOKE statements. This is because we need to be able to create
- * privileges on an object before any privileges actually exist.
- * The GRANT/REVOKE statement itself will be authorized (currently by
- * the Sentry Service).
- */
-public class GrantRevokePrivStmt extends AuthorizationStmt {
-  private final PrivilegeSpec privilegeSpec_;
-  private final String roleName_;
-  private final boolean isGrantPrivStmt_;
-  private final boolean hasGrantOpt_;
-
-  // Set/modified during analysis
-  private Role role_;
-
-  public GrantRevokePrivStmt(String roleName, PrivilegeSpec privilegeSpec,
-      boolean isGrantPrivStmt, boolean hasGrantOpt) {
-    Preconditions.checkNotNull(privilegeSpec);
-    Preconditions.checkNotNull(roleName);
-    privilegeSpec_ = privilegeSpec;
-    roleName_ = roleName;
-    isGrantPrivStmt_ = isGrantPrivStmt;
-    hasGrantOpt_ = hasGrantOpt;
-  }
-
-  public TGrantRevokePrivParams toThrift() {
-    TGrantRevokePrivParams params = new TGrantRevokePrivParams();
-    params.setRole_name(roleName_);
-    params.setIs_grant(isGrantPrivStmt_);
-    List<TPrivilege> privileges = privilegeSpec_.toThrift();
-    for (TPrivilege privilege: privileges) {
-      privilege.setRole_id(role_.getId());
-      privilege.setHas_grant_opt(hasGrantOpt_);
-    }
-    params.setHas_grant_opt(hasGrantOpt_);
-    params.setPrivileges(privileges);
-    return params;
-  }
-
-  @Override
-  public String toSql() {
-    StringBuilder sb = new StringBuilder(isGrantPrivStmt_ ? "GRANT " : "REVOKE ");
-    if (!isGrantPrivStmt_ && hasGrantOpt_) sb.append("GRANT OPTION FOR ");
-    sb.append(privilegeSpec_.toSql());
-    sb.append(isGrantPrivStmt_ ? " TO " : " FROM ");
-    sb.append(roleName_);
-    if (isGrantPrivStmt_ && hasGrantOpt_) sb.append(" WITH GRANT OPTION");
-    return sb.toString();
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    super.analyze(analyzer);
-    if (Strings.isNullOrEmpty(roleName_)) {
-      throw new AnalysisException("Role name in GRANT/REVOKE privilege cannot be " +
-          "empty.");
-    }
-    role_ = analyzer.getCatalog().getAuthPolicy().getRole(roleName_);
-    if (role_ == null) {
-      throw new AnalysisException(String.format("Role '%s' does not exist.", roleName_));
-    }
-    privilegeSpec_.analyze(analyzer);
-  }
-}
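
Editor's note: the toSql() above places GRANT OPTION FOR before the privilege spec for REVOKE and WITH GRANT OPTION at the end for GRANT. A small sketch of that assembly, with the privilege spec reduced to a plain string; the names are illustrative and not Impala API.

    // Minimal sketch (not Impala code) of how the GRANT/REVOKE text is assembled above.
    public class GrantRevokeSqlSketch {
      static String toSql(boolean isGrant, boolean hasGrantOpt, String privSpec, String role) {
        StringBuilder sb = new StringBuilder(isGrant ? "GRANT " : "REVOKE ");
        if (!isGrant && hasGrantOpt) sb.append("GRANT OPTION FOR ");
        sb.append(privSpec);
        sb.append(isGrant ? " TO " : " FROM ");
        sb.append(role);
        if (isGrant && hasGrantOpt) sb.append(" WITH GRANT OPTION");
        return sb.toString();
      }

      public static void main(String[] args) {
        // Prints: GRANT SELECT ON TABLE functional.alltypes TO analyst WITH GRANT OPTION
        System.out.println(toSql(true, true, "SELECT ON TABLE functional.alltypes", "analyst"));
      }
    }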

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/GrantRevokeRoleStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/GrantRevokeRoleStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/GrantRevokeRoleStmt.java
deleted file mode 100644
index 73240dc..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/GrantRevokeRoleStmt.java
+++ /dev/null
@@ -1,72 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TGrantRevokeRoleParams;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Strings;
-import com.google.common.collect.Lists;
-
-/**
- * Represents a "GRANT/REVOKE ROLE" statement.
- */
-public class GrantRevokeRoleStmt extends AuthorizationStmt {
-  private final String roleName_;
-  private final String groupName_;
-  private final boolean isGrantStmt_;
-
-  public GrantRevokeRoleStmt(String roleName, String groupName, boolean isGrantStmt) {
-    Preconditions.checkNotNull(roleName);
-    Preconditions.checkNotNull(groupName);
-    roleName_ = roleName;
-    groupName_ = groupName;
-    isGrantStmt_ = isGrantStmt;
-  }
-
-  @Override
-  public String toSql() {
-    if (isGrantStmt_) {
-      return String.format("GRANT ROLE %s TO %s", roleName_, groupName_);
-    } else {
-      return String.format("REVOKE ROLE %s FROM %s", roleName_, groupName_);
-    }
-  }
-
-  public TGrantRevokeRoleParams toThrift() {
-    TGrantRevokeRoleParams params = new TGrantRevokeRoleParams();
-    params.setRole_names(Lists.newArrayList(roleName_));
-    params.setGroup_names(Lists.newArrayList(groupName_));
-    params.setIs_grant(isGrantStmt_);
-    return params;
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    super.analyze(analyzer);
-    if (Strings.isNullOrEmpty(roleName_)) {
-      throw new AnalysisException("Role name in GRANT/REVOKE ROLE cannot be empty.");
-    }
-    if (Strings.isNullOrEmpty(groupName_)) {
-      throw new AnalysisException("Group name in GRANT/REVOKE ROLE cannot be empty.");
-    }
-    if (analyzer.getCatalog().getAuthPolicy().getRole(roleName_) == null) {
-      throw new AnalysisException(String.format("Role '%s' does not exist.", roleName_));
-    }
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/HdfsCachingOp.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/HdfsCachingOp.java b/fe/src/main/java/com/cloudera/impala/analysis/HdfsCachingOp.java
deleted file mode 100644
index 1b6cff2..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/HdfsCachingOp.java
+++ /dev/null
@@ -1,93 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.math.BigDecimal;
-
-import com.cloudera.impala.catalog.HdfsCachePool;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.THdfsCachingOp;
-import com.google.common.base.Preconditions;
-
-/**
- * Represents the partial SQL statement of specifying whether a table/partition
- * should or should not be marked as cached.
- */
-public class HdfsCachingOp implements ParseNode {
-  private final THdfsCachingOp cacheOp_;
-  private final BigDecimal parsedReplication_;
-
-  /**
-   * Creates an HdfsCachingOp that specifies the target should be uncached
-   */
-  public HdfsCachingOp() {
-    cacheOp_ = new THdfsCachingOp(false);
-    parsedReplication_ = null;
-  }
-
-  /**
-   * Creates an HdfsCachingOp that specifies the target should be cached in cachePoolName
-   * with an optional replication factor
-   */
-  public HdfsCachingOp(String cachePoolName, BigDecimal replication) {
-    cacheOp_ = new THdfsCachingOp(true);
-    cacheOp_.setCache_pool_name(cachePoolName);
-    parsedReplication_ = replication;
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    if (cacheOp_.isSet_cached()) {
-      String poolName = cacheOp_.getCache_pool_name();
-      Preconditions.checkNotNull(poolName);
-      if (poolName.isEmpty()) {
-        throw new AnalysisException("Cache pool name cannot be empty.");
-      }
-
-      HdfsCachePool cachePool = analyzer.getCatalog().getHdfsCachePool(poolName);
-      if (cachePool == null) {
-        throw new AnalysisException(
-            "The specified cache pool does not exist: " + poolName);
-      }
-
-      if (parsedReplication_ != null && (parsedReplication_.longValue() <= 0 ||
-            parsedReplication_.longValue() > Short.MAX_VALUE)) {
-          throw new AnalysisException(
-              "Cache replication factor must be between 0 and Short.MAX_VALUE");
-      }
-
-      if (parsedReplication_ != null) {
-        cacheOp_.setReplication(parsedReplication_.shortValue());
-      }
-    }
-  }
-
-  @Override
-  public String toSql() {
-    return !shouldCache() ? "UNCACHED" : "CACHED IN '" + getCachePoolName() + "' WITH " +
-        "REPLICATION = " + parsedReplication_.longValue();
-  }
-
-  public THdfsCachingOp toThrift() { return cacheOp_; }
-
-  public boolean shouldCache() { return cacheOp_.isSet_cached(); }
-
-  public String getCachePoolName() {
-    return shouldCache() ? cacheOp_.getCache_pool_name() : null;
-  }
-}
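
Editor's note: in the analyze() above, the replication factor attached to CACHED IN ... WITH REPLICATION must be positive and fit in a short; everything else is rejected. An isolated sketch of only that check, with illustrative names rather than Impala API.

    import java.math.BigDecimal;

    // Minimal sketch (not Impala code) of the replication-factor validation above.
    public class CacheReplicationSketch {
      static short validateReplication(BigDecimal replication) {
        if (replication.longValue() <= 0 || replication.longValue() > Short.MAX_VALUE) {
          throw new IllegalArgumentException(
              "Cache replication factor must be between 0 and Short.MAX_VALUE");
        }
        return replication.shortValue();
      }

      public static void main(String[] args) {
        System.out.println(validateReplication(new BigDecimal(3)));  // 3
        // validateReplication(new BigDecimal(0)) would throw.
      }
    }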

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/HdfsUri.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/HdfsUri.java b/fe/src/main/java/com/cloudera/impala/analysis/HdfsUri.java
deleted file mode 100644
index 9fbe467..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/HdfsUri.java
+++ /dev/null
@@ -1,128 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.io.IOException;
-
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsAction;
-
-import com.cloudera.impala.authorization.AuthorizeableUri;
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.authorization.PrivilegeRequest;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.FileSystemUtil;
-import com.cloudera.impala.util.FsPermissionChecker;
-import com.google.common.base.Preconditions;
-
-/**
- * Represents a Hadoop FileSystem URI in a SQL statement.
- */
-public class HdfsUri {
-  private final String location_;
-
-  // Set during analysis
-  private Path uriPath_;
-
-  public HdfsUri(String location) {
-    Preconditions.checkNotNull(location);
-    this.location_ = location.trim();
-  }
-
-  public Path getPath() {
-    Preconditions.checkNotNull(uriPath_);
-    return uriPath_;
-  }
-
-  public void analyze(Analyzer analyzer, Privilege privilege)
-      throws AnalysisException {
-    analyze(analyzer, privilege, FsAction.NONE, true);
-  }
-
-  public void analyze(Analyzer analyzer, Privilege privilege, FsAction perm)
-      throws AnalysisException {
-    analyze(analyzer, privilege, perm, true);
-  }
-
-  public void analyze(Analyzer analyzer, Privilege privilege, boolean registerPrivReq)
-      throws AnalysisException {
-    analyze(analyzer, privilege, FsAction.NONE, registerPrivReq);
-  }
-
-  /**
-   * Analyzes the URI.
-   * Optionally checks the location path's permissions and issues a warning if the
-   * impala user doesn't have sufficient access rights.
-   * Optionally registers a privilege request. Used by GRANT/REVOKE privilege statements.
-   */
-  public void analyze(Analyzer analyzer, Privilege privilege, FsAction perm,
-      boolean registerPrivReq) throws AnalysisException {
-    if (location_.isEmpty()) {
-      throw new AnalysisException("URI path cannot be empty.");
-    }
-
-    uriPath_ = new Path(location_);
-    if (!uriPath_.isUriPathAbsolute()) {
-      throw new AnalysisException("URI path must be absolute: " + uriPath_);
-    }
-
-    uriPath_ = FileSystemUtil.createFullyQualifiedPath(uriPath_);
-
-    // Check if parent path exists and if impala is allowed to access it.
-    Path parentPath = uriPath_.getParent();
-    try {
-      FileSystem fs = uriPath_.getFileSystem(FileSystemUtil.getConfiguration());
-      boolean pathExists = false;
-      StringBuilder errorMsg = new StringBuilder();
-      try {
-        pathExists = fs.exists(parentPath);
-        if (!pathExists) errorMsg.append("Path does not exist.");
-      } catch (Exception e) {
-        errorMsg.append(e.getMessage());
-      }
-      if (!pathExists) {
-        analyzer.addWarning(String.format("Path '%s' cannot be reached: %s",
-            parentPath, errorMsg.toString()));
-      } else if (perm != FsAction.NONE) {
-        FsPermissionChecker checker = FsPermissionChecker.getInstance();
-        if (!checker.getPermissions(fs, parentPath).checkPermissions(perm)) {
-          analyzer.addWarning(String.format(
-              "Impala does not have %s access to path '%s'",
-              perm.toString(), parentPath));
-        }
-      }
-    } catch (IOException e) {
-      throw new AnalysisException(e.getMessage(), e);
-    }
-
-    if (registerPrivReq) {
-      analyzer.registerPrivReq(new PrivilegeRequest(
-          new AuthorizeableUri(uriPath_.toString()), privilege));
-    }
-  }
-
-  @Override
-  public String toString() {
-    // If uriPath is null (this HdfsURI has not been analyzed yet) just return the raw
-    // location string the caller passed in.
-    return uriPath_ == null ? location_ : uriPath_.toString();
-  }
-
-  public String getLocation() { return location_; }
-}
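
Editor's note: HdfsUri.analyze() above deliberately warns rather than fails when the parent path is unreachable, and only hard-fails on empty or relative paths. The sketch below is a simplified local-filesystem analogue of that behaviour using java.nio.file; it is not the HDFS implementation and skips the permission check and privilege registration.

    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;

    // Minimal sketch (not Impala code): warn on an unreachable parent, fail on bad paths.
    public class UriCheckSketch {
      static void check(String location) {
        if (location.trim().isEmpty()) throw new IllegalArgumentException("URI path cannot be empty.");
        Path p = Paths.get(location.trim());
        if (!p.isAbsolute()) throw new IllegalArgumentException("URI path must be absolute: " + p);
        Path parent = p.getParent();
        if (parent == null || !Files.exists(parent)) {
          System.out.println("WARNING: Path '" + parent + "' cannot be reached: Path does not exist.");
        }
      }

      public static void main(String[] args) {
        check("/tmp/some_dir/data_file");  // warns only if /tmp/some_dir is missing
      }
    }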

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/InPredicate.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/InPredicate.java b/fe/src/main/java/com/cloudera/impala/analysis/InPredicate.java
deleted file mode 100644
index 28d8f12..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/InPredicate.java
+++ /dev/null
@@ -1,234 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import com.cloudera.impala.catalog.Db;
-import com.cloudera.impala.catalog.Function.CompareMode;
-import com.cloudera.impala.catalog.PrimitiveType;
-import com.cloudera.impala.catalog.ScalarFunction;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.Reference;
-import com.cloudera.impala.thrift.TExprNode;
-import com.cloudera.impala.thrift.TExprNodeType;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-/**
- * Class representing a [NOT] IN predicate. It determines if a specified value
- * (first child) matches any value in a subquery (second child) or a list
- * of values (remaining children).
- */
-public class InPredicate extends Predicate {
-  private static final String IN_SET_LOOKUP = "in_set_lookup";
-  private static final String NOT_IN_SET_LOOKUP = "not_in_set_lookup";
-  private static final String IN_ITERATE = "in_iterate";
-  private static final String NOT_IN_ITERATE = "not_in_iterate";
-  private final boolean isNotIn_;
-
-  public boolean isNotIn() { return isNotIn_; }
-
-  public static void initBuiltins(Db db) {
-    for (Type t: Type.getSupportedTypes()) {
-      if (t.isNull()) continue;
-      // TODO: we do not support codegen for CHAR, and the IN predicate must be codegened
-      // because it has a variable number of arguments. This forces CHARs to be
-      // cast up to strings, meaning that "in" comparisons will not have CHAR comparison
-      // semantics.
-      if (t.getPrimitiveType() == PrimitiveType.CHAR) continue;
-
-      String typeString = t.getPrimitiveType().toString().toLowerCase();
-      if (t.isScalarType(PrimitiveType.VARCHAR)) typeString = "string";
-
-      db.addBuiltin(ScalarFunction.createBuiltin(IN_ITERATE,
-          Lists.newArrayList(t, t), true, Type.BOOLEAN,
-          "impala::InPredicate::InIterate", null, null,  false));
-      db.addBuiltin(ScalarFunction.createBuiltin(NOT_IN_ITERATE,
-          Lists.newArrayList(t, t), true, Type.BOOLEAN,
-          "impala::InPredicate::NotInIterate", null, null, false));
-
-      String prepareFn = "impala::InPredicate::SetLookupPrepare_" + typeString;
-      String closeFn = "impala::InPredicate::SetLookupClose_" + typeString;
-
-      db.addBuiltin(ScalarFunction.createBuiltin(IN_SET_LOOKUP,
-          Lists.newArrayList(t, t), true, Type.BOOLEAN,
-          "impala::InPredicate::InSetLookup", prepareFn, closeFn,  false));
-      db.addBuiltin(ScalarFunction.createBuiltin(NOT_IN_SET_LOOKUP,
-          Lists.newArrayList(t, t), true, Type.BOOLEAN,
-          "impala::InPredicate::NotInSetLookup", prepareFn, closeFn, false));
-
-    }
-  }
-
-  // First child is the comparison expr for which we
-  // should check membership in the inList (the remaining children).
-  public InPredicate(Expr compareExpr, List<Expr> inList, boolean isNotIn) {
-    children_.add(compareExpr);
-    children_.addAll(inList);
-    isNotIn_ = isNotIn;
-  }
-
-  // C'tor for initializing an [NOT] IN predicate with a subquery child.
-  public InPredicate(Expr compareExpr, Expr subquery, boolean isNotIn) {
-    Preconditions.checkNotNull(compareExpr);
-    Preconditions.checkNotNull(subquery);
-    children_.add(compareExpr);
-    children_.add(subquery);
-    isNotIn_ = isNotIn;
-  }
-
-  /**
-   * Copy c'tor used in clone().
-   */
-  protected InPredicate(InPredicate other) {
-    super(other);
-    isNotIn_ = other.isNotIn_;
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    if (isAnalyzed_) return;
-    super.analyze(analyzer);
-
-    if (contains(Subquery.class)) {
-      // An [NOT] IN predicate with a subquery must contain two children, the second of
-      // which is a Subquery.
-      if (children_.size() != 2 || !(getChild(1) instanceof Subquery)) {
-        throw new AnalysisException("Unsupported IN predicate with a subquery: " +
-            toSqlImpl());
-      }
-      Subquery subquery = (Subquery)getChild(1);
-      if (!subquery.returnsScalarColumn()) {
-        throw new AnalysisException("Subquery must return a single column: " +
-            subquery.toSql());
-      }
-
-      // Ensure that the column in the lhs of the IN predicate and the result of
-      // the subquery are type compatible. No need to perform any
-      // casting at this point. Any casting needed will be performed when the
-      // subquery is unnested.
-      ArrayList<Expr> subqueryExprs = subquery.getStatement().getResultExprs();
-      Expr compareExpr = children_.get(0);
-      Expr subqueryExpr = subqueryExprs.get(0);
-      analyzer.getCompatibleType(compareExpr.getType(), compareExpr, subqueryExpr);
-    } else {
-      Preconditions.checkState(getChildren().size() >= 2);
-      analyzer.castAllToCompatibleType(children_);
-      Type childType = children_.get(0).getType();
-
-      if (childType.isNull()) {
-        // Make sure the BE never sees TYPE_NULL by picking an arbitrary type
-        for (int i = 0; i < children_.size(); ++i) {
-          uncheckedCastChild(Type.BOOLEAN, i);
-        }
-      }
-
-      // Choose SetLookup or Iterate strategy. SetLookup can be used if all the exprs in
-      // the IN list are constant, and is faster than iterating if the IN list is big
-      // enough.
-      boolean allConstant = true;
-      for (int i = 1; i < children_.size(); ++i) {
-        if (!children_.get(i).isConstant()) {
-          allConstant = false;
-          break;
-        }
-      }
-      boolean useSetLookup = allConstant;
-      // Threshold based on InPredicateBenchmark results
-      int setLookupThreshold = children_.get(0).getType().isStringType() ? 6 : 2;
-      if (children_.size() - 1 < setLookupThreshold) useSetLookup = false;
-
-      // Only lookup fn_ if all subqueries have been rewritten. If the second child is a
-      // subquery, it will have type ArrayType, which cannot be resolved to a builtin
-      // function and will fail analysis.
-      Type[] argTypes = {getChild(0).type_, getChild(1).type_};
-      if (useSetLookup) {
-        fn_ = getBuiltinFunction(analyzer, isNotIn_ ? NOT_IN_SET_LOOKUP : IN_SET_LOOKUP,
-            argTypes, CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
-      } else {
-        fn_ = getBuiltinFunction(analyzer, isNotIn_ ? NOT_IN_ITERATE : IN_ITERATE,
-            argTypes, CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
-      }
-      Preconditions.checkNotNull(fn_);
-      Preconditions.checkState(fn_.getReturnType().isBoolean());
-      castForFunctionCall(false);
-    }
-
-    // TODO: Fix selectivity_ for nested predicate
-    Reference<SlotRef> slotRefRef = new Reference<SlotRef>();
-    Reference<Integer> idxRef = new Reference<Integer>();
-    if (isSingleColumnPredicate(slotRefRef, idxRef)
-        && idxRef.getRef() == 0
-        && slotRefRef.getRef().getNumDistinctValues() > 0) {
-      selectivity_ = (double) (getChildren().size() - 1)
-          / (double) slotRefRef.getRef().getNumDistinctValues();
-      selectivity_ = Math.max(0.0, Math.min(1.0, selectivity_));
-    }
-
-    if (hasChildCosts()) {
-      // BINARY_PREDICATE_COST accounts for the cost of performing the comparison.
-      evalCost_ = getChildCosts() + BINARY_PREDICATE_COST * (children_.size() - 1);
-    }
-  }
-
-  @Override
-  protected void toThrift(TExprNode msg) {
-    // Can't serialize a predicate with a subquery
-    Preconditions.checkState(!contains(Subquery.class));
-    msg.node_type = TExprNodeType.FUNCTION_CALL;
-  }
-
-  @Override
-  public String toSqlImpl() {
-    StringBuilder strBuilder = new StringBuilder();
-    String notStr = (isNotIn_) ? "NOT " : "";
-    strBuilder.append(getChild(0).toSql() + " " + notStr + "IN ");
-    boolean hasSubquery = contains(Subquery.class);
-    if (!hasSubquery) strBuilder.append("(");
-    for (int i = 1; i < children_.size(); ++i) {
-      strBuilder.append(getChild(i).toSql());
-      strBuilder.append((i+1 != children_.size()) ? ", " : "");
-    }
-    if (!hasSubquery) strBuilder.append(")");
-    return strBuilder.toString();
-  }
-
-  /**
-   * If predicate is of the form "<SlotRef> [NOT] IN", returns the
-   * SlotRef.
-   */
-  @Override
-  public SlotRef getBoundSlot() {
-    return getChild(0).unwrapSlotRef(true);
-  }
-
-  /**
-   * Negates an InPredicate.
-   */
-  @Override
-  public Expr negate() {
-    return new InPredicate(getChild(0), children_.subList(1, children_.size()),
-        !isNotIn_);
-  }
-
-  @Override
-  public Expr clone() { return new InPredicate(this); }
-}
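
Editor's note: the strategy selection in InPredicate.analyze() above boils down to using the set-lookup builtin only when every element of the IN list is constant and the list is at least as long as a per-type threshold (6 for strings, 2 otherwise, per the code); anything else falls back to iteration. A standalone sketch of that decision, with illustrative names rather than Impala API.

    import java.util.Arrays;
    import java.util.List;

    // Minimal sketch (not Impala code) of the SetLookup-vs-Iterate choice above.
    public class InStrategySketch {
      enum Strategy { SET_LOOKUP, ITERATE }

      static Strategy choose(boolean lhsIsString, List<Boolean> listElementIsConstant) {
        boolean allConstant = !listElementIsConstant.contains(Boolean.FALSE);
        int setLookupThreshold = lhsIsString ? 6 : 2;  // thresholds from the code above
        boolean useSetLookup =
            allConstant && listElementIsConstant.size() >= setLookupThreshold;
        return useSetLookup ? Strategy.SET_LOOKUP : Strategy.ITERATE;
      }

      public static void main(String[] args) {
        System.out.println(choose(false, Arrays.asList(true, true, true)));   // SET_LOOKUP
        System.out.println(choose(false, Arrays.asList(true, false, true)));  // ITERATE
      }
    }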

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/InlineViewRef.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/InlineViewRef.java b/fe/src/main/java/com/cloudera/impala/analysis/InlineViewRef.java
deleted file mode 100644
index a6c62b0..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/InlineViewRef.java
+++ /dev/null
@@ -1,339 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.ArrayList;
-import java.util.HashSet;
-import java.util.List;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.catalog.ColumnStats;
-import com.cloudera.impala.catalog.StructField;
-import com.cloudera.impala.catalog.StructType;
-import com.cloudera.impala.catalog.View;
-import com.cloudera.impala.common.AnalysisException;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Sets;
-
-/**
- * An inline view is a query statement with an alias. Inline views can be parsed directly
- * from a query string or represent a reference to a local or catalog view.
- */
-public class InlineViewRef extends TableRef {
-  private final static Logger LOG = LoggerFactory.getLogger(SelectStmt.class);
-
-  // Catalog or local view that is referenced.
-  // Null for inline views parsed directly from a query string.
-  private final View view_;
-
-  // If not null, these will serve as the column labels for the inline view. This provides
-  // a layer of separation between column labels visible from outside the inline view
-  // and column labels used in the query definition. Either all or none of the column
-  // labels must be overridden.
-  private List<String> explicitColLabels_;
-
-  /////////////////////////////////////////
-  // BEGIN: Members that need to be reset()
-
-  // The select or union statement of the inline view
-  protected QueryStmt queryStmt_;
-
-  // queryStmt has its own analysis context
-  protected Analyzer inlineViewAnalyzer_;
-
-  // list of tuple ids materialized by queryStmt
-  protected final ArrayList<TupleId> materializedTupleIds_ = Lists.newArrayList();
-
-  // Map inline view's output slots to the corresponding resultExpr of queryStmt.
-  protected final ExprSubstitutionMap smap_;
-
-  // Map inline view's output slots to the corresponding baseTblResultExpr of queryStmt.
-  protected final ExprSubstitutionMap baseTblSmap_;
-
-  // END: Members that need to be reset()
-  /////////////////////////////////////////
-
-  /**
-   * C'tor for creating inline views parsed directly from a query string.
-   */
-  public InlineViewRef(String alias, QueryStmt queryStmt) {
-    super(null, alias);
-    Preconditions.checkNotNull(queryStmt);
-    queryStmt_ = queryStmt;
-    view_ = null;
-    smap_ = new ExprSubstitutionMap();
-    baseTblSmap_ = new ExprSubstitutionMap();
-  }
-
-  public InlineViewRef(String alias, QueryStmt queryStmt, List<String> colLabels) {
-    this(alias, queryStmt);
-    explicitColLabels_ = Lists.newArrayList(colLabels);
-  }
-
-  /**
-   * C'tor for creating inline views that replace a local or catalog view ref.
-   */
-  public InlineViewRef(View view, TableRef origTblRef) {
-    super(view.getTableName().toPath(), origTblRef.getExplicitAlias());
-    queryStmt_ = view.getQueryStmt().clone();
-    if (view.isLocalView()) queryStmt_.reset();
-    view_ = view;
-    smap_ = new ExprSubstitutionMap();
-    baseTblSmap_ = new ExprSubstitutionMap();
-    setJoinAttrs(origTblRef);
-    explicitColLabels_ = view.getColLabels();
-    // Set implicit aliases if no explicit one was given.
-    if (hasExplicitAlias()) return;
-    aliases_ = new String[] {
-        view_.getTableName().toString().toLowerCase(), view_.getName().toLowerCase()
-    };
-  }
-
-  /**
-   * C'tor for cloning.
-   */
-  public InlineViewRef(InlineViewRef other) {
-    super(other);
-    Preconditions.checkNotNull(other.queryStmt_);
-    view_ = other.view_;
-    queryStmt_ = other.queryStmt_.clone();
-    inlineViewAnalyzer_ = other.inlineViewAnalyzer_;
-    if (other.explicitColLabels_ != null) {
-      explicitColLabels_ = Lists.newArrayList(other.explicitColLabels_);
-    }
-    materializedTupleIds_.addAll(other.materializedTupleIds_);
-    smap_ = other.smap_.clone();
-    baseTblSmap_ = other.baseTblSmap_.clone();
-  }
-
-  /**
-   * Analyzes the inline view query block in a child analyzer of 'analyzer', creates
-   * a new tuple descriptor for the inline view and registers auxiliary eq predicates
-   * between the slots of that descriptor and the select list exprs of the inline view;
-   * then performs join clause analysis.
-   */
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    if (isAnalyzed_) return;
-
-    // Analyze the inline view query statement with its own analyzer
-    inlineViewAnalyzer_ = new Analyzer(analyzer);
-
-    // Catalog views refs require special analysis settings for authorization.
-    boolean isCatalogView = (view_ != null && !view_.isLocalView());
-    if (isCatalogView) {
-      analyzer.registerAuthAndAuditEvent(view_, analyzer);
-      if (inlineViewAnalyzer_.isExplain()) {
-        // If the user does not have privileges on the view's definition
-        // then we report a masked authorization error so as not to reveal
-        // privileged information (e.g., the existence of a table).
-        inlineViewAnalyzer_.setAuthErrMsg(
-            String.format("User '%s' does not have privileges to " +
-            "EXPLAIN this statement.", analyzer.getUser().getName()));
-      } else {
-        // If this is not an EXPLAIN statement, auth checks for the view
-        // definition should be disabled.
-        inlineViewAnalyzer_.setEnablePrivChecks(false);
-      }
-    }
-
-    inlineViewAnalyzer_.setUseHiveColLabels(
-        isCatalogView ? true : analyzer.useHiveColLabels());
-    queryStmt_.analyze(inlineViewAnalyzer_);
-    correlatedTupleIds_.addAll(queryStmt_.getCorrelatedTupleIds(inlineViewAnalyzer_));
-    if (explicitColLabels_ != null) {
-      Preconditions.checkState(
-          explicitColLabels_.size() == queryStmt_.getColLabels().size());
-    }
-
-    inlineViewAnalyzer_.setHasLimitOffsetClause(
-        queryStmt_.hasLimit() || queryStmt_.hasOffset());
-    queryStmt_.getMaterializedTupleIds(materializedTupleIds_);
-    desc_ = analyzer.registerTableRef(this);
-    isAnalyzed_ = true;  // true now that we have assigned desc
-
-    // For constant selects we materialize their exprs into a tuple.
-    if (materializedTupleIds_.isEmpty()) {
-      Preconditions.checkState(queryStmt_ instanceof SelectStmt);
-      Preconditions.checkState(((SelectStmt) queryStmt_).getTableRefs().isEmpty());
-      desc_.setIsMaterialized(true);
-      materializedTupleIds_.add(desc_.getId());
-    }
-
-    // create smap_ and baseTblSmap_ and register auxiliary eq predicates between our
-    // tuple descriptor's slots and our *unresolved* select list exprs;
-    // we create these auxiliary predicates so that the analyzer can compute the value
-    // transfer graph through this inline view correctly (ie, predicates can get
-    // propagated through the view);
-    // if the view stmt contains analytic functions, we cannot propagate predicates
-    // into the view, unless the predicates are compatible with the analytic
-    // function's partition by clause, because those extra filters
-    // would alter the results of the analytic functions (see IMPALA-1243)
-    // TODO: relax this a bit by allowing propagation out of the inline view (but
-    // not into it)
-    for (int i = 0; i < getColLabels().size(); ++i) {
-      String colName = getColLabels().get(i).toLowerCase();
-      Expr colExpr = queryStmt_.getResultExprs().get(i);
-      Path p = new Path(desc_, Lists.newArrayList(colName));
-      Preconditions.checkState(p.resolve());
-      SlotDescriptor slotDesc = analyzer.registerSlotRef(p);
-      slotDesc.setSourceExpr(colExpr);
-      slotDesc.setStats(ColumnStats.fromExpr(colExpr));
-      SlotRef slotRef = new SlotRef(slotDesc);
-      smap_.put(slotRef, colExpr);
-      baseTblSmap_.put(slotRef, queryStmt_.getBaseTblResultExprs().get(i));
-      if (createAuxPredicate(colExpr)) {
-        analyzer.createAuxEquivPredicate(new SlotRef(slotDesc), colExpr.clone());
-      }
-    }
-    LOG.trace("inline view " + getUniqueAlias() + " smap: " + smap_.debugString());
-    LOG.trace("inline view " + getUniqueAlias() + " baseTblSmap: " +
-        baseTblSmap_.debugString());
-
-    analyzeHints(analyzer);
-    // Now do the remaining join analysis
-    analyzeJoin(analyzer);
-  }
-
-  /**
-   * Checks if an auxiliary predicate should be created for an expr. Returns False if the
-   * inline view has a SELECT stmt with analytic functions and the expr is not in the
-   * common partition exprs of all the analytic functions computed by this inline view.
-   */
-  public boolean createAuxPredicate(Expr e) {
-    if (!(queryStmt_ instanceof SelectStmt)
-        || !((SelectStmt) queryStmt_).hasAnalyticInfo()) {
-      return true;
-    }
-    AnalyticInfo analyticInfo = ((SelectStmt) queryStmt_).getAnalyticInfo();
-    return analyticInfo.getCommonPartitionExprs().contains(e);
-  }
-
-  /**
-   * Create and register a non-materialized tuple descriptor for this inline view.
-   * This method is called from the analyzer when registering this inline view.
-   */
-  @Override
-  public TupleDescriptor createTupleDescriptor(Analyzer analyzer)
-      throws AnalysisException {
-    int numColLabels = getColLabels().size();
-    Preconditions.checkState(numColLabels > 0);
-    HashSet<String> uniqueColAliases = Sets.newHashSetWithExpectedSize(numColLabels);
-    ArrayList<StructField> fields = Lists.newArrayListWithCapacity(numColLabels);
-    for (int i = 0; i < numColLabels; ++i) {
-      // inline view select statement has been analyzed. Col label should be filled.
-      Expr selectItemExpr = queryStmt_.getResultExprs().get(i);
-      String colAlias = getColLabels().get(i).toLowerCase();
-
-      // inline view col cannot have duplicate name
-      if (!uniqueColAliases.add(colAlias)) {
-        throw new AnalysisException("duplicated inline view column alias: '" +
-            colAlias + "'" + " in inline view " + "'" + getUniqueAlias() + "'");
-      }
-      fields.add(new StructField(colAlias, selectItemExpr.getType(), null));
-    }
-
-    // Create the non-materialized tuple and set its type.
-    TupleDescriptor result = analyzer.getDescTbl().createTupleDescriptor(
-        getClass().getSimpleName() + " " + getUniqueAlias());
-    result.setIsMaterialized(false);
-    result.setType(new StructType(fields));
-    return result;
-  }
-
-  @Override
-  public List<TupleId> getMaterializedTupleIds() {
-    Preconditions.checkState(isAnalyzed_);
-    Preconditions.checkState(materializedTupleIds_.size() > 0);
-    return materializedTupleIds_;
-  }
-
-  public Analyzer getAnalyzer() {
-    Preconditions.checkState(isAnalyzed_);
-    return inlineViewAnalyzer_;
-  }
-
-  public ExprSubstitutionMap getSmap() {
-    Preconditions.checkState(isAnalyzed_);
-    return smap_;
-  }
-
-  public ExprSubstitutionMap getBaseTblSmap() {
-    Preconditions.checkState(isAnalyzed_);
-    return baseTblSmap_;
-  }
-
-  public QueryStmt getViewStmt() { return queryStmt_; }
-  public void setRewrittenViewStmt(QueryStmt stmt) {
-    Preconditions.checkState(getAnalyzer().containsSubquery());
-    queryStmt_ = stmt;
-  }
-
-  public List<String> getExplicitColLabels() { return explicitColLabels_; }
-
-  public List<String> getColLabels() {
-    if (explicitColLabels_ != null) return explicitColLabels_;
-    return queryStmt_.getColLabels();
-  }
-
-  @Override
-  protected TableRef clone() { return new InlineViewRef(this); }
-
-  @Override
-  public void reset() {
-    super.reset();
-    queryStmt_.reset();
-    inlineViewAnalyzer_ = null;
-    materializedTupleIds_.clear();
-    smap_.clear();
-    baseTblSmap_.clear();
-  }
-
-  @Override
-  protected String tableRefToSql() {
-    // Enclose the alias in quotes if Hive cannot parse it without quotes.
-    // This is needed for view compatibility between Impala and Hive.
-    String aliasSql = null;
-    String alias = getExplicitAlias();
-    if (alias != null) aliasSql = ToSqlUtils.getIdentSql(alias);
-    if (view_ != null) {
-      return view_.getTableName().toSql() + (aliasSql == null ? "" : " " + aliasSql);
-    }
-    Preconditions.checkNotNull(aliasSql);
-    StringBuilder sql = new StringBuilder()
-        .append("(")
-        .append(queryStmt_.toSql())
-        .append(") ")
-        .append(aliasSql);
-    // Add explicit col labels for debugging even though this syntax isn't supported.
-    if (explicitColLabels_ != null) {
-      sql.append(" (");
-      for (int i = 0; i < getExplicitColLabels().size(); i++) {
-        if (i > 0) sql.append(", ");
-        sql.append(ToSqlUtils.getIdentSql(getExplicitColLabels().get(i)));
-      }
-      sql.append(")");
-    }
-    return sql.toString();
-  }
-}
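
The predicate-propagation rule implemented by createAuxPredicate() above is compact enough to restate on its own: if the inline view computes analytic functions, an auxiliary slot-equals-expr predicate is only safe for exprs that appear in the common PARTITION BY exprs of all analytic functions, otherwise pushed-down filters could change the analytic results (IMPALA-1243). A minimal, self-contained sketch of that rule, using hypothetical helper and argument names rather than Impala's classes:

    // Hypothetical, simplified restatement (not Impala's classes or API).
    import java.util.Set;

    class AuxPredicateRule {
      // hasAnalyticFns: the view's SELECT computes analytic functions.
      // commonPartitionExprs: PARTITION BY exprs shared by all of them (as SQL text).
      // colExpr: the select-list expr backing the view column.
      static boolean createAuxPredicate(boolean hasAnalyticFns,
          Set<String> commonPartitionExprs, String colExpr) {
        if (!hasAnalyticFns) return true;  // no analytic fns: propagation is always safe
        // Otherwise only exprs in the common PARTITION BY can carry predicates
        // through the view without altering the analytic results.
        return commonPartitionExprs.contains(colExpr);
      }

      public static void main(String[] args) {
        Set<String> partExprs = Set.of("dept_id");  // requires Java 9+
        System.out.println(createAuxPredicate(true, partExprs, "dept_id"));  // true
        System.out.println(createAuxPredicate(true, partExprs, "salary"));   // false
      }
    }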


[23/61] [partial] incubator-impala git commit: IMPALA-3786: Replace "cloudera" with "apache" (part 1)

Posted by kw...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/hive/executor/UdfExecutor.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/hive/executor/UdfExecutor.java b/fe/src/main/java/com/cloudera/impala/hive/executor/UdfExecutor.java
deleted file mode 100644
index 242c704..0000000
--- a/fe/src/main/java/com/cloudera/impala/hive/executor/UdfExecutor.java
+++ /dev/null
@@ -1,643 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.hive.executor;
-
-import java.io.File;
-import java.lang.reflect.Constructor;
-import java.lang.reflect.InvocationTargetException;
-import java.lang.reflect.Method;
-import java.net.MalformedURLException;
-import java.net.URL;
-import java.net.URLClassLoader;
-import java.util.ArrayList;
-
-import org.apache.hadoop.hive.ql.exec.UDF;
-import org.apache.hadoop.hive.serde2.io.ByteWritable;
-import org.apache.hadoop.hive.serde2.io.DoubleWritable;
-import org.apache.hadoop.hive.serde2.io.ShortWritable;
-import org.apache.hadoop.io.BooleanWritable;
-import org.apache.hadoop.io.BytesWritable;
-import org.apache.hadoop.io.FloatWritable;
-import org.apache.hadoop.io.IntWritable;
-import org.apache.hadoop.io.LongWritable;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
-import org.apache.log4j.Logger;
-import org.apache.thrift.protocol.TBinaryProtocol;
-
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.catalog.PrimitiveType;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.common.ImpalaRuntimeException;
-import com.cloudera.impala.common.JniUtil;
-import com.cloudera.impala.thrift.THiveUdfExecutorCtorParams;
-import com.cloudera.impala.thrift.TPrimitiveType;
-import com.cloudera.impala.util.UnsafeUtil;
-import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-// Wrapper object to run Hive UDFs. This class works with UdfCallExpr in the
-// backend to marshal data back and forth between the execution engine and
-// the Java UDF class.
-// See the comments in be/src/exprs/hive-udf-call.h for more details.
-// TODO: should we cache loaded jars and classes?
-@SuppressWarnings("restriction")
-public class UdfExecutor {
-  private static final Logger LOG = Logger.getLogger(UdfExecutor.class);
-  // By convention, the function in the class must be called evaluate()
-  public static final String UDF_FUNCTION_NAME = "evaluate";
-
-  // Object to deserialize ctor params from BE.
-  private final static TBinaryProtocol.Factory PROTOCOL_FACTORY =
-    new TBinaryProtocol.Factory();
-
-  private UDF udf_;
-  private Method method_;
-
-  // Return and argument types of the function inferred from the udf method signature.
-  // The JavaUdfDataType enum maps it to corresponding primitive type.
-  private JavaUdfDataType[] argTypes_;
-  private JavaUdfDataType retType_;
-
-  // Input buffer from the backend. This is valid for the duration of an evaluate() call.
-  // These buffers are allocated in the BE.
-  private final long inputBufferPtr_;
-  private final long inputNullsPtr_;
-
-  // This is the byte offset in inputBufferPtr_ to the start of the input argument,
-  // i.e. the ith input argument starts at inputBufferPtr_ + inputBufferOffsets_[i].
-  private final int[] inputBufferOffsets_;
-
-  // Output buffer to return non-string values. These buffers are allocated in the BE.
-  private final long outputBufferPtr_;
-  private final long outputNullPtr_;
-
-  // For StringValue return types, outputBufferPtr_ is the location of the 16-byte
-  // StringValue object. StringValue.ptr is set to outBufferStringPtr_. This buffer
-  // grows as necessary to fit the return string.
-  // This is allocated from the FE.
-  private long outBufferStringPtr_;
-
-  // Size of outBufferStringPtr_.
-  private int outBufferCapacity_;
-
-  // Preconstructed input objects for the UDF. This minimizes object creation overhead
-  // as these objects are reused across calls to evaluate().
-  private Object[] inputObjects_;
-  private Object[] inputArgs_; // inputArgs_[i] is either inputObjects_[i] or null
-
-  // Data types that are supported as return or argument types in Java UDFs.
-  public enum JavaUdfDataType {
-    INVALID_TYPE("INVALID_TYPE", TPrimitiveType.INVALID_TYPE),
-    BOOLEAN("BOOLEAN", TPrimitiveType.BOOLEAN),
-    BOOLEAN_WRITABLE("BOOLEAN_WRITABLE", TPrimitiveType.BOOLEAN),
-    TINYINT("TINYINT", TPrimitiveType.TINYINT),
-    BYTE_WRITABLE("BYTE_WRITABLE", TPrimitiveType.TINYINT),
-    SMALLINT("SMALLINT", TPrimitiveType.SMALLINT),
-    SHORT_WRITABLE("SHORT_WRITABLE", TPrimitiveType.SMALLINT),
-    INT("INT", TPrimitiveType.INT),
-    INT_WRITABLE("INT_WRITABLE", TPrimitiveType.INT),
-    BIGINT("BIGINT", TPrimitiveType.BIGINT),
-    LONG_WRITABLE("LONG_WRITABLE", TPrimitiveType.BIGINT),
-    FLOAT("FLOAT", TPrimitiveType.FLOAT),
-    FLOAT_WRITABLE("FLOAT_WRITABLE", TPrimitiveType.FLOAT),
-    DOUBLE("DOUBLE", TPrimitiveType.DOUBLE),
-    DOUBLE_WRITABLE("DOUBLE", TPrimitiveType.DOUBLE),
-    STRING("STRING", TPrimitiveType.STRING),
-    TEXT("TEXT", TPrimitiveType.STRING),
-    BYTES_WRITABLE("BYTES_WRITABLE", TPrimitiveType.STRING),
-    BYTE_ARRAY("BYTE_ARRAY", TPrimitiveType.STRING);
-
-    private final String description_;
-    private final TPrimitiveType thriftType_;
-
-    private JavaUdfDataType(String description, TPrimitiveType thriftType) {
-      description_ = description;
-      thriftType_ = thriftType;
-    }
-
-    @Override
-    public String toString() { return description_; }
-
-    public TPrimitiveType getPrimitiveType() { return thriftType_; }
-
-    public static JavaUdfDataType getType(Class<?> c) {
-      if (c == BooleanWritable.class) {
-        return JavaUdfDataType.BOOLEAN_WRITABLE;
-      } else if (c == boolean.class || c == Boolean.class) {
-        return JavaUdfDataType.BOOLEAN;
-      } else if (c == ByteWritable.class) {
-        return JavaUdfDataType.BYTE_WRITABLE;
-      } else if (c == byte.class || c == Byte.class) {
-        return JavaUdfDataType.TINYINT;
-      } else if (c == ShortWritable.class) {
-        return JavaUdfDataType.SHORT_WRITABLE;
-      } else if (c == short.class || c == Short.class) {
-        return JavaUdfDataType.SMALLINT;
-      } else if (c == IntWritable.class) {
-        return JavaUdfDataType.INT_WRITABLE;
-      } else if (c == int.class || c == Integer.class) {
-        return JavaUdfDataType.INT;
-      } else if (c == LongWritable.class) {
-        return JavaUdfDataType.LONG_WRITABLE;
-      } else if (c == long.class || c == Long.class) {
-        return JavaUdfDataType.BIGINT;
-      } else if (c == FloatWritable.class) {
-        return JavaUdfDataType.FLOAT_WRITABLE;
-      } else if (c == float.class || c == Float.class) {
-        return JavaUdfDataType.FLOAT;
-      } else if (c == DoubleWritable.class) {
-        return JavaUdfDataType.DOUBLE_WRITABLE;
-      } else if (c == double.class || c == Double.class) {
-        return JavaUdfDataType.DOUBLE;
-      } else if (c == byte[].class) {
-        return JavaUdfDataType.BYTE_ARRAY;
-      } else if (c == BytesWritable.class) {
-        return JavaUdfDataType.BYTES_WRITABLE;
-      } else if (c == Text.class) {
-        return JavaUdfDataType.TEXT;
-      } else if (c == String.class) {
-        return JavaUdfDataType.STRING;
-      }
-      return JavaUdfDataType.INVALID_TYPE;
-    }
-
-    public static boolean isSupported(Type t) {
-      for(JavaUdfDataType javaType: JavaUdfDataType.values()) {
-        if (javaType == JavaUdfDataType.INVALID_TYPE) continue;
-        if (javaType.getPrimitiveType() == t.getPrimitiveType().toThrift()) {
-          return true;
-        }
-      }
-      return false;
-    }
-  }
-
-  /**
-   * Create a UdfExecutor, using parameters from a serialized thrift object. Used by
-   * the backend.
-   */
-  public UdfExecutor(byte[] thriftParams) throws ImpalaException {
-    THiveUdfExecutorCtorParams request = new THiveUdfExecutorCtorParams();
-    JniUtil.deserializeThrift(PROTOCOL_FACTORY, request, thriftParams);
-
-    String className = request.fn.scalar_fn.symbol;
-    String jarFile = request.local_location;
-    Type retType = Type.fromThrift(request.fn.ret_type);
-    Type[] parameterTypes = new Type[request.fn.arg_types.size()];
-    for (int i = 0; i < request.fn.arg_types.size(); ++i) {
-      parameterTypes[i] = Type.fromThrift(request.fn.arg_types.get(i));
-    }
-    inputBufferPtr_ = request.input_buffer_ptr;
-    inputNullsPtr_ = request.input_nulls_ptr;
-    outputBufferPtr_ = request.output_buffer_ptr;
-    outputNullPtr_ = request.output_null_ptr;
-    outBufferStringPtr_ = 0;
-    outBufferCapacity_ = 0;
-    inputBufferOffsets_ = new int[request.input_byte_offsets.size()];
-    for (int i = 0; i < request.input_byte_offsets.size(); ++i) {
-      inputBufferOffsets_[i] = request.input_byte_offsets.get(i).intValue();
-    }
-
-    init(jarFile, className, retType, parameterTypes);
-  }
-
-  @Override
-  protected void finalize() throws Throwable {
-    close();
-    super.finalize();
-  }
-
-  /**
-   * Releases any resources allocated off the native heap.
-   */
-  public void close() {
-    UnsafeUtil.UNSAFE.freeMemory(outBufferStringPtr_);
-    outBufferStringPtr_ = 0;
-    outBufferCapacity_ = 0;
-  }
-
-  /**
-   * Evaluate function called by the backend. The inputs to the UDF have
-   * been serialized into the input buffer.
-   */
-  public void evaluate() throws ImpalaRuntimeException {
-    try {
-      for (int i = 0; i < argTypes_.length; ++i) {
-        if (UnsafeUtil.UNSAFE.getByte(inputNullsPtr_ + i) == 0) {
-          switch (argTypes_[i]) {
-            case BOOLEAN_WRITABLE:
-            case BYTE_WRITABLE:
-            case SHORT_WRITABLE:
-            case INT_WRITABLE:
-            case LONG_WRITABLE:
-            case FLOAT_WRITABLE:
-            case DOUBLE_WRITABLE:
-            case BYTE_ARRAY:
-            case BYTES_WRITABLE:
-            case TEXT:
-              inputArgs_[i] = inputObjects_[i];
-              break;
-            case BOOLEAN:
-              inputArgs_[i] = ((ImpalaBooleanWritable)inputObjects_[i]).get();
-              break;
-            case TINYINT:
-              inputArgs_[i] = ((ImpalaTinyIntWritable)inputObjects_[i]).get();
-              break;
-            case SMALLINT:
-              inputArgs_[i] = ((ImpalaSmallIntWritable)inputObjects_[i]).get();
-              break;
-            case INT:
-              inputArgs_[i] = ((ImpalaIntWritable)inputObjects_[i]).get();
-              break;
-            case BIGINT:
-              inputArgs_[i] = ((ImpalaBigIntWritable)inputObjects_[i]).get();
-              break;
-            case FLOAT:
-              inputArgs_[i] = ((ImpalaFloatWritable)inputObjects_[i]).get();
-              break;
-            case DOUBLE:
-              inputArgs_[i] = ((ImpalaDoubleWritable)inputObjects_[i]).get();
-              break;
-            case STRING:
-              Preconditions.checkState(inputObjects_[i] instanceof ImpalaBytesWritable);
-              inputArgs_[i] =
-                  new String(((ImpalaBytesWritable)inputObjects_[i]).getBytes());
-              break;
-          }
-        } else {
-          inputArgs_[i] = null;
-        }
-      }
-      evaluate(inputArgs_);
-    } catch (Exception e) {
-      e.printStackTrace(System.err);
-      throw new ImpalaRuntimeException("UDF::evaluate() ran into a problem.", e);
-    }
-  }
-
-  /**
-   * Evaluates the UDF with 'args' as the input to the UDF. This is exposed
-   * for testing and not the version of evaluate() the backend uses.
-   */
-  public long evaluateForTesting(Object... args) throws ImpalaRuntimeException {
-    try {
-      Object[] inputArgs = new Object[args.length];
-      for (int i = 0; i < args.length; ++i) {
-        switch (argTypes_[i]) {
-          case BOOLEAN_WRITABLE:
-          case BYTE_WRITABLE:
-          case SHORT_WRITABLE:
-          case INT_WRITABLE:
-          case LONG_WRITABLE:
-          case FLOAT_WRITABLE:
-          case DOUBLE_WRITABLE:
-          case TEXT:
-          case BYTE_ARRAY:
-          case BYTES_WRITABLE:
-          case STRING:
-            inputArgs[i] = args[i];
-            break;
-          case BOOLEAN:
-            inputArgs[i] = ((ImpalaBooleanWritable)args[i]).get();
-            break;
-          case TINYINT:
-            inputArgs[i] = ((ImpalaTinyIntWritable)args[i]).get();
-            break;
-          case SMALLINT:
-            inputArgs[i] = ((ImpalaSmallIntWritable)args[i]).get();
-            break;
-          case INT:
-            inputArgs[i] = ((ImpalaIntWritable)args[i]).get();
-            break;
-          case BIGINT:
-            inputArgs[i] = ((ImpalaBigIntWritable)args[i]).get();
-            break;
-          case FLOAT:
-            inputArgs[i] = ((ImpalaFloatWritable)args[i]).get();
-            break;
-          case DOUBLE:
-            inputArgs[i] = ((ImpalaDoubleWritable)args[i]).get();
-            break;
-        }
-      }
-      return evaluate(inputArgs);
-    } catch (Exception e) {
-      e.printStackTrace(System.err);
-      throw new ImpalaRuntimeException("UDF::evaluate() ran into a problem.", e);
-    }
-  }
-
-  /**
-   * Evaluates the UDF with 'args' as the input to the UDF.
-   * Returns 0 if the UDF returned NULL (the result is a pointer, so this is okay).
-   */
-  private long evaluate(Object... args) throws ImpalaRuntimeException {
-    try {
-      storeUdfResult(method_.invoke(udf_, args));
-      if (UnsafeUtil.UNSAFE.getByte(outputNullPtr_) == 1) return 0;
-      return outputBufferPtr_;
-    } catch (IllegalArgumentException e) {
-      throw new ImpalaRuntimeException("UDF failed to evaluate", e);
-    } catch (IllegalAccessException e) {
-      throw new ImpalaRuntimeException("UDF failed to evaluate", e);
-    } catch (InvocationTargetException e) {
-      throw new ImpalaRuntimeException("UDF failed to evaluate", e);
-    }
-  }
-
-  public Method getMethod() { return method_; }
-
-  // Sets the result object 'obj' into the outputBufferPtr_
-  private void storeUdfResult(Object obj) throws ImpalaRuntimeException {
-    if (obj == null) {
-      UnsafeUtil.UNSAFE.putByte(outputNullPtr_, (byte)1);
-      return;
-    }
-
-    UnsafeUtil.UNSAFE.putByte(outputNullPtr_, (byte)0);
-    switch (retType_) {
-      case BOOLEAN_WRITABLE: {
-        BooleanWritable val = (BooleanWritable)obj;
-        UnsafeUtil.UNSAFE.putByte(outputBufferPtr_, val.get() ? (byte)1 : 0);
-        return;
-      }
-      case BOOLEAN: {
-        UnsafeUtil.UNSAFE.putByte(outputBufferPtr_, (boolean)obj ? (byte)1 : 0);
-        return;
-      }
-      case BYTE_WRITABLE: {
-        ByteWritable val = (ByteWritable)obj;
-        UnsafeUtil.UNSAFE.putByte(outputBufferPtr_, val.get());
-        return;
-      }
-      case TINYINT: {
-        UnsafeUtil.UNSAFE.putByte(outputBufferPtr_, (byte)obj);
-        return;
-      }
-      case SHORT_WRITABLE: {
-        ShortWritable val = (ShortWritable)obj;
-        UnsafeUtil.UNSAFE.putShort(outputBufferPtr_, val.get());
-        return;
-      }
-      case SMALLINT: {
-        UnsafeUtil.UNSAFE.putShort(outputBufferPtr_, (short)obj);
-        return;
-      }
-      case INT_WRITABLE: {
-        IntWritable val = (IntWritable)obj;
-        UnsafeUtil.UNSAFE.putInt(outputBufferPtr_, val.get());
-        return;
-      }
-      case INT: {
-        UnsafeUtil.UNSAFE.putInt(outputBufferPtr_, (int)obj);
-        return;
-      }
-      case LONG_WRITABLE: {
-        LongWritable val = (LongWritable)obj;
-        UnsafeUtil.UNSAFE.putLong(outputBufferPtr_, val.get());
-        return;
-      }
-      case BIGINT: {
-        UnsafeUtil.UNSAFE.putLong(outputBufferPtr_, (long)obj);
-        return;
-      }
-      case FLOAT_WRITABLE: {
-        FloatWritable val = (FloatWritable)obj;
-        UnsafeUtil.UNSAFE.putFloat(outputBufferPtr_, val.get());
-        return;
-      }
-      case FLOAT: {
-        UnsafeUtil.UNSAFE.putFloat(outputBufferPtr_, (float)obj);
-        return;
-      }
-      case DOUBLE_WRITABLE: {
-        DoubleWritable val = (DoubleWritable)obj;
-        UnsafeUtil.UNSAFE.putDouble(outputBufferPtr_, val.get());
-        return;
-      }
-      case DOUBLE: {
-        UnsafeUtil.UNSAFE.putDouble(outputBufferPtr_, (double)obj);
-        return;
-      }
-      case TEXT: {
-        copyBytesToOutputBuffer(((Text)obj).copyBytes());
-        return;
-      }
-      case BYTE_ARRAY: {
-        copyBytesToOutputBuffer((byte[]) obj);
-        return;
-      }
-      case BYTES_WRITABLE: {
-        copyBytesToOutputBuffer(((BytesWritable)obj).copyBytes());
-        return;
-      }
-      case STRING: {
-        copyBytesToOutputBuffer(((String)obj).getBytes());
-        return;
-      }
-      default:
-        throw new ImpalaRuntimeException("Unsupported return type: " + retType_);
-    }
-  }
-
-  private void copyBytesToOutputBuffer(byte[] bytes) {
-    if (bytes.length > outBufferCapacity_) {
-      outBufferStringPtr_ =
-          UnsafeUtil.UNSAFE.reallocateMemory(outBufferStringPtr_, bytes.length);
-      outBufferCapacity_ = bytes.length;
-      UnsafeUtil.UNSAFE.putLong(outputBufferPtr_, outBufferStringPtr_);
-    }
-    UnsafeUtil.Copy(outBufferStringPtr_, bytes, 0, bytes.length);
-    UnsafeUtil.UNSAFE.putInt(
-        outputBufferPtr_ + ImpalaStringWritable.STRING_VALUE_LEN_OFFSET,
-        bytes.length);
-  }
-
-  // Preallocate the input objects that will be passed to the underlying UDF.
-  // These objects are allocated once and reused across calls to evaluate()
-  private void allocateInputObjects() throws ImpalaRuntimeException {
-    inputObjects_ = new Writable[argTypes_.length];
-    inputArgs_ = new Object[argTypes_.length];
-
-    for (int i = 0; i < argTypes_.length; ++i) {
-      int offset = inputBufferOffsets_[i];
-      switch (argTypes_[i]) {
-        case BOOLEAN:
-        case BOOLEAN_WRITABLE:
-          inputObjects_[i] = new ImpalaBooleanWritable(inputBufferPtr_ + offset);
-          break;
-        case TINYINT:
-        case BYTE_WRITABLE:
-          inputObjects_[i] = new ImpalaTinyIntWritable(inputBufferPtr_ + offset);
-          break;
-        case SMALLINT:
-        case SHORT_WRITABLE:
-          inputObjects_[i] = new ImpalaSmallIntWritable(inputBufferPtr_ + offset);
-          break;
-        case INT:
-        case INT_WRITABLE:
-          inputObjects_[i] = new ImpalaIntWritable(inputBufferPtr_ + offset);
-          break;
-        case BIGINT:
-        case LONG_WRITABLE:
-          inputObjects_[i] = new ImpalaBigIntWritable(inputBufferPtr_ + offset);
-          break;
-        case FLOAT:
-        case FLOAT_WRITABLE:
-          inputObjects_[i] = new ImpalaFloatWritable(inputBufferPtr_ + offset);
-          break;
-        case DOUBLE:
-        case DOUBLE_WRITABLE:
-          inputObjects_[i] = new ImpalaDoubleWritable(inputBufferPtr_ + offset);
-          break;
-        case TEXT:
-          inputObjects_[i] = new ImpalaTextWritable(inputBufferPtr_ + offset);
-          break;
-        case BYTES_WRITABLE:
-          inputObjects_[i] = new ImpalaBytesWritable(inputBufferPtr_ + offset);
-          break;
-        case STRING:
-          // String can be mapped to any String-like Writable class.
-          inputObjects_[i] = new ImpalaBytesWritable(inputBufferPtr_ + offset);
-          break;
-        default:
-          throw new ImpalaRuntimeException("Unsupported argument type: " + argTypes_[i]);
-      }
-    }
-  }
-
-  private ClassLoader getClassLoader(String jarPath) throws MalformedURLException {
-    if (jarPath == null) {
-      return ClassLoader.getSystemClassLoader();
-    } else {
-      URL url = new File(jarPath).toURI().toURL();
-      return URLClassLoader.newInstance(new URL[] { url }, getClass().getClassLoader());
-    }
-  }
-
-  /**
-   * Sets the return type of a Java UDF. Returns true if the return type is compatible
-   * with the return type from the function definition. Throws an ImpalaRuntimeException
-   * if the return type is not supported.
-   */
-  private boolean setReturnType(Type retType, Class<?> udfReturnType)
-      throws ImpalaRuntimeException {
-    if (!JavaUdfDataType.isSupported(retType)) {
-      throw new ImpalaRuntimeException("Unsupported return type: " + retType.toSql());
-    }
-    JavaUdfDataType javaType = JavaUdfDataType.getType(udfReturnType);
-    // Check if the evaluate method return type is compatible with the return type from
-    // the function definition. This happens when both of them map to the same primitive
-    // type.
-    if (retType.getPrimitiveType().toThrift() != javaType.getPrimitiveType()) {
-      return false;
-    }
-    retType_ = javaType;
-    return true;
-  }
-
-  /**
-   * Sets the argument types of a Java UDF. Returns true if the argument types specified
-   * in the UDF are compatible with the argument types of the evaluate() function loaded
-   * from the associated JAR file.
-   */
-  private boolean setArgTypes(Type[] parameterTypes, Class<?>[] udfArgTypes) {
-    Preconditions.checkNotNull(argTypes_);
-    for (int i = 0; i < udfArgTypes.length; ++i) {
-      argTypes_[i] = JavaUdfDataType.getType(udfArgTypes[i]);
-      if (argTypes_[i].getPrimitiveType()
-          != parameterTypes[i].getPrimitiveType().toThrift()) {
-        return false;
-      }
-    }
-    return true;
-  }
-
-  /**
-   * Initializes the UdfExecutor, validating that the UDF has the proper signature.
-   * This uses reflection to look up the "evaluate" function in the UDF class.
-   */
-  private void init(String jarPath, String udfPath,
-      Type retType, Type... parameterTypes) throws
-      ImpalaRuntimeException {
-    ArrayList<String> signatures = Lists.newArrayList();
-    try {
-      LOG.debug("Loading UDF '" + udfPath + "' from " + jarPath);
-      ClassLoader loader = getClassLoader(jarPath);
-      Class<?> c = Class.forName(udfPath, true, loader);
-      Class<? extends UDF> udfClass = c.asSubclass(UDF.class);
-      Constructor<? extends UDF> ctor = udfClass.getConstructor();
-      udf_ = ctor.newInstance();
-      argTypes_ = new JavaUdfDataType[parameterTypes.length];
-      Method[] methods = udfClass.getMethods();
-      for (Method m: methods) {
-        // By convention, the udf must contain the function "evaluate"
-        if (!m.getName().equals(UDF_FUNCTION_NAME)) continue;
-        signatures.add(m.toGenericString());
-        Class<?>[] methodTypes = m.getParameterTypes();
-
-        // Try to match the arguments
-        if (methodTypes.length != parameterTypes.length) continue;
-        if (methodTypes.length == 0 && parameterTypes.length == 0) {
-          // Special case where the UDF doesn't take any input args
-          method_ = m;
-          if (!setReturnType(retType, m.getReturnType())) continue;
-          setArgTypes(parameterTypes, methodTypes);
-          LOG.debug("Loaded UDF '" + udfPath + "' from " + jarPath);
-          return;
-        }
-
-        method_ = m;
-        if (!setReturnType(retType, m.getReturnType())) continue;
-        if (!setArgTypes(parameterTypes, methodTypes)) continue;
-        allocateInputObjects();
-        LOG.debug("Loaded UDF '" + udfPath + "' from " + jarPath);
-        return;
-      }
-
-      StringBuilder sb = new StringBuilder();
-      sb.append("Unable to find evaluate function with the correct signature: ")
-        .append(udfPath + ".evaluate(")
-        .append(Joiner.on(", ").join(parameterTypes))
-        .append(")\n")
-        .append("UDF contains: \n    ")
-        .append(Joiner.on("\n    ").join(signatures));
-      throw new ImpalaRuntimeException(sb.toString());
-    } catch (MalformedURLException e) {
-      throw new ImpalaRuntimeException("Unable load jar.", e);
-    } catch (SecurityException e) {
-      throw new ImpalaRuntimeException("Unable to load function.", e);
-    } catch (ClassNotFoundException e) {
-      throw new ImpalaRuntimeException("Unable to find class.", e);
-    } catch (NoSuchMethodException e) {
-      throw new ImpalaRuntimeException(
-          "Unable to find constructor with no arguments.", e);
-    } catch (IllegalArgumentException e) {
-      throw new ImpalaRuntimeException(
-          "Unable to call UDF constructor with no arguments.", e);
-    } catch (InstantiationException e) {
-      throw new ImpalaRuntimeException("Unable to create UDF instance.", e);
-    } catch (IllegalAccessException e) {
-      throw new ImpalaRuntimeException("Unable to create UDF instance.", e);
-    } catch (InvocationTargetException e) {
-      throw new ImpalaRuntimeException("Unable to create UDF instance.", e);
-    }
-  }
-}
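
UdfExecutor above loads a Hive UDF by reflection: the class must extend org.apache.hadoop.hive.ql.exec.UDF, have a no-argument constructor, and expose one or more methods named evaluate() whose parameter and return types map to the types listed in JavaUdfDataType. A minimal UDF of that shape is sketched below; the package, class, and jar names are illustrative only and are not part of the Impala sources:

    // Illustrative only: package and class names are hypothetical.
    package org.example.udf;

    import org.apache.hadoop.hive.ql.exec.UDF;
    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.Text;

    public class AddOneUdf extends UDF {
      // Matched when the function is declared with an INT argument and INT return type.
      public IntWritable evaluate(IntWritable x) {
        if (x == null) return null;
        return new IntWritable(x.get() + 1);
      }

      // Overloads are allowed; UdfExecutor.init() picks the evaluate() signature
      // whose parameter types match the CREATE FUNCTION declaration.
      public Text evaluate(Text s) {
        if (s == null) return null;
        return new Text(s.toString() + "1");
      }
    }

Packaged into a jar, such a function would typically be registered with a statement along the lines of CREATE FUNCTION add_one(int) RETURNS int LOCATION '/path/to/udfs.jar' SYMBOL='org.example.udf.AddOneUdf' (the jar path here is a placeholder).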

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/planner/AggregationNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/planner/AggregationNode.java b/fe/src/main/java/com/cloudera/impala/planner/AggregationNode.java
deleted file mode 100644
index f6bf8a0..0000000
--- a/fe/src/main/java/com/cloudera/impala/planner/AggregationNode.java
+++ /dev/null
@@ -1,292 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.planner;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Set;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.analysis.AggregateInfo;
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.analysis.FunctionCallExpr;
-import com.cloudera.impala.analysis.SlotId;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.thrift.TAggregationNode;
-import com.cloudera.impala.thrift.TExplainLevel;
-import com.cloudera.impala.thrift.TExpr;
-import com.cloudera.impala.thrift.TPlanNode;
-import com.cloudera.impala.thrift.TPlanNodeType;
-import com.cloudera.impala.thrift.TQueryOptions;
-import com.google.common.base.Objects;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Sets;
-
-/**
- * Aggregation computation.
- */
-public class AggregationNode extends PlanNode {
-  private final static Logger LOG = LoggerFactory.getLogger(AggregationNode.class);
-
-  // Default per-host memory requirement used if no valid stats are available.
-  // TODO: Come up with a more useful heuristic.
-  private final static long DEFAULT_PER_HOST_MEM = 128L * 1024L * 1024L;
-
-  // Conservative minimum size of hash table for low-cardinality aggregations.
-  private final static long MIN_HASH_TBL_MEM = 10L * 1024L * 1024L;
-
-  private final AggregateInfo aggInfo_;
-
-  // Set to true if this aggregation node needs to run the Finalize step. This
-  // node is the root node of a distributed aggregation.
-  private boolean needsFinalize_;
-
-  // If true, use streaming preaggregation algorithm. Not valid if this is a merge agg.
-  private boolean useStreamingPreagg_;
-
-  /**
-   * Create an agg node from aggInfo.
-   */
-  public AggregationNode(PlanNodeId id, PlanNode input, AggregateInfo aggInfo) {
-    super(id, aggInfo.getOutputTupleId().asList(), "AGGREGATE");
-    aggInfo_ = aggInfo;
-    children_.add(input);
-    needsFinalize_ = true;
-  }
-
-  /**
-   * Copy c'tor used in clone().
-   */
-  private AggregationNode(PlanNodeId id, AggregationNode src) {
-    super(id, src, "AGGREGATE");
-    aggInfo_ = src.aggInfo_;
-    needsFinalize_ = src.needsFinalize_;
-  }
-
-  public AggregateInfo getAggInfo() { return aggInfo_; }
-
-  /**
-   * Unsets this node as requiring finalize. Only valid to call this if it is
-   * currently marked as needing finalize.
-   */
-  public void unsetNeedsFinalize() {
-    Preconditions.checkState(needsFinalize_);
-    needsFinalize_ = false;
-  }
-
-  /**
-   * Sets this node as a preaggregation. Only valid to call this if it is not marked
-   * as a preaggregation.
-   */
-  public void setIsPreagg(PlannerContext ctx) {
-    TQueryOptions queryOptions = ctx.getQueryOptions();
-    useStreamingPreagg_ = !queryOptions.disable_streaming_preaggregations &&
-        aggInfo_.getGroupingExprs().size() > 0;
-  }
-
-  /**
-   * Have this node materialize the aggregation's intermediate tuple instead of
-   * the output tuple.
-   */
-  public void setIntermediateTuple() {
-    Preconditions.checkState(!tupleIds_.isEmpty());
-    Preconditions.checkState(tupleIds_.get(0).equals(aggInfo_.getOutputTupleId()));
-    tupleIds_.clear();
-    tupleIds_.add(aggInfo_.getIntermediateTupleId());
-  }
-
-  @Override
-  public boolean isBlockingNode() { return !useStreamingPreagg_; }
-
-  @Override
-  public void init(Analyzer analyzer) throws InternalException {
-    // Assign predicates to the top-most agg in the single-node plan that can evaluate
-    // them, as follows: For non-distinct aggs place them in the 1st phase agg node. For
-    // distinct aggs place them in the 2nd phase agg node. The conjuncts are
-    // transferred to the proper place in the multi-node plan via transferConjuncts().
-    if (tupleIds_.get(0).equals(aggInfo_.getResultTupleId()) && !aggInfo_.isMerge()) {
-      // Ignore predicates bound by a grouping slot produced by a SlotRef grouping expr.
-      // Those predicates are already evaluated below this agg node (e.g., in a scan),
-      // because the grouping slot must be in the same equivalence class as another slot
-      // below this agg node. We must not ignore other grouping slots in order to retain
-      // conjuncts bound by those grouping slots in createEquivConjuncts() (IMPALA-2089).
-      // Those conjuncts cannot be redundant because our equivalence classes do not
-      // capture dependencies with non-SlotRef exprs.
-      Set<SlotId> groupBySlots = Sets.newHashSet();
-      for (int i = 0; i < aggInfo_.getGroupingExprs().size(); ++i) {
-        if (aggInfo_.getGroupingExprs().get(i).unwrapSlotRef(true) == null) continue;
-        groupBySlots.add(aggInfo_.getOutputTupleDesc().getSlots().get(i).getId());
-      }
-      ArrayList<Expr> bindingPredicates =
-          analyzer.getBoundPredicates(tupleIds_.get(0), groupBySlots, true);
-      conjuncts_.addAll(bindingPredicates);
-
-      // also add remaining unassigned conjuncts_
-      assignConjuncts(analyzer);
-
-      analyzer.createEquivConjuncts(tupleIds_.get(0), conjuncts_, groupBySlots);
-    }
-    conjuncts_ = orderConjunctsByCost(conjuncts_);
-    // Compute the mem layout for both tuples here for simplicity.
-    aggInfo_.getOutputTupleDesc().computeMemLayout();
-    aggInfo_.getIntermediateTupleDesc().computeMemLayout();
-
-    // do this at the end so it can take all conjuncts into account
-    computeStats(analyzer);
-
-    // don't call createDefaultSMap(), it would point our conjuncts (= Having clause)
-    // to our input; our conjuncts don't get substituted because they already
-    // refer to our output
-    outputSmap_ = getCombinedChildSmap();
-    aggInfo_.substitute(outputSmap_, analyzer);
-    // assert consistent aggregate expr and slot materialization
-    aggInfo_.checkConsistency();
-  }
-
-  @Override
-  public void computeStats(Analyzer analyzer) {
-    super.computeStats(analyzer);
-    // This is prone to overflow, because we keep multiplying cardinalities,
-    // even if the grouping exprs are functionally dependent (example:
-    // group by the primary key of a table plus a number of other columns from that
-    // same table)
-    // TODO: try to recognize functional dependencies
-    // TODO: as a shortcut, instead of recognizing functional dependencies,
-    // limit the contribution of a single table to the number of rows
-    // of that table (so that when we're grouping by the primary key col plus
-    // some others, the estimate doesn't overshoot dramatically)
-    // cardinality: product of # of distinct values produced by grouping exprs
-
-    // Any non-grouping aggregation has at least one distinct value
-    cardinality_ = aggInfo_.getGroupingExprs().isEmpty() ? 1 :
-      Expr.getNumDistinctValues(aggInfo_.getGroupingExprs());
-    // take HAVING predicate into account
-    LOG.trace("Agg: cardinality=" + Long.toString(cardinality_));
-    if (cardinality_ > 0) {
-      cardinality_ = Math.round((double) cardinality_ * computeSelectivity());
-      LOG.trace("sel=" + Double.toString(computeSelectivity()));
-    }
-    // if we ended up with an overflow, the estimate is certain to be wrong
-    if (cardinality_ < 0) cardinality_ = -1;
-    // Sanity check the cardinality_ based on the input cardinality_.
-    if (getChild(0).getCardinality() != -1) {
-      if (cardinality_ == -1) {
-        // A worst-case cardinality_ is better than an unknown cardinality_.
-        cardinality_ = getChild(0).getCardinality();
-      } else {
-        // An AggregationNode cannot increase the cardinality_.
-        cardinality_ = Math.min(getChild(0).getCardinality(), cardinality_);
-      }
-    }
-    cardinality_ = capAtLimit(cardinality_);
-    LOG.trace("stats Agg: cardinality=" + Long.toString(cardinality_));
-  }
-
-  @Override
-  protected String debugString() {
-    return Objects.toStringHelper(this)
-        .add("aggInfo", aggInfo_.debugString())
-        .addValue(super.debugString())
-        .toString();
-  }
-
-  @Override
-  protected void toThrift(TPlanNode msg) {
-    msg.node_type = TPlanNodeType.AGGREGATION_NODE;
-
-    List<TExpr> aggregateFunctions = Lists.newArrayList();
-    // only serialize agg exprs that are being materialized
-    for (FunctionCallExpr e: aggInfo_.getMaterializedAggregateExprs()) {
-      aggregateFunctions.add(e.treeToThrift());
-    }
-    aggInfo_.checkConsistency();
-    msg.agg_node = new TAggregationNode(
-        aggregateFunctions,
-        aggInfo_.getIntermediateTupleId().asInt(),
-        aggInfo_.getOutputTupleId().asInt(), needsFinalize_,
-        useStreamingPreagg_,
-        getChild(0).getCardinality());
-    List<Expr> groupingExprs = aggInfo_.getGroupingExprs();
-    if (groupingExprs != null) {
-      msg.agg_node.setGrouping_exprs(Expr.treesToThrift(groupingExprs));
-    }
-  }
-
-  @Override
-  protected String getDisplayLabelDetail() {
-    if (useStreamingPreagg_) return "STREAMING";
-    if (needsFinalize_) return "FINALIZE";
-    return null;
-  }
-
-  @Override
-  protected String getNodeExplainString(String prefix, String detailPrefix,
-      TExplainLevel detailLevel) {
-    StringBuilder output = new StringBuilder();
-    String nameDetail = getDisplayLabelDetail();
-    output.append(String.format("%s%s", prefix, getDisplayLabel()));
-    if (nameDetail != null) output.append(" [" + nameDetail + "]");
-    output.append("\n");
-
-    if (detailLevel.ordinal() >= TExplainLevel.STANDARD.ordinal()) {
-      ArrayList<FunctionCallExpr> aggExprs = aggInfo_.getMaterializedAggregateExprs();
-      if (!aggExprs.isEmpty()) {
-        output.append(detailPrefix + "output: ")
-            .append(getExplainString(aggExprs) + "\n");
-      }
-      // TODO: is this the best way to display this? It currently would
-      // show DISTINCT_PC(DISTINCT_PC(col)) for the merge phase, but it's not
-      // very obvious what that means if you don't already know.
-
-      // TODO: group by can be very long. Break it into multiple lines
-      if (!aggInfo_.getGroupingExprs().isEmpty()) {
-        output.append(detailPrefix + "group by: ")
-            .append(getExplainString(aggInfo_.getGroupingExprs()) + "\n");
-      }
-      if (!conjuncts_.isEmpty()) {
-        output.append(detailPrefix + "having: ")
-            .append(getExplainString(conjuncts_) + "\n");
-      }
-    }
-    return output.toString();
-  }
-
-  @Override
-  public void computeCosts(TQueryOptions queryOptions) {
-    Preconditions.checkNotNull(fragment_,
-        "PlanNode must be placed into a fragment before calling this method.");
-    perHostMemCost_ = 0;
-    long perHostCardinality = fragment_.getNumDistinctValues(aggInfo_.getGroupingExprs());
-    if (perHostCardinality == -1) {
-      perHostMemCost_ = DEFAULT_PER_HOST_MEM;
-      return;
-    }
-
-    // Per-host cardinality cannot be greater than the total output cardinality.
-    if (cardinality_ != -1) {
-      perHostCardinality = Math.min(perHostCardinality, cardinality_);
-    }
-    perHostMemCost_ += Math.max(perHostCardinality * avgRowSize_ *
-        PlannerContext.HASH_TBL_SPACE_OVERHEAD, MIN_HASH_TBL_MEM);
-  }
-}
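
AggregationNode.computeStats() above estimates the number of groups as the product of the grouping exprs' distinct-value counts, scales it by the HAVING selectivity, treats overflow as unknown, and caps the result at the input cardinality (and at the limit, if any). A simplified, self-contained restatement of that heuristic, using hypothetical names rather than Impala's planner classes:

    // Hypothetical helper; -1 means "unknown" throughout, matching the planner's
    // convention for cardinalities and NDVs.
    final class AggCardinalityEstimate {
      static long estimate(long[] groupingNdvs, double havingSelectivity,
          long childCardinality, long limit) {
        long card = 1;  // a non-grouping aggregation produces exactly one group
        for (long ndv : groupingNdvs) {
          if (ndv < 0) { card = -1; break; }  // unknown NDV => unknown estimate
          card *= ndv;                        // may overflow to a negative value
        }
        if (card > 0) card = Math.round(card * havingSelectivity);
        if (card < 0) card = -1;              // overflow or unknown => unknown
        if (childCardinality != -1) {
          // An aggregation cannot produce more rows than its input.
          card = (card == -1) ? childCardinality : Math.min(card, childCardinality);
        }
        if (limit != -1 && card != -1) card = Math.min(card, limit);
        return card;
      }
    }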

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/planner/AnalyticEvalNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/planner/AnalyticEvalNode.java b/fe/src/main/java/com/cloudera/impala/planner/AnalyticEvalNode.java
deleted file mode 100644
index ccbdaa2..0000000
--- a/fe/src/main/java/com/cloudera/impala/planner/AnalyticEvalNode.java
+++ /dev/null
@@ -1,249 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.planner;
-
-import java.util.List;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.analysis.AnalyticWindow;
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.analysis.ExprSubstitutionMap;
-import com.cloudera.impala.analysis.OrderByElement;
-import com.cloudera.impala.analysis.TupleDescriptor;
-import com.cloudera.impala.thrift.TAnalyticNode;
-import com.cloudera.impala.thrift.TExplainLevel;
-import com.cloudera.impala.thrift.TPlanNode;
-import com.cloudera.impala.thrift.TPlanNodeType;
-import com.cloudera.impala.thrift.TQueryOptions;
-import com.google.common.base.Joiner;
-import com.google.common.base.Objects;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-/**
- * Computation of analytic exprs.
- */
-public class AnalyticEvalNode extends PlanNode {
-  private final static Logger LOG = LoggerFactory.getLogger(AnalyticEvalNode.class);
-
-  private List<Expr> analyticFnCalls_;
-
-  // Partitioning exprs from the AnalyticInfo
-  private final List<Expr> partitionExprs_;
-
-  // TODO: Remove when the BE uses partitionByLessThan rather than the exprs
-  private List<Expr> substitutedPartitionExprs_;
-  private List<OrderByElement> orderByElements_;
-  private final AnalyticWindow analyticWindow_;
-
-  // Physical tuples used/produced by this analytic node.
-  private final TupleDescriptor intermediateTupleDesc_;
-  private final TupleDescriptor outputTupleDesc_;
-
-  // maps from the logical output slots in logicalTupleDesc_ to their corresponding
-  // physical output slots in outputTupleDesc_
-  private final ExprSubstitutionMap logicalToPhysicalSmap_;
-
-  // predicates constructed from partitionExprs_/orderingExprs_ to
-  // compare input to buffered tuples
-  private final Expr partitionByEq_;
-  private final Expr orderByEq_;
-  private final TupleDescriptor bufferedTupleDesc_;
-
-  public AnalyticEvalNode(
-      PlanNodeId id, PlanNode input, List<Expr> analyticFnCalls,
-      List<Expr> partitionExprs, List<OrderByElement> orderByElements,
-      AnalyticWindow analyticWindow, TupleDescriptor intermediateTupleDesc,
-      TupleDescriptor outputTupleDesc, ExprSubstitutionMap logicalToPhysicalSmap,
-      Expr partitionByEq, Expr orderByEq, TupleDescriptor bufferedTupleDesc) {
-    super(id, "ANALYTIC");
-    Preconditions.checkState(!tupleIds_.contains(outputTupleDesc.getId()));
-    analyticFnCalls_ = analyticFnCalls;
-    partitionExprs_ = partitionExprs;
-    orderByElements_ = orderByElements;
-    analyticWindow_ = analyticWindow;
-    intermediateTupleDesc_ = intermediateTupleDesc;
-    outputTupleDesc_ = outputTupleDesc;
-    logicalToPhysicalSmap_ = logicalToPhysicalSmap;
-    partitionByEq_ = partitionByEq;
-    orderByEq_ = orderByEq;
-    bufferedTupleDesc_ = bufferedTupleDesc;
-    children_.add(input);
-    computeTupleIds();
-  }
-
-  @Override
-  public void computeTupleIds() {
-    clearTupleIds();
-    tupleIds_.addAll(getChild(0).getTupleIds());
-    // we're materializing the input row augmented with the analytic output tuple
-    tupleIds_.add(outputTupleDesc_.getId());
-    nullableTupleIds_.addAll(getChild(0).getNullableTupleIds());
-  }
-
-  @Override
-  public boolean isBlockingNode() { return true; }
-  public List<Expr> getPartitionExprs() { return partitionExprs_; }
-  public List<OrderByElement> getOrderByElements() { return orderByElements_; }
-
-  @Override
-  public void init(Analyzer analyzer) {
-    Preconditions.checkState(conjuncts_.isEmpty());
-    computeMemLayout(analyzer);
-    intermediateTupleDesc_.computeMemLayout();
-
-    // we add the analyticInfo's smap to the combined smap of our child
-    outputSmap_ = logicalToPhysicalSmap_;
-    createDefaultSmap(analyzer);
-
-    // Do not assign any conjuncts here: the conjuncts out of our SelectStmt's
-    // Where clause have already been assigned, and conjuncts coming out of an
-    // enclosing scope need to be evaluated *after* all analytic computations.
-
-    // do this at the end so it can take all conjuncts into account
-    computeStats(analyzer);
-
-    LOG.trace("desctbl: " + analyzer.getDescTbl().debugString());
-
-    // point fn calls, partition and ordering exprs at our input
-    ExprSubstitutionMap childSmap = getCombinedChildSmap();
-    analyticFnCalls_ = Expr.substituteList(analyticFnCalls_, childSmap, analyzer, false);
-    substitutedPartitionExprs_ = Expr.substituteList(partitionExprs_, childSmap,
-        analyzer, false);
-    orderByElements_ = OrderByElement.substitute(orderByElements_, childSmap, analyzer);
-    LOG.trace("evalnode: " + debugString());
-  }
-
-  @Override
-  protected void computeStats(Analyzer analyzer) {
-    super.computeStats(analyzer);
-    cardinality_ = getChild(0).cardinality_;
-    cardinality_ = capAtLimit(cardinality_);
-  }
-
-  @Override
-  protected String debugString() {
-    List<String> orderByElementStrs = Lists.newArrayList();
-    for (OrderByElement element: orderByElements_) {
-      orderByElementStrs.add(element.toSql());
-    }
-    return Objects.toStringHelper(this)
-        .add("analyticFnCalls", Expr.debugString(analyticFnCalls_))
-        .add("partitionExprs", Expr.debugString(partitionExprs_))
-        .add("subtitutedPartitionExprs", Expr.debugString(substitutedPartitionExprs_))
-        .add("orderByElements", Joiner.on(", ").join(orderByElementStrs))
-        .add("window", analyticWindow_)
-        .add("intermediateTid", intermediateTupleDesc_.getId())
-        .add("outputTid", outputTupleDesc_.getId())
-        .add("partitionByEq",
-            partitionByEq_ != null ? partitionByEq_.debugString() : "null")
-        .add("orderByEq",
-            orderByEq_ != null ? orderByEq_.debugString() : "null")
-        .addValue(super.debugString())
-        .toString();
-  }
-
-  @Override
-  protected void toThrift(TPlanNode msg) {
-    msg.node_type = TPlanNodeType.ANALYTIC_EVAL_NODE;
-    msg.analytic_node = new TAnalyticNode();
-    msg.analytic_node.setIntermediate_tuple_id(intermediateTupleDesc_.getId().asInt());
-    msg.analytic_node.setOutput_tuple_id(outputTupleDesc_.getId().asInt());
-    msg.analytic_node.setPartition_exprs(Expr.treesToThrift(substitutedPartitionExprs_));
-    msg.analytic_node.setOrder_by_exprs(
-        Expr.treesToThrift(OrderByElement.getOrderByExprs(orderByElements_)));
-    msg.analytic_node.setAnalytic_functions(Expr.treesToThrift(analyticFnCalls_));
-    if (analyticWindow_ == null) {
-      if (!orderByElements_.isEmpty()) {
-        msg.analytic_node.setWindow(AnalyticWindow.DEFAULT_WINDOW.toThrift());
-      }
-    } else {
-      // TODO: Window boundaries should have range_offset_predicate set
-      msg.analytic_node.setWindow(analyticWindow_.toThrift());
-    }
-    if (partitionByEq_ != null) {
-      msg.analytic_node.setPartition_by_eq(partitionByEq_.treeToThrift());
-    }
-    if (orderByEq_ != null) {
-      msg.analytic_node.setOrder_by_eq(orderByEq_.treeToThrift());
-    }
-    if (bufferedTupleDesc_ != null) {
-      msg.analytic_node.setBuffered_tuple_id(bufferedTupleDesc_.getId().asInt());
-    }
-  }
-
-  @Override
-  protected String getNodeExplainString(String prefix, String detailPrefix,
-      TExplainLevel detailLevel) {
-    StringBuilder output = new StringBuilder();
-    output.append(String.format("%s%s", prefix, getDisplayLabel()));
-    output.append("\n");
-    if (detailLevel.ordinal() >= TExplainLevel.STANDARD.ordinal()) {
-      output.append(detailPrefix + "functions: ");
-      List<String> strings = Lists.newArrayList();
-      for (Expr fnCall: analyticFnCalls_) {
-        strings.add(fnCall.toSql());
-      }
-      output.append(Joiner.on(", ").join(strings));
-      output.append("\n");
-
-      if (!partitionExprs_.isEmpty()) {
-        output.append(detailPrefix + "partition by: ");
-        strings.clear();
-        for (Expr partitionExpr: partitionExprs_) {
-          strings.add(partitionExpr.toSql());
-        }
-        output.append(Joiner.on(", ").join(strings));
-        output.append("\n");
-      }
-
-      if (!orderByElements_.isEmpty()) {
-        output.append(detailPrefix + "order by: ");
-        strings.clear();
-        for (OrderByElement element: orderByElements_) {
-          strings.add(element.toSql());
-        }
-        output.append(Joiner.on(", ").join(strings));
-        output.append("\n");
-      }
-
-      if (analyticWindow_ != null) {
-        output.append(detailPrefix + "window: ");
-        output.append(analyticWindow_.toSql());
-        output.append("\n");
-      }
-
-      if (!conjuncts_.isEmpty()) {
-        output.append(
-            detailPrefix + "predicates: " + getExplainString(conjuncts_) + "\n");
-      }
-    }
-    return output.toString();
-  }
-
-  @Override
-  public void computeCosts(TQueryOptions queryOptions) {
-    Preconditions.checkNotNull(fragment_,
-        "PlanNode must be placed into a fragment before calling this method.");
-    // TODO: come up with estimate based on window
-    perHostMemCost_ = 0;
-  }
-}

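In the toThrift() method above, AnalyticWindow.DEFAULT_WINDOW is serialized only when the analytic call has an ORDER BY but no explicit window clause. Below is a minimal standalone sketch of that rule with illustrative class and method names; the default frame string is an assumption based on the SQL-standard implicit frame, not something this diff spells out.

// Hedged sketch of the window-selection rule in AnalyticEvalNode.toThrift().
// DEFAULT_FRAME stands in for AnalyticWindow.DEFAULT_WINDOW (assumed SQL default).
public class WindowDefaultRule {
  static final String DEFAULT_FRAME =
      "RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW";

  // An explicit window wins; otherwise the default frame applies only when an
  // ORDER BY is present; with neither, no window is set at all.
  static String effectiveWindow(String explicitWindow, boolean hasOrderBy) {
    if (explicitWindow != null) return explicitWindow;
    return hasOrderBy ? DEFAULT_FRAME : null;
  }

  public static void main(String[] args) {
    System.out.println(effectiveWindow(null, true));   // default frame
    System.out.println(effectiveWindow(null, false));  // null (no window)
    System.out.println(effectiveWindow("ROWS BETWEEN 1 PRECEDING AND CURRENT ROW", true));
  }
}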
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/planner/AnalyticPlanner.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/planner/AnalyticPlanner.java b/fe/src/main/java/com/cloudera/impala/planner/AnalyticPlanner.java
deleted file mode 100644
index c02096e..0000000
--- a/fe/src/main/java/com/cloudera/impala/planner/AnalyticPlanner.java
+++ /dev/null
@@ -1,815 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.planner;
-
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.List;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.analysis.AggregateInfoBase;
-import com.cloudera.impala.analysis.AnalyticExpr;
-import com.cloudera.impala.analysis.AnalyticInfo;
-import com.cloudera.impala.analysis.AnalyticWindow;
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.BinaryPredicate;
-import com.cloudera.impala.analysis.BoolLiteral;
-import com.cloudera.impala.analysis.CompoundPredicate;
-import com.cloudera.impala.analysis.CompoundPredicate.Operator;
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.analysis.ExprSubstitutionMap;
-import com.cloudera.impala.analysis.IsNullPredicate;
-import com.cloudera.impala.analysis.OrderByElement;
-import com.cloudera.impala.analysis.SlotDescriptor;
-import com.cloudera.impala.analysis.SlotRef;
-import com.cloudera.impala.analysis.SortInfo;
-import com.cloudera.impala.analysis.TupleDescriptor;
-import com.cloudera.impala.analysis.TupleId;
-import com.cloudera.impala.analysis.TupleIsNullPredicate;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.thrift.TPartitionType;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-
-/**
- * The analytic planner adds plan nodes to an existing plan tree in order to
- * implement the AnalyticInfo of a given query stmt. The resulting plan reflects
- * similarities among analytic exprs with respect to partitioning, ordering and
- * windowing to reduce data exchanges and sorts (the exchanges and sorts are currently
- * not minimal). The generated plan has the following structure:
- * ...
- * (
- *  (
- *    (
- *      analytic node  <-- group of analytic exprs with compatible window
- *    )+               <-- group of analytic exprs with compatible ordering
- *    sort node?
- *  )+                 <-- group of analytic exprs with compatible partitioning
- *  hash exchange?
- * )*                  <-- group of analytic exprs that have different partitioning
- * input plan node
- * ...
- */
-public class AnalyticPlanner {
-  private final static Logger LOG = LoggerFactory.getLogger(AnalyticPlanner.class);
-
-  private final AnalyticInfo analyticInfo_;
-  private final Analyzer analyzer_;
-  private final PlannerContext ctx_;
-
-  public AnalyticPlanner(AnalyticInfo analyticInfo, Analyzer analyzer,
-      PlannerContext ctx) {
-    analyticInfo_ = analyticInfo;
-    analyzer_ = analyzer;
-    ctx_ = ctx;
-  }
-
-  /**
-   * Return plan tree that augments 'root' with plan nodes that implement single-node
-   * evaluation of the AnalyticExprs in analyticInfo.
-   * This plan takes into account a possible hash partition of its input on
-   * 'groupingExprs'; if this is non-null, it returns in 'inputPartitionExprs'
-   * a subset of the grouping exprs which should be used for the aggregate
-   * hash partitioning during the parallelization of 'root'.
-   * TODO: when generating sort orders for the sort groups, optimize the ordering
-   * of the partition exprs (so that subsequent sort operations see the input sorted
-   * on a prefix of their required sort exprs)
-   * TODO: when merging sort groups, recognize equivalent exprs
-   * (using the equivalence classes) rather than looking for expr equality
-   */
-  public PlanNode createSingleNodePlan(PlanNode root,
-      List<Expr> groupingExprs, List<Expr> inputPartitionExprs) throws ImpalaException {
-    List<WindowGroup> windowGroups = collectWindowGroups();
-    for (int i = 0; i < windowGroups.size(); ++i) {
-      windowGroups.get(i).init(analyzer_, "wg-" + i);
-    }
-    List<SortGroup> sortGroups = collectSortGroups(windowGroups);
-    mergeSortGroups(sortGroups);
-    for (SortGroup g: sortGroups) {
-      g.init();
-    }
-    List<PartitionGroup> partitionGroups = collectPartitionGroups(sortGroups);
-    mergePartitionGroups(partitionGroups, root.getNumNodes());
-    orderGroups(partitionGroups);
-    if (groupingExprs != null) {
-      Preconditions.checkNotNull(inputPartitionExprs);
-      computeInputPartitionExprs(
-          partitionGroups, groupingExprs, root.getNumNodes(), inputPartitionExprs);
-    }
-
-    for (PartitionGroup partitionGroup: partitionGroups) {
-      for (int i = 0; i < partitionGroup.sortGroups.size(); ++i) {
-        root = createSortGroupPlan(root, partitionGroup.sortGroups.get(i),
-            i == 0 ? partitionGroup.partitionByExprs : null);
-      }
-    }
-
-    // create equiv classes for newly added slots
-    analyzer_.createIdentityEquivClasses();
-    return root;
-  }
-
-  /**
-   * Coalesce sort groups that have compatible partition-by exprs and
-   * have a prefix relationship.
-   */
-  private void mergeSortGroups(List<SortGroup> sortGroups) {
-    boolean hasMerged = false;
-    do {
-      hasMerged = false;
-      for (SortGroup sg1: sortGroups) {
-        for (SortGroup sg2: sortGroups) {
-          if (sg1 != sg2 && sg1.isPrefixOf(sg2)) {
-            sg1.absorb(sg2);
-            sortGroups.remove(sg2);
-            hasMerged = true;
-            break;
-          }
-        }
-        if (hasMerged) break;
-      }
-    } while (hasMerged);
-  }
-
-  /**
-   * Coalesce partition groups for which the intersection of their
-   * partition exprs has ndv estimate > numNodes, so that the resulting plan
-   * still parallelizes across all nodes.
-   */
-  private void mergePartitionGroups(
-      List<PartitionGroup> partitionGroups, int numNodes) {
-    boolean hasMerged = false;
-    do {
-      hasMerged = false;
-      for (PartitionGroup pg1: partitionGroups) {
-        for (PartitionGroup pg2: partitionGroups) {
-          if (pg1 != pg2) {
-            long ndv = Expr.getNumDistinctValues(
-                Expr.intersect(pg1.partitionByExprs, pg2.partitionByExprs));
-            if (ndv == -1 || ndv < 0 || ndv < numNodes) {
-              // didn't get a usable value or the number of partitions is too small
-              continue;
-            }
-            pg1.merge(pg2);
-            partitionGroups.remove(pg2);
-            hasMerged = true;
-            break;
-          }
-        }
-        if (hasMerged) break;
-      }
-    } while (hasMerged);
-  }
-
-  /**
-   * Determine the partition group that has the maximum intersection in terms
-   * of the estimated ndv of the partition exprs with groupingExprs.
-   * That partition group is placed at the front of partitionGroups, with its
-   * partition exprs reduced to the intersection, and the intersecting groupingExprs
-   * are returned in inputPartitionExprs.
-   */
-  private void computeInputPartitionExprs(List<PartitionGroup> partitionGroups,
-      List<Expr> groupingExprs, int numNodes, List<Expr> inputPartitionExprs) {
-    inputPartitionExprs.clear();
-    Preconditions.checkState(numNodes != -1);
-    // find partition group with maximum intersection
-    long maxNdv = 0;
-    PartitionGroup maxPg = null;
-    List<Expr> maxGroupingExprs = null;
-    for (PartitionGroup pg: partitionGroups) {
-      List<Expr> l1 = Lists.newArrayList();
-      List<Expr> l2 = Lists.newArrayList();
-      Expr.intersect(analyzer_, pg.partitionByExprs, groupingExprs,
-          analyzer_.getEquivClassSmap(), l1, l2);
-      // TODO: also look at l2 and take the max?
-      long ndv = Expr.getNumDistinctValues(l1);
-      if (ndv < 0 || ndv < numNodes || ndv < maxNdv) continue;
-      // found a better partition group
-      maxPg = pg;
-      maxPg.partitionByExprs = l1;
-      maxGroupingExprs = l2;
-      maxNdv = ndv;
-    }
-
-    if (maxNdv > numNodes) {
-      Preconditions.checkNotNull(maxPg);
-      // we found a partition group that gives us enough parallelism;
-      // move it to the front
-      partitionGroups.remove(maxPg);
-      partitionGroups.add(0, maxPg);
-      inputPartitionExprs.addAll(maxGroupingExprs);
-    }
-  }
-
-  /**
-   * Order partition groups (and the sort groups within them) by increasing
-   * totalOutputTupleSize. This minimizes the total volume of data that needs to be
-   * repartitioned and sorted.
-   * Also move the non-partitioning partition group to the end.
-   */
-  private void orderGroups(List<PartitionGroup> partitionGroups) {
-    // remove the non-partitioning group from partitionGroups
-    PartitionGroup nonPartitioning = null;
-    for (PartitionGroup pg: partitionGroups) {
-      if (pg.partitionByExprs.isEmpty()) {
-        nonPartitioning = pg;
-        break;
-      }
-    }
-    if (nonPartitioning != null) partitionGroups.remove(nonPartitioning);
-
-    // order by ascending combined output tuple size
-    Collections.sort(partitionGroups,
-        new Comparator<PartitionGroup>() {
-          public int compare(PartitionGroup pg1, PartitionGroup pg2) {
-            Preconditions.checkState(pg1.totalOutputTupleSize > 0);
-            Preconditions.checkState(pg2.totalOutputTupleSize > 0);
-            int diff = pg1.totalOutputTupleSize - pg2.totalOutputTupleSize;
-            return (diff < 0 ? -1 : (diff > 0 ? 1 : 0));
-          }
-        });
-    if (nonPartitioning != null) partitionGroups.add(nonPartitioning);
-
-    for (PartitionGroup pg: partitionGroups) {
-      pg.orderSortGroups();
-    }
-  }
-
-  /**
-   * Create SortInfo, including sort tuple, to sort entire input row
-   * on sortExprs.
-   */
-  private SortInfo createSortInfo(
-      PlanNode input, List<Expr> sortExprs, List<Boolean> isAsc,
-      List<Boolean> nullsFirst) {
-    // create tuple for sort output = the entire materialized input in a single tuple
-    TupleDescriptor sortTupleDesc =
-        analyzer_.getDescTbl().createTupleDescriptor("sort-tuple");
-    ExprSubstitutionMap sortSmap = new ExprSubstitutionMap();
-    List<Expr> sortSlotExprs = Lists.newArrayList();
-    sortTupleDesc.setIsMaterialized(true);
-    for (TupleId tid: input.getTupleIds()) {
-      TupleDescriptor tupleDesc = analyzer_.getTupleDesc(tid);
-      for (SlotDescriptor inputSlotDesc: tupleDesc.getSlots()) {
-        if (!inputSlotDesc.isMaterialized()) continue;
-        SlotDescriptor sortSlotDesc =
-            analyzer_.copySlotDescriptor(inputSlotDesc, sortTupleDesc);
-        // all output slots need to be materialized
-        sortSlotDesc.setIsMaterialized(true);
-        sortSmap.put(new SlotRef(inputSlotDesc), new SlotRef(sortSlotDesc));
-        sortSlotExprs.add(new SlotRef(inputSlotDesc));
-      }
-    }
-
-    // Lhs exprs to be substituted in ancestor plan nodes could have a rhs that contains
-    // TupleIsNullPredicates. Such predicates require specific tuple ids for evaluation.
-    // Since this sort materializes a new tuple, TupleIsNullPredicates that refer to this
-    // sort's input can no longer be evaluated after the sort. To preserve whether an
-    // input tuple was null or not before this sort node, we materialize those rhs
-    // TupleIsNullPredicates here; ancestor nodes then substitute them with a SlotRef
-    // into the sort's tuple (IMPALA-1519).
-    ExprSubstitutionMap inputSmap = input.getOutputSmap();
-    if (inputSmap != null) {
-      List<Expr> tupleIsNullPredsToMaterialize = Lists.newArrayList();
-      for (int i = 0; i < inputSmap.size(); ++i) {
-        Expr rhsExpr = inputSmap.getRhs().get(i);
-        // Ignore substitutions that are irrelevant at this plan node and its ancestors.
-        if (!rhsExpr.isBoundByTupleIds(input.getTupleIds())) continue;
-        rhsExpr.collect(TupleIsNullPredicate.class, tupleIsNullPredsToMaterialize);
-      }
-      Expr.removeDuplicates(tupleIsNullPredsToMaterialize);
-
-      // Materialize relevant unique TupleIsNullPredicates.
-      for (Expr tupleIsNullPred: tupleIsNullPredsToMaterialize) {
-        SlotDescriptor sortSlotDesc = analyzer_.addSlotDescriptor(sortTupleDesc);
-        sortSlotDesc.setType(tupleIsNullPred.getType());
-        sortSlotDesc.setIsMaterialized(true);
-        sortSlotDesc.setSourceExpr(tupleIsNullPred);
-        sortSlotDesc.setLabel(tupleIsNullPred.toSql());
-        sortSlotExprs.add(tupleIsNullPred.clone());
-      }
-    }
-
-    SortInfo sortInfo = new SortInfo(
-        Expr.substituteList(sortExprs, sortSmap, analyzer_, false), isAsc, nullsFirst);
-    LOG.trace("sortinfo exprs: " + Expr.debugString(sortInfo.getOrderingExprs()));
-    sortInfo.setMaterializedTupleInfo(sortTupleDesc, sortSlotExprs);
-    return sortInfo;
-  }
-
-  /**
-   * Create plan tree for the entire sort group, including all contained window groups.
-   * Marks the SortNode as requiring its input to be partitioned if partitionExprs
-   * is not null (partitionExprs represent the data partition of the entire partition
-   * group of which this sort group is a part).
-   */
-  private PlanNode createSortGroupPlan(PlanNode root, SortGroup sortGroup,
-      List<Expr> partitionExprs) throws ImpalaException {
-    List<Expr> partitionByExprs = sortGroup.partitionByExprs;
-    List<OrderByElement> orderByElements = sortGroup.orderByElements;
-    ExprSubstitutionMap sortSmap = null;
-    TupleId sortTupleId = null;
-    TupleDescriptor bufferedTupleDesc = null;
-    // map from input to buffered tuple
-    ExprSubstitutionMap bufferedSmap = new ExprSubstitutionMap();
-
-    // sort on partition by (pb) + order by (ob) exprs and create pb/ob predicates
-    if (!partitionByExprs.isEmpty() || !orderByElements.isEmpty()) {
-      // first sort on partitionExprs (direction doesn't matter)
-      List<Expr> sortExprs = Lists.newArrayList(partitionByExprs);
-      List<Boolean> isAsc =
-          Lists.newArrayList(Collections.nCopies(sortExprs.size(), Boolean.TRUE));
-      // TODO: utilize a direction and nulls/first last that has benefit
-      // for subsequent sort groups
-      List<Boolean> nullsFirst =
-          Lists.newArrayList(Collections.nCopies(sortExprs.size(), Boolean.TRUE));
-
-      // then sort on orderByExprs
-      for (OrderByElement orderByElement: sortGroup.orderByElements) {
-        sortExprs.add(orderByElement.getExpr());
-        isAsc.add(orderByElement.isAsc());
-        nullsFirst.add(orderByElement.getNullsFirstParam());
-      }
-
-      SortInfo sortInfo = createSortInfo(root, sortExprs, isAsc, nullsFirst);
-      SortNode sortNode = new SortNode(ctx_.getNextNodeId(), root, sortInfo, false, 0);
-
-      // if this sort group does not have partitioning exprs, we want the sort
-      // to be executed like a regular distributed sort
-      if (!partitionByExprs.isEmpty()) sortNode.setIsAnalyticSort(true);
-
-      if (partitionExprs != null) {
-        // create required input partition
-        DataPartition inputPartition = DataPartition.UNPARTITIONED;
-        if (!partitionExprs.isEmpty()) {
-          inputPartition = DataPartition.hashPartitioned(partitionExprs);
-        }
-        sortNode.setInputPartition(inputPartition);
-      }
-
-      root = sortNode;
-      root.init(analyzer_);
-      sortSmap = sortNode.getOutputSmap();
-
-      // create bufferedTupleDesc and bufferedSmap
-      sortTupleId = sortNode.tupleIds_.get(0);
-      bufferedTupleDesc =
-          analyzer_.getDescTbl().copyTupleDescriptor(sortTupleId, "buffered-tuple");
-      LOG.trace("desctbl: " + analyzer_.getDescTbl().debugString());
-
-      List<SlotDescriptor> inputSlots = analyzer_.getTupleDesc(sortTupleId).getSlots();
-      List<SlotDescriptor> bufferedSlots = bufferedTupleDesc.getSlots();
-      for (int i = 0; i < inputSlots.size(); ++i) {
-        bufferedSmap.put(
-            new SlotRef(inputSlots.get(i)), new SlotRef(bufferedSlots.get(i)));
-      }
-    }
-
-    // create one AnalyticEvalNode per window group
-    for (WindowGroup windowGroup: sortGroup.windowGroups) {
-      // Create partition-by (pb) and order-by (ob) less-than predicates between the
-      // input tuple (the output of the preceding sort) and a buffered tuple that is
-      // identical to the input tuple. We need a different tuple descriptor for the
-      // buffered tuple because the generated predicates should compare two different
-      // tuple instances from the same input stream (i.e., the predicates should be
-      // evaluated over a row that is composed of the input and the buffered tuple).
-
-      // we need to remap the pb/ob exprs to a) the sort output, b) our buffer of the
-      // sort input
-      Expr partitionByEq = null;
-      if (!windowGroup.partitionByExprs.isEmpty()) {
-        partitionByEq = createNullMatchingEquals(
-            Expr.substituteList(windowGroup.partitionByExprs, sortSmap, analyzer_, false),
-            sortTupleId, bufferedSmap);
-        LOG.trace("partitionByEq: " + partitionByEq.debugString());
-      }
-      Expr orderByEq = null;
-      if (!windowGroup.orderByElements.isEmpty()) {
-        orderByEq = createNullMatchingEquals(
-            OrderByElement.getOrderByExprs(OrderByElement.substitute(
-                windowGroup.orderByElements, sortSmap, analyzer_)),
-            sortTupleId, bufferedSmap);
-        LOG.trace("orderByEq: " + orderByEq.debugString());
-      }
-
-      root = new AnalyticEvalNode(ctx_.getNextNodeId(), root,
-          windowGroup.analyticFnCalls, windowGroup.partitionByExprs,
-          windowGroup.orderByElements, windowGroup.window,
-          windowGroup.physicalIntermediateTuple, windowGroup.physicalOutputTuple,
-          windowGroup.logicalToPhysicalSmap,
-          partitionByEq, orderByEq, bufferedTupleDesc);
-      root.init(analyzer_);
-    }
-    return root;
-  }
-
-  /**
-   * Create a predicate that checks if all exprs are equal or both sides are null.
-   */
-  private Expr createNullMatchingEquals(List<Expr> exprs, TupleId inputTid,
-      ExprSubstitutionMap bufferedSmap) {
-    Preconditions.checkState(!exprs.isEmpty());
-    Expr result = createNullMatchingEqualsAux(exprs, 0, inputTid, bufferedSmap);
-    result.analyzeNoThrow(analyzer_);
-    return result;
-  }
-
-  /**
-   * Create an unanalyzed predicate that checks, for each position >= i, that the
-   * elements are equal or that both sides are null.
-   *
-   * The predicate has the form
-   * ((lhs[i] is null && rhs[i] is null) || (
-   *   lhs[i] is not null && rhs[i] is not null && lhs[i] = rhs[i]))
-   * && <createNullMatchingEqualsAux(i + 1)>
-   */
-  private Expr createNullMatchingEqualsAux(List<Expr> elements, int i,
-      TupleId inputTid, ExprSubstitutionMap bufferedSmap) {
-    if (i > elements.size() - 1) return new BoolLiteral(true);
-
-    // compare elements[i]
-    Expr lhs = elements.get(i);
-    Preconditions.checkState(lhs.isBound(inputTid));
-    Expr rhs = lhs.substitute(bufferedSmap, analyzer_, false);
-
-    Expr bothNull = new CompoundPredicate(Operator.AND,
-        new IsNullPredicate(lhs, false), new IsNullPredicate(rhs, false));
-    Expr lhsEqRhsNotNull = new CompoundPredicate(Operator.AND,
-        new CompoundPredicate(Operator.AND,
-            new IsNullPredicate(lhs, true), new IsNullPredicate(rhs, true)),
-        new BinaryPredicate(BinaryPredicate.Operator.EQ, lhs, rhs));
-    Expr remainder = createNullMatchingEqualsAux(elements, i + 1, inputTid, bufferedSmap);
-    return new CompoundPredicate(CompoundPredicate.Operator.AND,
-        new CompoundPredicate(Operator.OR, bothNull, lhsEqRhsNotNull),
-        remainder);
-  }
-
-  /**
-   * Collection of AnalyticExprs that share the same partition-by/order-by/window
-   * specification. The AnalyticExprs are stored broken up into their constituent parts.
-   */
-  private static class WindowGroup {
-    public final List<Expr> partitionByExprs;
-    public final List<OrderByElement> orderByElements;
-    public final AnalyticWindow window; // not null
-
-    // Analytic exprs belonging to this window group and their corresponding logical
-    // intermediate and output slots from AnalyticInfo.intermediateTupleDesc_
-    // and AnalyticInfo.outputTupleDesc_.
-    public final List<AnalyticExpr> analyticExprs = Lists.newArrayList();
-    // Result of getFnCall() for every analytic expr.
-    public final List<Expr> analyticFnCalls = Lists.newArrayList();
-    public final List<SlotDescriptor> logicalOutputSlots = Lists.newArrayList();
-    public final List<SlotDescriptor> logicalIntermediateSlots = Lists.newArrayList();
-
-    // Physical output and intermediate tuples as well as an smap that maps the
-    // corresponding logical output slots to their physical slots in physicalOutputTuple.
-    // Set in init().
-    public TupleDescriptor physicalOutputTuple;
-    public TupleDescriptor physicalIntermediateTuple;
-    public final ExprSubstitutionMap logicalToPhysicalSmap = new ExprSubstitutionMap();
-
-    public WindowGroup(AnalyticExpr analyticExpr, SlotDescriptor logicalOutputSlot,
-        SlotDescriptor logicalIntermediateSlot) {
-      partitionByExprs = analyticExpr.getPartitionExprs();
-      orderByElements = analyticExpr.getOrderByElements();
-      window = analyticExpr.getWindow();
-      analyticExprs.add(analyticExpr);
-      analyticFnCalls.add(analyticExpr.getFnCall());
-      logicalOutputSlots.add(logicalOutputSlot);
-      logicalIntermediateSlots.add(logicalIntermediateSlot);
-    }
-
-    /**
-     * True if this analytic function must be evaluated in its own WindowGroup.
-     */
-    private static boolean requiresIndependentEval(AnalyticExpr analyticExpr) {
-      return analyticExpr.getFnCall().getFnName().getFunction().equals(
-          AnalyticExpr.FIRST_VALUE_REWRITE);
-    }
-
-    /**
-     * True if the partition exprs and ordering elements and the window of analyticExpr
-     * match ours.
-     */
-    public boolean isCompatible(AnalyticExpr analyticExpr) {
-      if (requiresIndependentEval(analyticExprs.get(0)) ||
-          requiresIndependentEval(analyticExpr)) {
-        return false;
-      }
-
-      if (!Expr.equalSets(analyticExpr.getPartitionExprs(), partitionByExprs)) {
-        return false;
-      }
-      if (!analyticExpr.getOrderByElements().equals(orderByElements)) return false;
-      if ((window == null) != (analyticExpr.getWindow() == null)) return false;
-      if (window == null) return true;
-      return analyticExpr.getWindow().equals(window);
-    }
-
-    /**
-     * Adds the given analytic expr and its logical slots to this window group.
-     * Assumes the corresponding analyticExpr is compatible with 'this'.
-     */
-    public void add(AnalyticExpr analyticExpr, SlotDescriptor logicalOutputSlot,
-        SlotDescriptor logicalIntermediateSlot) {
-      Preconditions.checkState(isCompatible(analyticExpr));
-      analyticExprs.add(analyticExpr);
-      analyticFnCalls.add(analyticExpr.getFnCall());
-      logicalOutputSlots.add(logicalOutputSlot);
-      logicalIntermediateSlots.add(logicalIntermediateSlot);
-    }
-
-    /**
-     * Creates the physical output and intermediate tuples as well as the logical to
-     * physical smap for this window group. Computes the mem layout for the tuple
-     * descriptors.
-     */
-    public void init(Analyzer analyzer, String tupleName) {
-      Preconditions.checkState(physicalOutputTuple == null);
-      Preconditions.checkState(physicalIntermediateTuple == null);
-      Preconditions.checkState(analyticFnCalls.size() == analyticExprs.size());
-
-      // If needed, create the intermediate tuple first to maintain
-      // intermediateTupleId < outputTupleId for debugging purposes and consistency with
-      // tuple creation for aggregations.
-      boolean requiresIntermediateTuple =
-          AggregateInfoBase.requiresIntermediateTuple(analyticFnCalls);
-      if (requiresIntermediateTuple) {
-        physicalIntermediateTuple =
-            analyzer.getDescTbl().createTupleDescriptor(tupleName + "intermed");
-        physicalOutputTuple =
-            analyzer.getDescTbl().createTupleDescriptor(tupleName + "out");
-      } else {
-        physicalOutputTuple =
-            analyzer.getDescTbl().createTupleDescriptor(tupleName + "out");
-        physicalIntermediateTuple = physicalOutputTuple;
-      }
-
-      Preconditions.checkState(analyticExprs.size() == logicalIntermediateSlots.size());
-      Preconditions.checkState(analyticExprs.size() == logicalOutputSlots.size());
-      for (int i = 0; i < analyticExprs.size(); ++i) {
-        SlotDescriptor logicalOutputSlot = logicalOutputSlots.get(i);
-        SlotDescriptor physicalOutputSlot =
-            analyzer.copySlotDescriptor(logicalOutputSlot, physicalOutputTuple);
-        physicalOutputSlot.setIsMaterialized(true);
-        if (requiresIntermediateTuple) {
-          SlotDescriptor logicalIntermediateSlot = logicalIntermediateSlots.get(i);
-          SlotDescriptor physicalIntermediateSlot = analyzer.copySlotDescriptor(
-              logicalIntermediateSlot, physicalIntermediateTuple);
-          physicalIntermediateSlot.setIsMaterialized(true);
-        }
-        logicalToPhysicalSmap.put(
-            new SlotRef(logicalOutputSlot), new SlotRef(physicalOutputSlot));
-      }
-      physicalOutputTuple.computeMemLayout();
-      if (requiresIntermediateTuple) physicalIntermediateTuple.computeMemLayout();
-    }
-  }
-
-  /**
-   * Extract a minimal set of WindowGroups from analyticExprs.
-   */
-  private List<WindowGroup> collectWindowGroups() {
-    List<Expr> analyticExprs = analyticInfo_.getAnalyticExprs();
-    List<WindowGroup> groups = Lists.newArrayList();
-    for (int i = 0; i < analyticExprs.size(); ++i) {
-      AnalyticExpr analyticExpr = (AnalyticExpr) analyticExprs.get(i);
-      // Do not generate the plan for non-materialized analytic exprs.
-      if (!analyticInfo_.getOutputTupleDesc().getSlots().get(i).isMaterialized()) {
-        continue;
-      }
-      boolean match = false;
-      for (WindowGroup group: groups) {
-        if (group.isCompatible(analyticExpr)) {
-          group.add((AnalyticExpr) analyticInfo_.getAnalyticExprs().get(i),
-              analyticInfo_.getOutputTupleDesc().getSlots().get(i),
-              analyticInfo_.getIntermediateTupleDesc().getSlots().get(i));
-          match = true;
-          break;
-        }
-      }
-      if (!match) {
-        groups.add(new WindowGroup(
-            (AnalyticExpr) analyticInfo_.getAnalyticExprs().get(i),
-            analyticInfo_.getOutputTupleDesc().getSlots().get(i),
-            analyticInfo_.getIntermediateTupleDesc().getSlots().get(i)));
-      }
-    }
-    return groups;
-  }
-
-  /**
-   * Collection of WindowGroups that share the same partition-by/order-by
-   * specification.
-   */
-  private static class SortGroup {
-    public List<Expr> partitionByExprs;
-    public List<OrderByElement> orderByElements;
-    public List<WindowGroup> windowGroups = Lists.newArrayList();
-
-    // sum of windowGroups.physicalOutputTuple.getByteSize()
-    public int totalOutputTupleSize = -1;
-
-    public SortGroup(WindowGroup windowGroup) {
-      partitionByExprs = windowGroup.partitionByExprs;
-      orderByElements = windowGroup.orderByElements;
-      windowGroups.add(windowGroup);
-    }
-
-    /**
-     * True if the partition and ordering exprs of windowGroup match ours.
-     */
-    public boolean isCompatible(WindowGroup windowGroup) {
-      return Expr.equalSets(windowGroup.partitionByExprs, partitionByExprs)
-          && windowGroup.orderByElements.equals(orderByElements);
-    }
-
-    public void add(WindowGroup windowGroup) {
-      Preconditions.checkState(isCompatible(windowGroup));
-      windowGroups.add(windowGroup);
-    }
-
-    /**
-     * Return true if 'this' and other have compatible partition exprs and
-     * our orderByElements are a prefix of other's.
-     */
-    public boolean isPrefixOf(SortGroup other) {
-      if (other.orderByElements.size() > orderByElements.size()) return false;
-      if (!Expr.equalSets(partitionByExprs, other.partitionByExprs)) return false;
-      for (int i = 0; i < other.orderByElements.size(); ++i) {
-        OrderByElement ob = orderByElements.get(i);
-        OrderByElement otherOb = other.orderByElements.get(i);
-        // TODO: compare equiv classes by comparing each equiv class's placeholder
-        // slotref
-        if (!ob.getExpr().equals(otherOb.getExpr())) return false;
-        if (ob.isAsc() != otherOb.isAsc()) return false;
-        if (ob.nullsFirst() != otherOb.nullsFirst()) return false;
-      }
-      return true;
-    }
-
-    /**
-     * Adds other's window groups to ours, assuming that we're a prefix of other.
-     */
-    public void absorb(SortGroup other) {
-      Preconditions.checkState(isPrefixOf(other));
-      windowGroups.addAll(other.windowGroups);
-    }
-
-    /**
-     * Compute totalOutputTupleSize.
-     */
-    public void init() {
-      totalOutputTupleSize = 0;
-      for (WindowGroup g: windowGroups) {
-        TupleDescriptor outputTuple = g.physicalOutputTuple;
-        Preconditions.checkState(outputTuple.isMaterialized());
-        Preconditions.checkState(outputTuple.getByteSize() != -1);
-        totalOutputTupleSize += outputTuple.getByteSize();
-      }
-    }
-
-    private static class SizeLt implements Comparator<WindowGroup> {
-      public int compare(WindowGroup wg1, WindowGroup wg2) {
-        Preconditions.checkState(wg1.physicalOutputTuple != null
-            && wg1.physicalOutputTuple.getByteSize() != -1);
-        Preconditions.checkState(wg2.physicalOutputTuple != null
-            && wg2.physicalOutputTuple.getByteSize() != -1);
-        int diff = wg1.physicalOutputTuple.getByteSize()
-            - wg2.physicalOutputTuple.getByteSize();
-        return (diff < 0 ? -1 : (diff > 0 ? 1 : 0));
-      }
-    }
-
-    private static final SizeLt SIZE_LT;
-    static {
-      SIZE_LT = new SizeLt();
-    }
-
-    /**
-     * Order window groups by increasing size of the output tuple. This minimizes
-     * the total volume of data that needs to be buffered.
-     */
-    public void orderWindowGroups() {
-      Collections.sort(windowGroups, SIZE_LT);
-    }
-  }
-
-  /**
-   * Partitions the windowGroups into SortGroups based on compatible partition-by and
-   * order-by exprs.
-   */
-  private List<SortGroup> collectSortGroups(List<WindowGroup> windowGroups) {
-    List<SortGroup> sortGroups = Lists.newArrayList();
-    for (WindowGroup windowGroup: windowGroups) {
-      boolean match = false;
-      for (SortGroup sortGroup: sortGroups) {
-        if (sortGroup.isCompatible(windowGroup)) {
-          sortGroup.add(windowGroup);
-          match = true;
-          break;
-        }
-      }
-      if (!match) sortGroups.add(new SortGroup(windowGroup));
-    }
-    return sortGroups;
-  }
-
-  /**
-   * Collection of SortGroups that have compatible partition-by specifications.
-   */
-  private static class PartitionGroup {
-    public List<Expr> partitionByExprs;
-    public List<SortGroup> sortGroups = Lists.newArrayList();
-
-    // sum of sortGroups.windowGroups.physicalOutputTuple.getByteSize()
-    public int totalOutputTupleSize = -1;
-
-    public PartitionGroup(SortGroup sortGroup) {
-      partitionByExprs = sortGroup.partitionByExprs;
-      sortGroups.add(sortGroup);
-      totalOutputTupleSize = sortGroup.totalOutputTupleSize;
-    }
-
-    /**
-     * True if the partition exprs of sortGroup are compatible with ours.
-     * For now that means equality.
-     */
-    public boolean isCompatible(SortGroup sortGroup) {
-      return Expr.equalSets(sortGroup.partitionByExprs, partitionByExprs);
-    }
-
-    public void add(SortGroup sortGroup) {
-      Preconditions.checkState(isCompatible(sortGroup));
-      sortGroups.add(sortGroup);
-      totalOutputTupleSize += sortGroup.totalOutputTupleSize;
-    }
-
-    /**
-     * Merge 'other' into 'this'
-     * - partitionByExprs is the intersection of the two
-     * - sortGroups becomes the union
-     */
-    public void merge(PartitionGroup other) {
-      partitionByExprs = Expr.intersect(partitionByExprs, other.partitionByExprs);
-      Preconditions.checkState(Expr.getNumDistinctValues(partitionByExprs) >= 0);
-      sortGroups.addAll(other.sortGroups);
-    }
-
-    /**
-     * Order sort groups by increasing totalOutputTupleSize. This minimizes the total
-     * volume of data that needs to be sorted.
-     */
-    public void orderSortGroups() {
-      Collections.sort(sortGroups,
-          new Comparator<SortGroup>() {
-            public int compare(SortGroup sg1, SortGroup sg2) {
-              Preconditions.checkState(sg1.totalOutputTupleSize > 0);
-              Preconditions.checkState(sg2.totalOutputTupleSize > 0);
-              int diff = sg1.totalOutputTupleSize - sg2.totalOutputTupleSize;
-              return (diff < 0 ? -1 : (diff > 0 ? 1 : 0));
-            }
-          });
-      for (SortGroup sortGroup: sortGroups) {
-        sortGroup.orderWindowGroups();
-      }
-    }
-  }
-
-  /**
-   * Extract a minimal set of PartitionGroups from sortGroups.
-   */
-  private List<PartitionGroup> collectPartitionGroups(List<SortGroup> sortGroups) {
-    List<PartitionGroup> partitionGroups = Lists.newArrayList();
-    for (SortGroup sortGroup: sortGroups) {
-      boolean match = false;
-      for (PartitionGroup partitionGroup: partitionGroups) {
-        if (partitionGroup.isCompatible(sortGroup)) {
-          partitionGroup.add(sortGroup);
-          match = true;
-          break;
-        }
-      }
-      if (!match) partitionGroups.add(new PartitionGroup(sortGroup));
-    }
-    return partitionGroups;
-  }
-}

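createNullMatchingEquals() in the AnalyticPlanner above builds a predicate tree whose runtime meaning is: for every partition-by/order-by expr, the input row and the buffered row are either both NULL or both non-NULL and equal. A self-contained sketch of that semantics over plain Java values follows; it illustrates the predicate's behavior, not Impala's Expr classes.

import java.util.Arrays;
import java.util.List;
import java.util.Objects;

// Hedged sketch of the row-comparison semantics encoded by
// createNullMatchingEqualsAux(), expressed over plain values.
public class NullMatchingEquals {
  // True if, at every position i, lhs[i] and rhs[i] are both null, or both
  // non-null and equal -- mirroring
  // ((lhs is null && rhs is null) ||
  //  (lhs is not null && rhs is not null && lhs = rhs)) AND-ed over all positions.
  static boolean nullMatchingEquals(List<?> lhs, List<?> rhs) {
    if (lhs.size() != rhs.size()) throw new IllegalArgumentException("size mismatch");
    for (int i = 0; i < lhs.size(); ++i) {
      Object l = lhs.get(i);
      Object r = rhs.get(i);
      boolean bothNull = (l == null && r == null);
      boolean bothEqual = (l != null && r != null && Objects.equals(l, r));
      if (!(bothNull || bothEqual)) return false;
    }
    return true;
  }

  public static void main(String[] args) {
    System.out.println(nullMatchingEquals(Arrays.asList(1, null), Arrays.asList(1, null))); // true
    System.out.println(nullMatchingEquals(Arrays.asList(1, null), Arrays.asList(1, 2)));    // false
  }
}

The NULL-matching form matters because partition-by and order-by exprs can legally evaluate to NULL, and a plain equality conjunction would treat NULL = NULL as unknown rather than as a match between a row and its buffered copy.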
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/planner/CohortId.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/planner/CohortId.java b/fe/src/main/java/com/cloudera/impala/planner/CohortId.java
deleted file mode 100644
index d58e5c4..0000000
--- a/fe/src/main/java/com/cloudera/impala/planner/CohortId.java
+++ /dev/null
@@ -1,42 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.planner;
-
-import com.cloudera.impala.common.Id;
-import com.cloudera.impala.common.IdGenerator;
-
-public class CohortId extends Id<CohortId> {
-  // Construction only allowed via an IdGenerator.
-  protected CohortId(int id) {
-    super(id);
-  }
-
-  public static IdGenerator<CohortId> createGenerator() {
-    return new IdGenerator<CohortId>() {
-      @Override
-      public CohortId getNextId() { return new CohortId(nextId_++); }
-      @Override
-      public CohortId getMaxId() { return new CohortId(nextId_ - 1); }
-    };
-  }
-
-  @Override
-  public String toString() {
-    return String.format("%02d", id_);
-  }
-}

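CohortId above is one instance of the Id/IdGenerator pattern used throughout the planner: ids can only be minted through a generator that hands out consecutive values. A stripped-down, self-contained sketch of the pattern with simplified names (not the actual com.cloudera.impala.common.Id and IdGenerator classes, which carry more functionality):

// Hedged sketch of the typed-id pattern and how a generator is used.
public class IdPatternDemo {
  static class Id {
    protected final int id_;
    protected Id(int id) { id_ = id; }
    @Override public String toString() { return String.format("%02d", id_); }
  }

  static abstract class IdGenerator<T extends Id> {
    protected int nextId_ = 0;
    public abstract T getNextId();
    public abstract T getMaxId();
  }

  static class CohortId extends Id {
    private CohortId(int id) { super(id); }
    static IdGenerator<CohortId> createGenerator() {
      return new IdGenerator<CohortId>() {
        @Override public CohortId getNextId() { return new CohortId(nextId_++); }
        @Override public CohortId getMaxId() { return new CohortId(nextId_ - 1); }
      };
    }
  }

  public static void main(String[] args) {
    IdGenerator<CohortId> gen = CohortId.createGenerator();
    System.out.println(gen.getNextId());  // 00
    System.out.println(gen.getNextId());  // 01
    System.out.println(gen.getMaxId());   // 01
  }
}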

[03/61] [partial] incubator-impala git commit: IMPALA-3786: Replace "cloudera" with "apache" (part 1)

Posted by kw...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/DescribeTableStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/DescribeTableStmt.java b/fe/src/main/java/org/apache/impala/analysis/DescribeTableStmt.java
new file mode 100644
index 0000000..b1f9b95
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/DescribeTableStmt.java
@@ -0,0 +1,152 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import java.util.ArrayList;
+
+import org.apache.commons.lang3.StringUtils;
+
+import parquet.Strings;
+
+import com.cloudera.impala.analysis.Path.PathType;
+import com.cloudera.impala.authorization.Privilege;
+import com.cloudera.impala.authorization.PrivilegeRequestBuilder;
+import com.cloudera.impala.catalog.StructType;
+import com.cloudera.impala.catalog.TableLoadingException;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.thrift.TDescribeOutputStyle;
+import com.cloudera.impala.thrift.TDescribeTableParams;
+import com.google.common.base.Preconditions;
+
+/**
+ * Representation of a DESCRIBE table statement which returns metadata on
+ * a specified table:
+ * Syntax: DESCRIBE <path>
+ *         DESCRIBE FORMATTED|EXTENDED <table>
+ *
+ * If FORMATTED|EXTENDED is not specified and the path refers to a table, the statement
+ * only returns info on the given table's column definition (column name, data type, and
+ * comment). If the path refers to a complex typed field within a column, the statement
+ * returns the field names, types, and comments.
+ * If FORMATTED|EXTENDED is specified, extended metadata on the table is returned
+ * (in addition to the column definitions). This metadata includes info about the table
+ * properties, SerDe properties, StorageDescriptor properties, and more.
+ */
+public class DescribeTableStmt extends StatementBase {
+  private final TDescribeOutputStyle outputStyle_;
+
+  /// "."-separated path from the describe statement.
+  private ArrayList<String> rawPath_;
+
+  /// The resolved path to describe, set after analysis.
+  private Path path_;
+
+  /// The fully qualified name of the root table, set after analysis.
+  private TableName tableName_;
+
+  /// Struct type with the fields to display for the described path.
+  private StructType resultStruct_;
+
+  public DescribeTableStmt(ArrayList<String> rawPath, TDescribeOutputStyle outputStyle) {
+    Preconditions.checkNotNull(rawPath);
+    Preconditions.checkArgument(!rawPath.isEmpty());
+    rawPath_ = rawPath;
+    outputStyle_ = outputStyle;
+    path_ = null;
+    tableName_ = null;
+    resultStruct_ = null;
+  }
+
+  @Override
+  public String toSql() {
+    StringBuilder sb = new StringBuilder("DESCRIBE ");
+    if (outputStyle_ != TDescribeOutputStyle.MINIMAL) {
+      sb.append(outputStyle_.toString() + " ");
+    }
+    return sb.toString() + StringUtils.join(rawPath_, ".");
+  }
+
+  public TableName getTableName() { return tableName_; }
+  public TDescribeOutputStyle getOutputStyle() { return outputStyle_; }
+
+
+  /**
+   * Get the privilege requirement, which depends on the output style.
+   */
+  private Privilege getPrivilegeRequirement() {
+    switch (outputStyle_) {
+      case MINIMAL: return Privilege.ANY;
+      case FORMATTED:
+      case EXTENDED:
+        return Privilege.VIEW_METADATA;
+      default:
+        Preconditions.checkArgument(false);
+        return null;
+    }
+  }
+
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    try {
+      path_ = analyzer.resolvePath(rawPath_, PathType.ANY);
+    } catch (AnalysisException ae) {
+      // Register privilege requests to prefer reporting an authorization error over
+      // an analysis error. We should not accidentally reveal the non-existence of a
+      // table/database if the user is not authorized.
+      if (analyzer.hasMissingTbls()) throw ae;
+      if (rawPath_.size() > 1) {
+        analyzer.registerPrivReq(new PrivilegeRequestBuilder()
+            .onTable(rawPath_.get(0), rawPath_.get(1))
+            .allOf(getPrivilegeRequirement()).toRequest());
+      }
+      analyzer.registerPrivReq(new PrivilegeRequestBuilder()
+          .onTable(analyzer.getDefaultDb(), rawPath_.get(0))
+          .allOf(getPrivilegeRequirement()).toRequest());
+      throw ae;
+    } catch (TableLoadingException tle) {
+      throw new AnalysisException(tle.getMessage(), tle);
+    }
+
+    tableName_ = analyzer.getFqTableName(path_.getRootTable().getTableName());
+    analyzer.getTable(tableName_, getPrivilegeRequirement());
+
+    if (path_.destTable() != null) {
+      resultStruct_ = path_.getRootTable().getHiveColumnsAsStruct();
+    } else if (path_.destType().isComplexType()) {
+      if (outputStyle_ == TDescribeOutputStyle.FORMATTED ||
+          outputStyle_ == TDescribeOutputStyle.EXTENDED) {
+        throw new AnalysisException("DESCRIBE FORMATTED|EXTENDED must refer to a table");
+      }
+      Preconditions.checkState(outputStyle_ == TDescribeOutputStyle.MINIMAL);
+      resultStruct_ = Path.getTypeAsStruct(path_.destType());
+    } else {
+      throw new AnalysisException("Cannot describe path '" +
+          Strings.join(rawPath_, ".") + "' targeting scalar type: " +
+          path_.destType().toSql());
+    }
+  }
+
+  public TDescribeTableParams toThrift() {
+    TDescribeTableParams params = new TDescribeTableParams();
+    params.setTable_name(getTableName().getTbl());
+    params.setDb(getTableName().getDb());
+    params.setOutput_style(outputStyle_);
+    params.setResult_struct(resultStruct_.toThrift());
+    return params;
+  }
+}

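The catch block in DescribeTableStmt.analyze() above registers privilege requests for both ways the raw path could name a table (first element as a database, or as a table in the session's default database) before re-throwing, so an unauthorized user sees an authorization error instead of learning whether the table exists. Below is a self-contained sketch of just that path-to-candidate mapping; the helper and variable names are hypothetical, with strings standing in for the real analyzer and privilege machinery.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Hedged sketch: the (db, table) pairs a failing DESCRIBE registers privilege
// requests on.
public class DescribePrivilegeCandidates {
  // For path "a.b....": "a" may be a db containing table "b", and "a" may also
  // be a table in the session's default db. Both candidates get a check.
  static List<String[]> candidates(List<String> rawPath, String defaultDb) {
    List<String[]> result = new ArrayList<>();
    if (rawPath.size() > 1) {
      result.add(new String[] { rawPath.get(0), rawPath.get(1) });
    }
    result.add(new String[] { defaultDb, rawPath.get(0) });
    return result;
  }

  public static void main(String[] args) {
    for (String[] c : candidates(Arrays.asList("sales", "orders", "item"), "default")) {
      System.out.println(c[0] + "." + c[1]);  // sales.orders, then default.sales
    }
  }
}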
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/DescriptorTable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/DescriptorTable.java b/fe/src/main/java/org/apache/impala/analysis/DescriptorTable.java
new file mode 100644
index 0000000..c0d7571
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/DescriptorTable.java
@@ -0,0 +1,198 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+
+import org.apache.commons.lang.StringUtils;
+
+import com.cloudera.impala.catalog.Table;
+import com.cloudera.impala.catalog.View;
+import com.cloudera.impala.common.IdGenerator;
+import com.cloudera.impala.thrift.TDescriptorTable;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+
+/**
+ * Repository for tuple (and slot) descriptors.
+ * Descriptors should only be created through this class, which assigns
+ * them unique ids.
+ */
+public class DescriptorTable {
+  private final HashMap<TupleId, TupleDescriptor> tupleDescs_ = Maps.newHashMap();
+  private final HashMap<SlotId, SlotDescriptor> slotDescs_ = Maps.newHashMap();
+  private final IdGenerator<TupleId> tupleIdGenerator_ = TupleId.createGenerator();
+  private final IdGenerator<SlotId> slotIdGenerator_ = SlotId.createGenerator();
+  // List of referenced tables with no associated TupleDescriptor to ship to the BE.
+  // For example, the output table of an insert query.
+  private final List<Table> referencedTables_ = Lists.newArrayList();
+  // For each table, the set of partitions that are referenced by at least one scan range.
+  private final HashMap<Table, HashSet<Long>> referencedPartitionsPerTable_ =
+      Maps.newHashMap();
+
+  public TupleDescriptor createTupleDescriptor(String debugName) {
+    TupleDescriptor d = new TupleDescriptor(tupleIdGenerator_.getNextId(), debugName);
+    tupleDescs_.put(d.getId(), d);
+    return d;
+  }
+
+  /**
+   * Create copy of src with new id. The returned descriptor has its mem layout
+   * computed.
+   */
+  public TupleDescriptor copyTupleDescriptor(TupleId srcId, String debugName) {
+    TupleDescriptor d = new TupleDescriptor(tupleIdGenerator_.getNextId(), debugName);
+    tupleDescs_.put(d.getId(), d);
+    // create copies of slots
+    TupleDescriptor src = tupleDescs_.get(srcId);
+    for (SlotDescriptor slot: src.getSlots()) {
+      copySlotDescriptor(d, slot);
+    }
+    d.computeMemLayout();
+    Preconditions.checkState(d.getByteSize() == src.getByteSize());
+    return d;
+  }
+
+  public SlotDescriptor addSlotDescriptor(TupleDescriptor d) {
+    SlotDescriptor result = new SlotDescriptor(slotIdGenerator_.getNextId(), d);
+    d.addSlot(result);
+    slotDescs_.put(result.getId(), result);
+    return result;
+  }
+
+  /**
+   * Append copy of src to dest.
+   */
+  public SlotDescriptor copySlotDescriptor(TupleDescriptor dest, SlotDescriptor src) {
+    SlotDescriptor result = new SlotDescriptor(slotIdGenerator_.getNextId(), dest, src);
+    dest.addSlot(result);
+    slotDescs_.put(result.getId(), result);
+    return result;
+  }
+
+  public TupleDescriptor getTupleDesc(TupleId id) { return tupleDescs_.get(id); }
+  public SlotDescriptor getSlotDesc(SlotId id) { return slotDescs_.get(id); }
+  public Collection<TupleDescriptor> getTupleDescs() { return tupleDescs_.values(); }
+  public Collection<SlotDescriptor> getSlotDescs() { return slotDescs_.values(); }
+  public TupleId getMaxTupleId() { return tupleIdGenerator_.getMaxId(); }
+  public SlotId getMaxSlotId() { return slotIdGenerator_.getMaxId(); }
+
+  public void addReferencedTable(Table table) {
+    referencedTables_.add(table);
+  }
+
+  /**
+   * Find the set of referenced partitions for the given table.  Allocates a set if
+   * none has been allocated for the table yet.
+   */
+  private HashSet<Long> getReferencedPartitions(Table table) {
+    HashSet<Long> refPartitions = referencedPartitionsPerTable_.get(table);
+    if (refPartitions == null) {
+      refPartitions = new HashSet<Long>();
+      referencedPartitionsPerTable_.put(table, refPartitions);
+    }
+    return refPartitions;
+  }
+
+  /**
+   * Add the partition with ID partitionId to the set of referenced partitions for the
+   * given table.
+   */
+  public void addReferencedPartition(Table table, long partitionId) {
+    getReferencedPartitions(table).add(partitionId);
+  }
+
+  /**
+   * Marks all slots in list as materialized.
+   */
+  public void markSlotsMaterialized(List<SlotId> ids) {
+    for (SlotId id: ids) {
+      getSlotDesc(id).setIsMaterialized(true);
+    }
+  }
+
+  /**
+   * Return all ids in slotIds that belong to tupleId.
+   */
+  public List<SlotId> getTupleSlotIds(List<SlotId> slotIds, TupleId tupleId) {
+    List<SlotId> result = Lists.newArrayList();
+    for (SlotId id: slotIds) {
+      if (getSlotDesc(id).getParent().getId().equals(tupleId)) result.add(id);
+    }
+    return result;
+  }
+
+  // Computes physical layout parameters of all descriptors.
+  // Call this only after the last descriptor was added.
+  // Test-only.
+  public void computeMemLayout() {
+    for (TupleDescriptor d: tupleDescs_.values()) {
+      d.computeMemLayout();
+    }
+  }
+
+  public TDescriptorTable toThrift() {
+    TDescriptorTable result = new TDescriptorTable();
+    HashSet<Table> referencedTbls = Sets.newHashSet();
+    HashSet<Table> allPartitionsTbls = Sets.newHashSet();
+    for (TupleDescriptor tupleDesc: tupleDescs_.values()) {
+      // inline view of a non-constant select has a non-materialized tuple descriptor
+      // in the descriptor table just for type checking, which we need to skip
+      if (tupleDesc.isMaterialized()) {
+        // TODO: Ideally, we should call tupleDesc.checkIsExecutable() here, but there
+        // currently are several situations in which we send materialized tuples without
+        // a mem layout to the BE, e.g., when unnesting unions or when replacing plan
+        // trees with an EmptySetNode.
+        result.addToTupleDescriptors(tupleDesc.toThrift());
+        Table table = tupleDesc.getTable();
+        if (table != null && !(table instanceof View)) referencedTbls.add(table);
+        // Only serialize materialized slots
+        for (SlotDescriptor slotD: tupleDesc.getMaterializedSlots()) {
+          result.addToSlotDescriptors(slotD.toThrift());
+        }
+      }
+    }
+    for (Table table: referencedTables_) {
+      referencedTbls.add(table);
+      // We don't know which partitions are needed for INSERT, so include them all.
+      allPartitionsTbls.add(table);
+    }
+    for (Table tbl: referencedTbls) {
+      HashSet<Long> referencedPartitions = null; // null means include all partitions.
+      if (!allPartitionsTbls.contains(tbl)) {
+        referencedPartitions = getReferencedPartitions(tbl);
+      }
+      result.addToTableDescriptors(tbl.toThriftDescriptor(referencedPartitions));
+    }
+    return result;
+  }
+
+  public String debugString() {
+    StringBuilder out = new StringBuilder();
+    out.append("tuples:\n");
+    for (TupleDescriptor desc: tupleDescs_.values()) {
+      out.append(desc.debugString() + "\n");
+    }
+    return out.toString();
+  }
+}

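DescriptorTable above tracks, per table, which partition ids are touched by scan ranges, and falls back to "all partitions" for tables referenced without a tuple descriptor (such as INSERT targets), signalled by a null partition set in toThrift(). A small standalone sketch of that bookkeeping using standard collections, with illustrative names in place of the real Table and descriptor types:

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// Hedged sketch of the referenced-partition bookkeeping, with strings standing
// in for Table objects.
public class ReferencedPartitionsDemo {
  private final Map<String, Set<Long>> referencedPartitionsPerTable = new HashMap<>();
  private final Set<String> allPartitionsTables = new HashSet<>();

  // Lazily allocate the per-table partition set, as getReferencedPartitions() does.
  void addReferencedPartition(String table, long partitionId) {
    referencedPartitionsPerTable
        .computeIfAbsent(table, t -> new HashSet<>())
        .add(partitionId);
  }

  // Tables referenced without a tuple descriptor (e.g. INSERT targets) need all
  // partitions; that case is signalled with a null set, as in toThrift().
  void addReferencedTable(String table) { allPartitionsTables.add(table); }

  Set<Long> partitionsToSerialize(String table) {
    if (allPartitionsTables.contains(table)) return null;  // null == all partitions
    return referencedPartitionsPerTable.getOrDefault(table, new HashSet<Long>());
  }

  public static void main(String[] args) {
    ReferencedPartitionsDemo d = new ReferencedPartitionsDemo();
    d.addReferencedPartition("scanned_tbl", 3L);
    d.addReferencedPartition("scanned_tbl", 7L);
    d.addReferencedTable("insert_target_tbl");
    System.out.println(d.partitionsToSerialize("scanned_tbl"));        // e.g. [3, 7]
    System.out.println(d.partitionsToSerialize("insert_target_tbl"));  // null
  }
}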
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/DistributeParam.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/DistributeParam.java b/fe/src/main/java/org/apache/impala/analysis/DistributeParam.java
new file mode 100644
index 0000000..e718d6b
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/DistributeParam.java
@@ -0,0 +1,199 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import java.math.BigDecimal;
+import java.util.ArrayList;
+import java.util.List;
+
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.thrift.TDistributeByHashParam;
+import com.cloudera.impala.thrift.TDistributeByRangeParam;
+import com.cloudera.impala.thrift.TDistributeParam;
+import com.cloudera.impala.thrift.TDistributeType;
+import com.cloudera.impala.thrift.TRangeLiteral;
+import com.cloudera.impala.thrift.TRangeLiteralList;
+import com.google.common.base.Joiner;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+
+/**
+ * Represents the information of
+ *
+ * DISTRIBUTE BY HASH[(col_def_list)] INTO n BUCKETS
+ * DISTRIBUTE BY RANGE[(col_def_list)] SPLIT ROWS ( (v1,v2,v3), ...)
+ *
+ * clauses in CREATE TABLE statements, for storage engines that support them (e.g. Kudu).
+ *
+ * A table can be hash or range partitioned, or combinations of both. A distribute
+ * clause represents one particular distribution rule. For both HASH and RANGE types,
+ * some of the error checking is done during the analysis, but most of it is deferred
+ * until the table is actually created.
+  */
+public class DistributeParam implements ParseNode {
+
+  /**
+   * Creates a DistributeParam partitioned by hash.
+   */
+  public static DistributeParam createHashParam(List<String> cols, BigDecimal buckets) {
+    return new DistributeParam(Type.HASH, cols, buckets);
+  }
+
+  /**
+   * Creates a DistributeParam partitioned by range.
+   */
+  public static DistributeParam createRangeParam(List<String> cols,
+      ArrayList<ArrayList<LiteralExpr>> splitRows) {
+    return new DistributeParam(Type.RANGE, cols, splitRows);
+  }
+
+  private static final int NO_BUCKETS = -1;
+
+  /**
+   * The type of the distribution rule.
+   */
+  public enum Type {
+    HASH, RANGE
+  };
+
+  private List<String> columns_;
+
+  private final Type type_;
+
+  // Only relevant for hash partitioning, -1 otherwise
+  private final int num_buckets_;
+
+  // Only relevant for range partitioning, null otherwise
+  private final ArrayList<ArrayList<LiteralExpr>> splitRows_;
+
+  // Set in analyze()
+  private TDistributeByRangeParam rangeParam_;
+
+  private DistributeParam(Type t, List<String> cols, BigDecimal buckets) {
+    type_ = t;
+    columns_ = cols;
+    num_buckets_ = buckets.intValue();
+    splitRows_ = null;
+  }
+
+  private DistributeParam(Type t, List<String> cols,
+      ArrayList<ArrayList<LiteralExpr>> splitRows) {
+    type_ = t;
+    columns_ = cols;
+    splitRows_ = splitRows;
+    num_buckets_ = NO_BUCKETS;
+  }
+
+  /**
+   * TODO Refactor the logic below to analyze 'columns_'. This analysis should output
+   * a vector of column types that would then be used during the analysis of the split
+   * rows.
+   */
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    if (type_ == Type.HASH && num_buckets_ <= 1) {
+      throw new AnalysisException(String.format(
+          "Number of buckets in DISTRIBUTE BY clause '%s' must be larger than 1.",
+          toSql()));
+    } else if (type_ == Type.RANGE) {
+      // Creating the thrift structure simultaneously checks for semantic errors
+      rangeParam_ = new TDistributeByRangeParam();
+      rangeParam_.setColumns(columns_);
+
+      for (ArrayList<LiteralExpr> splitRow : splitRows_) {
+        TRangeLiteralList list = new TRangeLiteralList();
+        if (splitRow.size() != columns_.size()) {
+          throw new AnalysisException(String.format(
+              "SPLIT ROWS has different size than number of projected key columns: %d. "
+                  + "Split row: %s", columns_.size(), splitRowToString(splitRow)));
+        }
+        for (LiteralExpr expr : splitRow) {
+          expr.analyze(analyzer);
+          TRangeLiteral literal = new TRangeLiteral();
+          if (expr instanceof NumericLiteral) {
+            NumericLiteral num = (NumericLiteral) expr;
+            if (num.getType().isDecimal() || num.getType().isFloatingPointType()) {
+              throw new AnalysisException("Only integral and string values allowed for" +
+                  " split rows.");
+            } else {
+              literal.setInt_literal(num.getIntValue());
+            }
+          } else if (expr instanceof StringLiteral) {
+            StringLiteral string = (StringLiteral) expr;
+            literal.setString_literal(string.getStringValue());
+          } else if (expr instanceof BoolLiteral) {
+            BoolLiteral bool = (BoolLiteral) expr;
+            literal.setBool_literal(bool.getValue());
+          } else {
+            throw new AnalysisException(String.format("Split row value is not supported: "
+                + "%s (Type: %s).", expr.getStringValue(), expr.getType().toSql()));
+          }
+          list.addToValues(literal);
+        }
+        rangeParam_.addToSplit_rows(list);
+      }
+    }
+  }
+
+  @Override
+  public String toSql() {
+    if (num_buckets_ == NO_BUCKETS) {
+      List<String> splitRowStrings = Lists.newArrayList();
+      for (ArrayList<LiteralExpr> splitRow : splitRows_) {
+        splitRowStrings.add(splitRowToString(splitRow));
+      }
+      return String.format("RANGE(%s) INTO RANGES(%s)", Joiner.on(", ").join(columns_),
+          Joiner.on(", ").join(splitRowStrings));
+    } else {
+      return String.format("HASH(%s) INTO %d BUCKETS", Joiner.on(", ").join(columns_),
+          num_buckets_);
+    }
+  }
+
+  private String splitRowToString(ArrayList<LiteralExpr> splitRow) {
+    StringBuilder builder = new StringBuilder();
+    builder.append("(");
+    List<String> rangeElementStrings = Lists.newArrayList();
+    for (LiteralExpr rangeElement : splitRow) {
+      rangeElementStrings.add(rangeElement.toSql());
+    }
+    builder.append(Joiner.on(", ").join(rangeElementStrings));
+    builder.append(")");
+    return builder.toString();
+  }
+
+  TDistributeParam toThrift() {
+    TDistributeParam result = new TDistributeParam();
+    if (type_ == Type.HASH) {
+      TDistributeByHashParam hash = new TDistributeByHashParam();
+      hash.setNum_buckets(num_buckets_);
+      hash.setColumns(columns_);
+      result.setBy_hash_param(hash);
+    } else {
+      Preconditions.checkState(type_ == Type.RANGE);
+
+      result.setBy_range_param(rangeParam_);
+    }
+    return result;
+  }
+
+  public List<String> getColumns() { return columns_; }
+  public void setColumns(List<String> cols) { columns_ = cols; }
+  public Type getType_() { return type_; }
+  public int getNumBuckets() { return num_buckets_; }
+}

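A minimal usage sketch of the hash factory method above, assuming the pre-rename
com.cloudera.impala classes and Guava are on the classpath (column names and bucket
count are made up for illustration):

  import java.math.BigDecimal;

  import com.cloudera.impala.analysis.DistributeParam;
  import com.google.common.collect.Lists;

  public class DistributeParamSketch {
    public static void main(String[] args) {
      // Hash-distribute over columns (a, b) into 16 buckets.
      DistributeParam hash = DistributeParam.createHashParam(
          Lists.newArrayList("a", "b"), new BigDecimal(16));
      // Prints: HASH(a, b) INTO 16 BUCKETS
      System.out.println(hash.toSql());
      // createRangeParam() additionally takes split rows (lists of LiteralExpr);
      // those are validated in analyze() before toThrift() can be called.
    }
  }
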
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/DropDataSrcStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/DropDataSrcStmt.java b/fe/src/main/java/org/apache/impala/analysis/DropDataSrcStmt.java
new file mode 100644
index 0000000..f5642fa
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/DropDataSrcStmt.java
@@ -0,0 +1,61 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.thrift.TDropDataSourceParams;
+import com.google.common.base.Preconditions;
+
+/**
+ * Represents a DROP DATA SOURCE statement.
+ */
+public class DropDataSrcStmt extends StatementBase {
+
+  private final String dataSrcName_;
+  private final boolean ifExists_;
+
+  public DropDataSrcStmt(String dataSrcName, boolean ifExists) {
+    Preconditions.checkNotNull(dataSrcName);
+    this.dataSrcName_ = dataSrcName.toLowerCase();
+    this.ifExists_ = ifExists;
+  }
+
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    if (!MetaStoreUtils.validateName(dataSrcName_) ||
+        (!ifExists_ && analyzer.getCatalog().getDataSource(dataSrcName_) == null)) {
+      throw new AnalysisException(Analyzer.DATA_SRC_DOES_NOT_EXIST_ERROR_MSG +
+          dataSrcName_);
+    }
+  }
+
+  @Override
+  public String toSql() {
+    StringBuilder sb = new StringBuilder();
+    sb.append("DROP DATA SOURCE ");
+    if (ifExists_) sb.append("IF EXISTS ");
+    sb.append(dataSrcName_);
+    return sb.toString();
+  }
+
+  public TDropDataSourceParams toThrift() {
+    return new TDropDataSourceParams(dataSrcName_).setIf_exists(ifExists_);
+  }
+}

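A minimal usage sketch of DropDataSrcStmt with a made-up data source name; the
printed string follows from toSql() as written:

  import com.cloudera.impala.analysis.DropDataSrcStmt;

  public class DropDataSrcSketch {
    public static void main(String[] args) {
      // The constructor lower-cases the data source name.
      DropDataSrcStmt stmt = new DropDataSrcStmt("MySource", /* ifExists */ true);
      // Prints: DROP DATA SOURCE IF EXISTS mysource
      System.out.println(stmt.toSql());
      // analyze() additionally validates the name and, without IF EXISTS,
      // requires the data source to exist in the catalog.
    }
  }
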
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/DropDbStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/DropDbStmt.java b/fe/src/main/java/org/apache/impala/analysis/DropDbStmt.java
new file mode 100644
index 0000000..af7fae1
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/DropDbStmt.java
@@ -0,0 +1,79 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import com.cloudera.impala.authorization.Privilege;
+import com.cloudera.impala.catalog.Db;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.thrift.TDropDbParams;
+
+/**
+ * Represents a DROP DATABASE [IF EXISTS] [CASCADE | RESTRICT] statement
+ */
+public class DropDbStmt extends StatementBase {
+  private final String dbName_;
+  private final boolean ifExists_;
+  private final boolean cascade_;
+
+  /**
+   * Constructor for building the drop statement. If ifExists is true, an error will not
+   * be thrown if the database does not exist. If cascade is true, all the tables in the
+   * database will be dropped.
+   */
+  public DropDbStmt(String dbName, boolean ifExists, boolean cascade) {
+    this.dbName_ = dbName;
+    this.ifExists_ = ifExists;
+    this.cascade_ = cascade;
+  }
+
+  public String getDb() { return dbName_; }
+  public boolean getIfExists() { return ifExists_; }
+  public boolean getCascade() { return cascade_; }
+
+  @Override
+  public String toSql() {
+    StringBuilder sb = new StringBuilder("DROP DATABASE");
+    if (ifExists_) sb.append(" IF EXISTS ");
+    sb.append(getDb());
+    if (cascade_) sb.append(" CASCADE");
+    return sb.toString();
+  }
+
+  public TDropDbParams toThrift() {
+    TDropDbParams params = new TDropDbParams();
+    params.setDb(getDb());
+    params.setIf_exists(getIfExists());
+    params.setCascade(getCascade());
+    return params;
+  }
+
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    Db db = analyzer.getDb(dbName_, Privilege.DROP, false);
+    if (db == null && !ifExists_) {
+      throw new AnalysisException(Analyzer.DB_DOES_NOT_EXIST_ERROR_MSG + dbName_);
+    }
+
+    if (analyzer.getDefaultDb().toLowerCase().equals(dbName_.toLowerCase())) {
+      throw new AnalysisException("Cannot drop current default database: " + dbName_);
+    }
+    if (db != null && db.numFunctions() > 0 && !cascade_) {
+      throw new AnalysisException("Cannot drop non-empty database: " + dbName_);
+    }
+  }
+}
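A minimal usage sketch for DropDbStmt, with a made-up database name:

  import com.cloudera.impala.analysis.DropDbStmt;

  public class DropDbSketch {
    public static void main(String[] args) {
      DropDbStmt stmt =
          new DropDbStmt("staging_db", /* ifExists */ true, /* cascade */ true);
      // Prints: DROP DATABASE IF EXISTS staging_db CASCADE
      System.out.println(stmt.toSql());
      // analyze() rejects dropping the current default database and, without
      // CASCADE, any database that still contains functions.
    }
  }
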

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/DropFunctionStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/DropFunctionStmt.java b/fe/src/main/java/org/apache/impala/analysis/DropFunctionStmt.java
new file mode 100644
index 0000000..39f5ff9
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/DropFunctionStmt.java
@@ -0,0 +1,113 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import com.cloudera.impala.authorization.AuthorizeableFn;
+import com.cloudera.impala.authorization.Privilege;
+import com.cloudera.impala.authorization.PrivilegeRequest;
+import com.cloudera.impala.catalog.Db;
+import com.cloudera.impala.catalog.Function;
+import com.cloudera.impala.catalog.Type;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.thrift.TDropFunctionParams;
+import com.cloudera.impala.thrift.TFunctionCategory;
+
+import java.util.ArrayList;
+
+/**
+ * Represents a DROP FUNCTION [IF EXISTS] statement
+ * TODO: try to consolidate this with the other Drop*Stmt class, perhaps
+ * by adding a DropStatementBase class.
+ */
+public class DropFunctionStmt extends StatementBase {
+  private final FunctionName fnName_;
+  private final FunctionArgs fnArgs_;
+  private final boolean ifExists_;
+
+  // Set in analyze().
+  private Function desc_;
+
+  /**
+   * Constructor for building the drop statement. If ifExists is true, an error will not
+   * be thrown if the function does not exist.
+   */
+  public DropFunctionStmt(FunctionName fnName, FunctionArgs fnArgs, boolean ifExists) {
+    fnName_ = fnName;
+    fnArgs_ = fnArgs;
+    ifExists_ = ifExists;
+  }
+
+  public FunctionName getFunction() { return desc_.getFunctionName(); }
+  public boolean getIfExists() { return ifExists_; }
+  private boolean hasSignature() { return fnArgs_ != null; }
+
+  @Override
+  public String toSql() {
+    StringBuilder sb = new StringBuilder("DROP FUNCTION");
+    if (ifExists_) sb.append(" IF EXISTS ");
+    sb.append(desc_.signatureString());
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public TDropFunctionParams toThrift() {
+    TDropFunctionParams params = new TDropFunctionParams();
+    params.setFn_name(desc_.getFunctionName().toThrift());
+    params.setArg_types(Type.toThrift(desc_.getArgs()));
+    params.setIf_exists(getIfExists());
+    if (hasSignature()) params.setSignature(desc_.signatureString());
+    return params;
+  }
+
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    fnName_.analyze(analyzer);
+
+    if (hasSignature()) {
+      fnArgs_.analyze(analyzer);
+      desc_ = new Function(fnName_, fnArgs_.getArgTypes(), Type.INVALID,
+          fnArgs_.hasVarArgs());
+    } else {
+      desc_ = new Function(fnName_, new ArrayList<Type>(), Type.INVALID,
+          false);
+    }
+
+    // For now, if authorization is enabled, the user needs ALL on the server
+    // to drop functions.
+    // TODO: this is not the right granularity but acceptable for now.
+    analyzer.registerPrivReq(new PrivilegeRequest(
+        new AuthorizeableFn(desc_.signatureString()), Privilege.ALL));
+
+    Db db =  analyzer.getDb(desc_.dbName(), Privilege.DROP, false);
+    if (db == null && !ifExists_) {
+      throw new AnalysisException(Analyzer.DB_DOES_NOT_EXIST_ERROR_MSG + desc_.dbName());
+    }
+
+    if (!hasSignature() && db != null && db.getFunctions(
+        desc_.functionName()).isEmpty() && !ifExists_) {
+      throw new AnalysisException(
+          Analyzer.FN_DOES_NOT_EXIST_ERROR_MSG + desc_.functionName());
+    }
+
+    if (hasSignature() && analyzer.getCatalog().getFunction(
+        desc_, Function.CompareMode.IS_IDENTICAL) == null && !ifExists_) {
+      throw new AnalysisException(
+          Analyzer.FN_DOES_NOT_EXIST_ERROR_MSG + desc_.signatureString());
+    }
+  }
+}
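A construction-only sketch for DropFunctionStmt; it assumes FunctionName can be built
from a bare function name, and the name itself is hypothetical:

  import com.cloudera.impala.analysis.DropFunctionStmt;
  import com.cloudera.impala.analysis.FunctionName;

  public class DropFunctionSketch {
    public static void main(String[] args) {
      // Dropping by name only: a null FunctionArgs makes hasSignature() false,
      // so analyze() only checks that some function with this name exists.
      DropFunctionStmt stmt = new DropFunctionStmt(
          new FunctionName("my_udf"), /* fnArgs */ null, /* ifExists */ true);
      // desc_ is only populated in analyze(analyzer), so toSql() and toThrift()
      // must not be called before analysis.
    }
  }
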

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/DropStatsStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/DropStatsStmt.java b/fe/src/main/java/org/apache/impala/analysis/DropStatsStmt.java
new file mode 100644
index 0000000..90f9434
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/DropStatsStmt.java
@@ -0,0 +1,105 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import com.cloudera.impala.authorization.Privilege;
+import com.cloudera.impala.catalog.Table;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.thrift.TDropStatsParams;
+import com.cloudera.impala.thrift.TTableName;
+import com.google.common.base.Preconditions;
+
+/**
+ * Represents both a DROP STATS statement and the DROP INCREMENTAL STATS <tbl> PARTITION
+ * <part_spec> variant.
+ */
+public class DropStatsStmt extends StatementBase {
+  protected final TableName tableName_;
+
+  // If non-null, only drop the statistics for a given partition
+  PartitionSpec partitionSpec_ = null;
+
+  // Set during analysis
+  protected String dbName_;
+
+  /**
+   * Constructor for building the DROP STATS statement
+   */
+  public DropStatsStmt(TableName tableName) {
+    this.tableName_ = tableName;
+  }
+
+  public DropStatsStmt(TableName tableName, PartitionSpec partSpec) {
+    this.tableName_ = tableName;
+    this.partitionSpec_ = partSpec;
+  }
+
+  @Override
+  public String toSql() {
+    StringBuilder sb = new StringBuilder("DROP ");
+    if (partitionSpec_ == null) {
+      sb.append(" STATS ");
+      if (tableName_.getDb() != null) sb.append(tableName_.getDb() + ".");
+      sb.append(tableName_.toSql());
+    } else {
+      sb.append(" INCREMENTAL STATS ");
+      if (tableName_.getDb() != null) sb.append(tableName_.getDb() + ".");
+      sb.append(tableName_.toSql());
+      sb.append(partitionSpec_.toSql());
+    }
+    return sb.toString();
+  }
+
+  public TDropStatsParams toThrift() {
+    TDropStatsParams params = new TDropStatsParams();
+    params.setTable_name(new TTableName(getDb(), getTbl()));
+
+    if (partitionSpec_ != null) {
+      params.setPartition_spec(partitionSpec_.toThrift());
+    }
+    return params;
+  }
+
+  /**
+   * Checks that the given table exists and the user has privileges
+   * to drop stats on this table.
+   */
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    dbName_ = analyzer.getTargetDbName(tableName_);
+    Table table = analyzer.getTable(tableName_, Privilege.ALTER);
+    Preconditions.checkNotNull(table);
+    if (partitionSpec_ != null) {
+      partitionSpec_.setTableName(tableName_);
+      partitionSpec_.setPrivilegeRequirement(Privilege.ALTER);
+      partitionSpec_.setPartitionShouldExist();
+      partitionSpec_.analyze(analyzer);
+    }
+  }
+
+  /**
+   * Can only be called after analysis. Returns the name of the database that
+   * the target table resides in.
+   */
+  public String getDb() {
+    Preconditions.checkNotNull(dbName_);
+    return dbName_;
+  }
+
+  public String getTbl() { return tableName_.getTbl(); }
+}
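A construction-only sketch for DropStatsStmt; the (db, table) TableName constructor
and the table name are assumptions for illustration:

  import com.cloudera.impala.analysis.DropStatsStmt;
  import com.cloudera.impala.analysis.TableName;

  public class DropStatsSketch {
    public static void main(String[] args) {
      // DROP STATS for a whole table; the two-argument constructor also takes a
      // PartitionSpec for the DROP INCREMENTAL STATS ... PARTITION (...) variant.
      DropStatsStmt stmt = new DropStatsStmt(new TableName("functional", "alltypes"));
      // dbName_ is resolved in analyze(analyzer), so getDb() and toThrift() are
      // only valid after analysis; analyze() also requires ALTER privileges.
    }
  }
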

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/DropTableOrViewStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/DropTableOrViewStmt.java b/fe/src/main/java/org/apache/impala/analysis/DropTableOrViewStmt.java
new file mode 100644
index 0000000..8371ace
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/DropTableOrViewStmt.java
@@ -0,0 +1,115 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import com.cloudera.impala.authorization.Privilege;
+import com.cloudera.impala.catalog.Table;
+import com.cloudera.impala.catalog.View;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.thrift.TDropTableOrViewParams;
+import com.cloudera.impala.thrift.TTableName;
+import com.google.common.base.Preconditions;
+
+/**
+ * Represents a DROP TABLE/VIEW [IF EXISTS] statement
+ */
+public class DropTableOrViewStmt extends StatementBase {
+  protected final TableName tableName_;
+  protected final boolean ifExists_;
+
+  // True if we are dropping a table. False if we are dropping a view.
+  protected final boolean dropTable_;
+
+  // Setting this flag causes dropped tables to be permanently
+  // deleted. For example, for HDFS tables it skips the trash directory.
+  protected final boolean purgeTable_;
+
+  // Set during analysis
+  protected String dbName_;
+
+  /**
+   * Constructor for building the DROP TABLE/VIEW statement
+   */
+  public DropTableOrViewStmt(TableName tableName, boolean ifExists,
+      boolean dropTable, boolean purgeTable) {
+    tableName_ = tableName;
+    ifExists_ = ifExists;
+    dropTable_ = dropTable;
+    purgeTable_ = purgeTable;
+    // PURGE with a view is not allowed.
+    Preconditions.checkState(!(!dropTable_ && purgeTable_));
+  }
+
+  @Override
+  public String toSql() {
+    StringBuilder sb = new StringBuilder("DROP " + ((dropTable_) ? "TABLE " : "VIEW "));
+    if (ifExists_) sb.append("IF EXISTS ");
+    if (tableName_.getDb() != null) sb.append(tableName_.getDb() + ".");
+    sb.append(tableName_.getTbl());
+    if (purgeTable_) sb.append(" PURGE");
+    return sb.toString();
+  }
+
+  public TDropTableOrViewParams toThrift() {
+    TDropTableOrViewParams params = new TDropTableOrViewParams();
+    params.setTable_name(new TTableName(getDb(), getTbl()));
+    params.setIf_exists(ifExists_);
+    params.setPurge(purgeTable_);
+    params.setIs_table(dropTable_);
+    return params;
+  }
+
+  /**
+   * 1. Checks that the user has privileges to DROP the given table/view
+   * 2. Checks that the database and table exist
+   * 3. Checks that the table type (TABLE/VIEW) matches the DROP TABLE/VIEW statement
+   * Note: Do not analyze tableName because we prefer to report an error indicating
+   * that the table/view does not exist even if the table/view name is invalid.
+   */
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    dbName_ = analyzer.getTargetDbName(tableName_);
+    try {
+      Table table = analyzer.getTable(tableName_, Privilege.DROP);
+      Preconditions.checkNotNull(table);
+      if (table instanceof View && dropTable_) {
+        throw new AnalysisException(String.format(
+            "DROP TABLE not allowed on a view: %s.%s", dbName_, getTbl()));
+      }
+      if (!(table instanceof View) && !dropTable_) {
+        throw new AnalysisException(String.format(
+            "DROP VIEW not allowed on a table: %s.%s", dbName_, getTbl()));
+      }
+    } catch (AnalysisException e) {
+      if (ifExists_ && analyzer.getMissingTbls().isEmpty()) return;
+      throw e;
+    }
+  }
+
+  /**
+   * Can only be called after analysis. Returns the name of the database that
+   * the table or view being dropped resides in.
+   */
+  public String getDb() {
+    Preconditions.checkNotNull(dbName_);
+    return dbName_;
+  }
+
+  public String getTbl() { return tableName_.getTbl(); }
+  public boolean isDropTable() { return dropTable_; }
+}
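A minimal usage sketch for DropTableOrViewStmt; the (db, table) TableName constructor
and the table name are assumptions for illustration:

  import com.cloudera.impala.analysis.DropTableOrViewStmt;
  import com.cloudera.impala.analysis.TableName;

  public class DropTableSketch {
    public static void main(String[] args) {
      DropTableOrViewStmt stmt = new DropTableOrViewStmt(
          new TableName("functional", "alltypes"),
          /* ifExists */ true, /* dropTable */ true, /* purgeTable */ true);
      // Prints: DROP TABLE IF EXISTS functional.alltypes PURGE
      System.out.println(stmt.toSql());
      // dropTable=false combined with purgeTable=true trips the Preconditions
      // check in the constructor, since PURGE is not allowed for views.
    }
  }
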

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/EquivalenceClassId.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/EquivalenceClassId.java b/fe/src/main/java/org/apache/impala/analysis/EquivalenceClassId.java
new file mode 100644
index 0000000..df658b9
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/EquivalenceClassId.java
@@ -0,0 +1,37 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import com.cloudera.impala.common.Id;
+import com.cloudera.impala.common.IdGenerator;
+
+public class EquivalenceClassId extends Id<EquivalenceClassId> {
+  // Construction only allowed via an IdGenerator.
+  protected EquivalenceClassId(int id) {
+    super(id);
+  }
+
+  public static IdGenerator<EquivalenceClassId> createGenerator() {
+    return new IdGenerator<EquivalenceClassId>() {
+      @Override
+      public EquivalenceClassId getNextId() { return new EquivalenceClassId(nextId_++); }
+      @Override
+      public EquivalenceClassId getMaxId() { return new EquivalenceClassId(nextId_ - 1); }
+    };
+  }
+}

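A minimal usage sketch for the id generator above:

  import com.cloudera.impala.analysis.EquivalenceClassId;
  import com.cloudera.impala.common.IdGenerator;

  public class EquivalenceClassIdSketch {
    public static void main(String[] args) {
      // Ids can only be minted through the generator; the constructor is protected.
      IdGenerator<EquivalenceClassId> gen = EquivalenceClassId.createGenerator();
      EquivalenceClassId first = gen.getNextId();
      EquivalenceClassId second = gen.getNextId();
      // getMaxId() returns the most recently handed-out id value, here that of 'second'.
      EquivalenceClassId max = gen.getMaxId();
    }
  }
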
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/ExistsPredicate.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/ExistsPredicate.java b/fe/src/main/java/org/apache/impala/analysis/ExistsPredicate.java
new file mode 100644
index 0000000..da984eb
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/ExistsPredicate.java
@@ -0,0 +1,82 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.thrift.TExprNode;
+import com.google.common.base.Preconditions;
+
+/**
+ * Class representing a [NOT] EXISTS predicate.
+ */
+public class ExistsPredicate extends Predicate {
+  private final static Logger LOG = LoggerFactory.getLogger(
+      ExistsPredicate.class);
+  private boolean notExists_ = false;
+
+  public boolean isNotExists() { return notExists_; }
+
+  /**
+   * C'tor that initializes an ExistsPredicate from a Subquery.
+   */
+  public ExistsPredicate(Subquery subquery, boolean notExists) {
+    Preconditions.checkNotNull(subquery);
+    children_.add(subquery);
+    notExists_ = notExists;
+  }
+
+  @Override
+  public Expr negate() {
+    return new ExistsPredicate((Subquery)getChild(0), !notExists_);
+  }
+
+  /**
+   * Copy c'tor used in clone.
+   */
+  public ExistsPredicate(ExistsPredicate other) {
+    super(other);
+    notExists_ = other.notExists_;
+  }
+
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    if (isAnalyzed_) return;
+    super.analyze(analyzer);
+  }
+
+  @Override
+  protected void toThrift(TExprNode msg) {
+    // Cannot serialize a nested predicate
+    Preconditions.checkState(false);
+  }
+
+  @Override
+  public Expr clone() { return new ExistsPredicate(this); }
+
+  @Override
+  public String toSqlImpl() {
+    StringBuilder strBuilder = new StringBuilder();
+    if (notExists_) strBuilder.append("NOT ");
+    strBuilder.append("EXISTS ");
+    strBuilder.append(getChild(0).toSql());
+    return strBuilder.toString();
+  }
+}

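A fragment (not a standalone program) sketching how ExistsPredicate is used; it
assumes an already-built Subquery, which in practice comes from the SQL parser:

  import com.cloudera.impala.analysis.ExistsPredicate;
  import com.cloudera.impala.analysis.Expr;
  import com.cloudera.impala.analysis.Subquery;

  // Given a Subquery 'subquery' produced by the parser:
  ExistsPredicate notExists = new ExistsPredicate(subquery, /* notExists */ true);
  // toSql() on 'notExists' yields "NOT EXISTS (<subquery sql>)".
  Expr exists = notExists.negate();
  // The negated copy yields "EXISTS (<subquery sql>)". toThrift() is unsupported
  // (see the Preconditions check above): nested predicates are expected to be
  // rewritten before plan serialization.
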


[59/61] incubator-impala git commit: IMPALA-3786: Replace "cloudera" with "apache" (part 2)

Posted by kw...@apache.org.
IMPALA-3786: Replace "cloudera" with "apache" (part 2)

As part of the ASF transition, we need to replace references to
Cloudera in Impala with references to Apache. This primarily means
changing Java package names from com.cloudera.impala.* to
org.apache.impala.*

A prior patch renamed all the files as necessary, and this patch
performs the actual code changes. Most of the changes in this patch
were generated with some commands of the form:

find . | grep "\.java\|\.py\|\.h\|\.cc" | \
  xargs sed -i s/'com\(.\)cloudera\(\.\)impala/org\1apache\2impala/g

along with some manual fixes.

After this patch, the remaining references to Cloudera in the repo
mostly fall into the categories:
- External components that have cloudera in their own package names,
  eg. com.cloudera.kudu/llama
- URLs, eg. https://repository.cloudera.com/

Change-Id: I0d35fa6602a7fc0c212b2ef5e2b3322b77dde7e2
Reviewed-on: http://gerrit.cloudera.org:8080/3937
Reviewed-by: Thomas Tauber-Marshall <tm...@cloudera.com>
Reviewed-by: Jim Apple <jb...@cloudera.com>
Tested-by: Internal Jenkins


Project: http://git-wip-us.apache.org/repos/asf/incubator-impala/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-impala/commit/b2c2fe78
Tree: http://git-wip-us.apache.org/repos/asf/incubator-impala/tree/b2c2fe78
Diff: http://git-wip-us.apache.org/repos/asf/incubator-impala/diff/b2c2fe78

Branch: refs/heads/master
Commit: b2c2fe7813cbde794acc3dcf9c1d9e937683be72
Parents: b544f01
Author: Thomas Tauber-Marshall <tm...@cloudera.com>
Authored: Fri Aug 12 09:49:06 2016 -0700
Committer: Internal Jenkins <cl...@gerrit.cloudera.org>
Committed: Thu Sep 29 21:14:13 2016 +0000

----------------------------------------------------------------------
 be/src/benchmarks/expr-benchmark.cc             |   4 +-
 be/src/catalog/catalog.cc                       |   2 +-
 be/src/catalog/catalog.h                        |   2 +-
 be/src/exec/external-data-source-executor.cc    |   4 +-
 be/src/exec/external-data-source-executor.h     |   2 +-
 be/src/exprs/hive-udf-call.cc                   |   2 +-
 be/src/scheduling/request-pool-service.cc       |   2 +-
 be/src/scheduling/request-pool-service.h        |   2 +-
 be/src/service/fe-support.cc                    |  26 +--
 be/src/service/frontend.cc                      |   2 +-
 be/src/service/frontend.h                       |   2 +-
 be/src/util/jni-util.cc                         |   4 +-
 be/src/util/logging-support.cc                  |   6 +-
 bin/create_testdata.sh                          |   4 +-
 bin/run-jdbc-client.sh                          |   2 +-
 common/function-registry/CMakeLists.txt         |   2 +-
 .../function-registry/gen_builtins_catalog.py   |   8 +-
 common/thrift/CatalogInternalService.thrift     |   2 +-
 common/thrift/CatalogObjects.thrift             |   2 +-
 common/thrift/CatalogService.thrift             |   2 +-
 common/thrift/Data.thrift                       |   2 +-
 common/thrift/DataSinks.thrift                  |   2 +-
 common/thrift/Descriptors.thrift                |   2 +-
 common/thrift/ExecStats.thrift                  |   2 +-
 common/thrift/Exprs.thrift                      |   2 +-
 common/thrift/ExternalDataSource.thrift         |   2 +-
 common/thrift/Frontend.thrift                   |   2 +-
 common/thrift/ImpalaInternalService.thrift      |   2 +-
 common/thrift/ImpalaService.thrift              |   2 +-
 common/thrift/JniCatalog.thrift                 |   2 +-
 common/thrift/LineageGraph.thrift               |   2 +-
 common/thrift/Logging.thrift                    |   2 +-
 common/thrift/Metrics.thrift                    |   2 +-
 common/thrift/Partitions.thrift                 |   2 +-
 common/thrift/PlanNodes.thrift                  |   2 +-
 common/thrift/Planner.thrift                    |   2 +-
 common/thrift/Results.thrift                    |   2 +-
 common/thrift/RuntimeProfile.thrift             |   2 +-
 common/thrift/StatestoreService.thrift          |   2 +-
 common/thrift/Status.thrift                     |   2 +-
 common/thrift/Types.thrift                      |   2 +-
 common/thrift/generate_error_codes.py           |   2 +-
 common/thrift/generate_metrics.py               |   2 +-
 ext-data-source/api/pom.xml                     |   6 +-
 .../extdatasource/util/SerializationUtils.java  |   2 +-
 .../extdatasource/v1/ExternalDataSource.java    |  18 +-
 ext-data-source/pom.xml                         |   4 +-
 ext-data-source/sample/pom.xml                  |   8 +-
 .../extdatasource/sample/EchoDataSource.java    |  28 +--
 ext-data-source/test/pom.xml                    |   8 +-
 .../extdatasource/AllTypesDataSource.java       |  44 ++---
 fe/pom.xml                                      |   6 +-
 fe/src/main/cup/sql-parser.cup                  |  42 ++--
 .../apache/impala/analysis/AggregateInfo.java   |  12 +-
 .../impala/analysis/AggregateInfoBase.java      |   8 +-
 .../analysis/AlterTableAddPartitionStmt.java    |  18 +-
 .../analysis/AlterTableAddReplaceColsStmt.java  |  16 +-
 .../analysis/AlterTableChangeColStmt.java       |  14 +-
 .../impala/analysis/AlterTableDropColStmt.java  |  14 +-
 .../analysis/AlterTableDropPartitionStmt.java   |  12 +-
 .../analysis/AlterTableOrViewRenameStmt.java    |  18 +-
 .../AlterTableRecoverPartitionsStmt.java        |  10 +-
 .../analysis/AlterTableSetCachedStmt.java       |  16 +-
 .../analysis/AlterTableSetColumnStats.java      |  14 +-
 .../analysis/AlterTableSetFileFormatStmt.java   |  12 +-
 .../analysis/AlterTableSetLocationStmt.java     |  18 +-
 .../impala/analysis/AlterTableSetStmt.java      |  10 +-
 .../analysis/AlterTableSetTblProperties.java    |  18 +-
 .../apache/impala/analysis/AlterTableStmt.java  |  18 +-
 .../apache/impala/analysis/AlterViewStmt.java   |  12 +-
 .../apache/impala/analysis/AnalysisContext.java |  32 +--
 .../apache/impala/analysis/AnalyticExpr.java    |  28 +--
 .../apache/impala/analysis/AnalyticInfo.java    |   4 +-
 .../apache/impala/analysis/AnalyticWindow.java  |  20 +-
 .../org/apache/impala/analysis/Analyzer.java    |  76 +++----
 .../apache/impala/analysis/ArithmeticExpr.java  |  18 +-
 .../impala/analysis/AuthorizationStmt.java      |   6 +-
 .../apache/impala/analysis/BaseTableRef.java    |   8 +-
 .../impala/analysis/BetweenPredicate.java       |   6 +-
 .../apache/impala/analysis/BinaryPredicate.java |  22 +--
 .../org/apache/impala/analysis/BoolLiteral.java |  12 +-
 .../org/apache/impala/analysis/CaseExpr.java    |  22 +--
 .../apache/impala/analysis/CaseWhenClause.java  |   2 +-
 .../org/apache/impala/analysis/CastExpr.java    |  26 +--
 .../impala/analysis/CollectionStructType.java   |  14 +-
 .../impala/analysis/CollectionTableRef.java     |   8 +-
 .../org/apache/impala/analysis/ColumnDef.java   |  10 +-
 .../impala/analysis/ColumnLineageGraph.java     |  18 +-
 .../impala/analysis/CompoundPredicate.java      |  16 +-
 .../impala/analysis/ComputeStatsStmt.java       |  28 +--
 .../impala/analysis/CreateDataSrcStmt.java      |  12 +-
 .../apache/impala/analysis/CreateDbStmt.java    |  10 +-
 .../impala/analysis/CreateDropRoleStmt.java     |   8 +-
 .../impala/analysis/CreateFunctionStmtBase.java |  22 +--
 .../analysis/CreateOrAlterViewStmtBase.java     |   8 +-
 .../analysis/CreateTableAsSelectStmt.java       |  24 +--
 .../impala/analysis/CreateTableDataSrcStmt.java |  24 +--
 .../analysis/CreateTableLikeFileStmt.java       |  28 +--
 .../impala/analysis/CreateTableLikeStmt.java    |  16 +-
 .../apache/impala/analysis/CreateTableStmt.java |  34 ++--
 .../apache/impala/analysis/CreateUdaStmt.java   |  16 +-
 .../apache/impala/analysis/CreateUdfStmt.java   |  24 +--
 .../apache/impala/analysis/CreateViewStmt.java  |  12 +-
 .../org/apache/impala/analysis/DeleteStmt.java  |  10 +-
 .../apache/impala/analysis/DescribeDbStmt.java  |  10 +-
 .../impala/analysis/DescribeTableStmt.java      |  18 +-
 .../apache/impala/analysis/DescriptorTable.java |  10 +-
 .../apache/impala/analysis/DistributeParam.java |  16 +-
 .../apache/impala/analysis/DropDataSrcStmt.java |   6 +-
 .../org/apache/impala/analysis/DropDbStmt.java  |  10 +-
 .../impala/analysis/DropFunctionStmt.java       |  20 +-
 .../apache/impala/analysis/DropStatsStmt.java   |  12 +-
 .../impala/analysis/DropTableOrViewStmt.java    |  14 +-
 .../impala/analysis/EquivalenceClassId.java     |   6 +-
 .../apache/impala/analysis/ExistsPredicate.java |   6 +-
 .../java/org/apache/impala/analysis/Expr.java   |  24 +--
 .../java/org/apache/impala/analysis/ExprId.java |   6 +-
 .../impala/analysis/ExprSubstitutionMap.java    |   2 +-
 .../apache/impala/analysis/ExtractFromExpr.java |  10 +-
 .../org/apache/impala/analysis/FromClause.java  |   4 +-
 .../apache/impala/analysis/FunctionArgs.java    |   6 +-
 .../impala/analysis/FunctionCallExpr.java       |  30 +--
 .../apache/impala/analysis/FunctionName.java    |  10 +-
 .../apache/impala/analysis/FunctionParams.java  |   2 +-
 .../impala/analysis/GrantRevokePrivStmt.java    |  10 +-
 .../impala/analysis/GrantRevokeRoleStmt.java    |   6 +-
 .../apache/impala/analysis/HdfsCachingOp.java   |   8 +-
 .../org/apache/impala/analysis/HdfsUri.java     |  14 +-
 .../org/apache/impala/analysis/InPredicate.java |  20 +-
 .../apache/impala/analysis/InlineViewRef.java   |  12 +-
 .../org/apache/impala/analysis/InsertStmt.java  |  28 +--
 .../impala/analysis/IsNotEmptyPredicate.java    |   8 +-
 .../apache/impala/analysis/IsNullPredicate.java |  26 +--
 .../apache/impala/analysis/JoinOperator.java    |   4 +-
 .../apache/impala/analysis/LikePredicate.java   |  16 +-
 .../apache/impala/analysis/LimitElement.java    |  10 +-
 .../org/apache/impala/analysis/LiteralExpr.java |  20 +-
 .../apache/impala/analysis/LoadDataStmt.java    |  28 +--
 .../org/apache/impala/analysis/ModifyStmt.java  |  18 +-
 .../org/apache/impala/analysis/NullLiteral.java |   8 +-
 .../apache/impala/analysis/NumericLiteral.java  |  20 +-
 .../apache/impala/analysis/OrderByElement.java  |   2 +-
 .../org/apache/impala/analysis/ParseNode.java   |   4 +-
 .../impala/analysis/PartitionKeyValue.java      |   4 +-
 .../impala/analysis/PartitionListItem.java      |   2 +-
 .../apache/impala/analysis/PartitionSpec.java   |  16 +-
 .../java/org/apache/impala/analysis/Path.java   |  16 +-
 .../org/apache/impala/analysis/Predicate.java   |  10 +-
 .../apache/impala/analysis/PrivilegeSpec.java   |  22 +--
 .../org/apache/impala/analysis/QueryStmt.java   |   8 +-
 .../impala/analysis/ResetMetadataStmt.java      |  14 +-
 .../org/apache/impala/analysis/SelectList.java  |   2 +-
 .../apache/impala/analysis/SelectListItem.java  |   2 +-
 .../org/apache/impala/analysis/SelectStmt.java  |  22 +--
 .../org/apache/impala/analysis/SetStmt.java     |   4 +-
 .../impala/analysis/ShowCreateFunctionStmt.java |  16 +-
 .../impala/analysis/ShowCreateTableStmt.java    |  14 +-
 .../impala/analysis/ShowDataSrcsStmt.java       |   6 +-
 .../org/apache/impala/analysis/ShowDbsStmt.java |   6 +-
 .../apache/impala/analysis/ShowFilesStmt.java   |  16 +-
 .../impala/analysis/ShowFunctionsStmt.java      |  10 +-
 .../impala/analysis/ShowGrantRoleStmt.java      |  10 +-
 .../impala/analysis/ShowPartitionsStmt.java     |   6 +-
 .../apache/impala/analysis/ShowRolesStmt.java   |  10 +-
 .../apache/impala/analysis/ShowStatsStmt.java   |  12 +-
 .../apache/impala/analysis/ShowTablesStmt.java  |   8 +-
 .../impala/analysis/SingularRowSrcTableRef.java |   4 +-
 .../apache/impala/analysis/SlotDescriptor.java  |  12 +-
 .../java/org/apache/impala/analysis/SlotId.java |   6 +-
 .../org/apache/impala/analysis/SlotRef.java     |  18 +-
 .../org/apache/impala/analysis/SortInfo.java    |   2 +-
 .../apache/impala/analysis/StatementBase.java   |   8 +-
 .../apache/impala/analysis/StmtRewriter.java    |   8 +-
 .../apache/impala/analysis/StringLiteral.java   |  14 +-
 .../org/apache/impala/analysis/Subquery.java    |  12 +-
 .../org/apache/impala/analysis/TableName.java   |   6 +-
 .../org/apache/impala/analysis/TableRef.java    |  12 +-
 .../analysis/TimestampArithmeticExpr.java       |  12 +-
 .../org/apache/impala/analysis/ToSqlUtils.java  |  24 +--
 .../apache/impala/analysis/TruncateStmt.java    |  16 +-
 .../apache/impala/analysis/TupleDescriptor.java |  14 +-
 .../org/apache/impala/analysis/TupleId.java     |   6 +-
 .../impala/analysis/TupleIsNullPredicate.java   |  12 +-
 .../org/apache/impala/analysis/TypeDef.java     |  18 +-
 .../org/apache/impala/analysis/TypesUtil.java   |   8 +-
 .../org/apache/impala/analysis/UnionStmt.java   |   6 +-
 .../org/apache/impala/analysis/UpdateStmt.java  |  10 +-
 .../org/apache/impala/analysis/UseStmt.java     |  10 +-
 .../org/apache/impala/analysis/ValuesStmt.java  |   2 +-
 .../org/apache/impala/analysis/WithClause.java  |   8 +-
 .../authorization/AuthorizationChecker.java     |   8 +-
 .../authorization/AuthorizationConfig.java      |   2 +-
 .../impala/authorization/Authorizeable.java     |   2 +-
 .../authorization/AuthorizeableColumn.java      |   2 +-
 .../impala/authorization/AuthorizeableDb.java   |   2 +-
 .../impala/authorization/AuthorizeableFn.java   |   2 +-
 .../authorization/AuthorizeableServer.java      |   2 +-
 .../authorization/AuthorizeableTable.java       |   2 +-
 .../impala/authorization/AuthorizeableUri.java  |   2 +-
 .../authorization/ImpalaInternalAdminUser.java  |   2 +-
 .../apache/impala/authorization/Privilege.java  |   2 +-
 .../impala/authorization/PrivilegeRequest.java  |   2 +-
 .../authorization/PrivilegeRequestBuilder.java  |   2 +-
 .../impala/authorization/SentryConfig.java      |   4 +-
 .../org/apache/impala/authorization/User.java   |   8 +-
 .../impala/catalog/AggregateFunction.java       |  12 +-
 .../org/apache/impala/catalog/ArrayType.java    |   8 +-
 .../impala/catalog/AuthorizationException.java  |   2 +-
 .../impala/catalog/AuthorizationPolicy.java     |  14 +-
 .../org/apache/impala/catalog/BuiltinsDb.java   |  20 +-
 .../java/org/apache/impala/catalog/Catalog.java |  16 +-
 .../apache/impala/catalog/CatalogDeltaLog.java  |   6 +-
 .../apache/impala/catalog/CatalogException.java |   4 +-
 .../apache/impala/catalog/CatalogObject.java    |   4 +-
 .../impala/catalog/CatalogObjectCache.java      |   2 +-
 .../impala/catalog/CatalogServiceCatalog.java   |  46 ++---
 .../java/org/apache/impala/catalog/Column.java  |   6 +-
 .../impala/catalog/ColumnNotFoundException.java |   2 +-
 .../org/apache/impala/catalog/ColumnStats.java  |   8 +-
 .../org/apache/impala/catalog/DataSource.java   |   6 +-
 .../apache/impala/catalog/DataSourceTable.java  |  24 +--
 .../catalog/DatabaseNotFoundException.java      |   2 +-
 .../main/java/org/apache/impala/catalog/Db.java |  22 +--
 .../org/apache/impala/catalog/Function.java     |  32 +--
 .../org/apache/impala/catalog/HBaseColumn.java  |   4 +-
 .../org/apache/impala/catalog/HBaseTable.java   |  24 +--
 .../apache/impala/catalog/HdfsCachePool.java    |   6 +-
 .../apache/impala/catalog/HdfsCompression.java  |   4 +-
 .../apache/impala/catalog/HdfsFileFormat.java   |   4 +-
 .../apache/impala/catalog/HdfsPartition.java    |  40 ++--
 .../HdfsPartitionLocationCompressor.java        |   8 +-
 .../impala/catalog/HdfsStorageDescriptor.java   |   2 +-
 .../org/apache/impala/catalog/HdfsTable.java    |  78 ++++----
 .../catalog/HiveStorageDescriptorFactory.java   |   4 +-
 .../apache/impala/catalog/ImpaladCatalog.java   |  28 +--
 .../apache/impala/catalog/IncompleteTable.java  |  16 +-
 .../org/apache/impala/catalog/KuduColumn.java   |   4 +-
 .../org/apache/impala/catalog/KuduTable.java    |  26 +--
 .../java/org/apache/impala/catalog/MapType.java |   8 +-
 .../impala/catalog/MetaStoreClientPool.java     |   2 +-
 .../catalog/PartitionNotFoundException.java     |   2 +-
 .../impala/catalog/PartitionStatsUtil.java      |  12 +-
 .../apache/impala/catalog/PrimitiveType.java    |   4 +-
 .../java/org/apache/impala/catalog/Role.java    |   6 +-
 .../apache/impala/catalog/RolePrivilege.java    |  10 +-
 .../org/apache/impala/catalog/RowFormat.java    |   6 +-
 .../apache/impala/catalog/ScalarFunction.java   |  18 +-
 .../org/apache/impala/catalog/ScalarType.java   |  12 +-
 .../org/apache/impala/catalog/StructField.java  |   8 +-
 .../org/apache/impala/catalog/StructType.java   |  10 +-
 .../java/org/apache/impala/catalog/Table.java   |  28 +--
 .../java/org/apache/impala/catalog/TableId.java |   6 +-
 .../org/apache/impala/catalog/TableLoader.java  |   4 +-
 .../impala/catalog/TableLoadingException.java   |   2 +-
 .../apache/impala/catalog/TableLoadingMgr.java  |   6 +-
 .../impala/catalog/TableNotFoundException.java  |   2 +-
 .../java/org/apache/impala/catalog/Type.java    |  24 +--
 .../java/org/apache/impala/catalog/View.java    |  18 +-
 .../impala/catalog/delegates/DdlDelegate.java   |   8 +-
 .../catalog/delegates/KuduDdlDelegate.java      |  18 +-
 .../delegates/UnsupportedOpDelegate.java        |   4 +-
 .../apache/impala/common/AliasGenerator.java    |   2 +-
 .../apache/impala/common/AnalysisException.java |   2 +-
 .../org/apache/impala/common/ByteUnits.java     |   2 +-
 .../impala/common/ColumnAliasGenerator.java     |   2 +-
 .../apache/impala/common/FileSystemUtil.java    |   2 +-
 .../main/java/org/apache/impala/common/Id.java  |   2 +-
 .../org/apache/impala/common/IdGenerator.java   |   2 +-
 .../apache/impala/common/ImpalaException.java   |   2 +-
 .../impala/common/ImpalaRuntimeException.java   |   2 +-
 .../apache/impala/common/InternalException.java |   2 +-
 .../java/org/apache/impala/common/JniUtil.java  |   8 +-
 .../impala/common/NotImplementedException.java  |   2 +-
 .../java/org/apache/impala/common/Pair.java     |   2 +-
 .../org/apache/impala/common/PrintUtils.java    |  12 +-
 .../org/apache/impala/common/Reference.java     |   2 +-
 .../org/apache/impala/common/RuntimeEnv.java    |   6 +-
 .../impala/common/TableAliasGenerator.java      |   4 +-
 .../java/org/apache/impala/common/TreeNode.java |   4 +-
 .../apache/impala/extdatasource/ApiVersion.java |   6 +-
 .../ExternalDataSourceExecutor.java             |  32 +--
 .../hive/executor/ImpalaBigIntWritable.java     |   4 +-
 .../hive/executor/ImpalaBooleanWritable.java    |   4 +-
 .../hive/executor/ImpalaBytesWritable.java      |   2 +-
 .../hive/executor/ImpalaDoubleWritable.java     |   4 +-
 .../hive/executor/ImpalaFloatWritable.java      |   4 +-
 .../impala/hive/executor/ImpalaIntWritable.java |   4 +-
 .../hive/executor/ImpalaSmallIntWritable.java   |   4 +-
 .../hive/executor/ImpalaStringWritable.java     |   4 +-
 .../hive/executor/ImpalaTextWritable.java       |   2 +-
 .../hive/executor/ImpalaTinyIntWritable.java    |   4 +-
 .../impala/hive/executor/UdfExecutor.java       |  18 +-
 .../apache/impala/planner/AggregationNode.java  |  26 +--
 .../apache/impala/planner/AnalyticEvalNode.java |  24 +--
 .../apache/impala/planner/AnalyticPlanner.java  |  44 ++---
 .../org/apache/impala/planner/CohortId.java     |   6 +-
 .../apache/impala/planner/DataPartition.java    |  12 +-
 .../org/apache/impala/planner/DataSink.java     |  16 +-
 .../impala/planner/DataSourceScanNode.java      |  70 +++----
 .../apache/impala/planner/DataStreamSink.java   |  10 +-
 .../impala/planner/DistributedPlanner.java      |  24 +--
 .../org/apache/impala/planner/EmptySetNode.java |  12 +-
 .../org/apache/impala/planner/ExchangeNode.java |  22 +--
 .../apache/impala/planner/HBaseScanNode.java    |  54 ++---
 .../apache/impala/planner/HBaseTableSink.java   |  16 +-
 .../org/apache/impala/planner/HashJoinNode.java |  32 +--
 .../impala/planner/HdfsPartitionFilter.java     |  28 +--
 .../impala/planner/HdfsPartitionPruner.java     |  38 ++--
 .../org/apache/impala/planner/HdfsScanNode.java |  62 +++---
 .../apache/impala/planner/HdfsTableSink.java    |  24 +--
 .../apache/impala/planner/JoinBuildSink.java    |  24 +--
 .../org/apache/impala/planner/JoinNode.java     |  20 +-
 .../org/apache/impala/planner/JoinTableId.java  |   6 +-
 .../org/apache/impala/planner/KuduScanNode.java |  44 ++---
 .../apache/impala/planner/KuduTableSink.java    |  18 +-
 .../impala/planner/NestedLoopJoinNode.java      |  22 +--
 .../apache/impala/planner/ParallelPlanner.java  |   4 +-
 .../impala/planner/PipelinedPlanNodeSet.java    |   4 +-
 .../org/apache/impala/planner/PlanFragment.java |  36 ++--
 .../apache/impala/planner/PlanFragmentId.java   |   6 +-
 .../java/org/apache/impala/planner/PlanId.java  |   6 +-
 .../org/apache/impala/planner/PlanNode.java     |  32 +--
 .../org/apache/impala/planner/PlanNodeId.java   |   6 +-
 .../java/org/apache/impala/planner/Planner.java |  40 ++--
 .../apache/impala/planner/PlannerContext.java   |  14 +-
 .../impala/planner/RuntimeFilterGenerator.java  |  34 ++--
 .../apache/impala/planner/RuntimeFilterId.java  |   6 +-
 .../org/apache/impala/planner/ScanNode.java     |  18 +-
 .../org/apache/impala/planner/SelectNode.java   |  12 +-
 .../impala/planner/SingleNodePlanner.java       |  78 ++++----
 .../impala/planner/SingularRowSrcNode.java      |  12 +-
 .../org/apache/impala/planner/SortNode.java     |  30 +--
 .../org/apache/impala/planner/SubplanNode.java  |  12 +-
 .../org/apache/impala/planner/TableSink.java    |  14 +-
 .../org/apache/impala/planner/UnionNode.java    |  22 +--
 .../org/apache/impala/planner/UnnestNode.java   |  18 +-
 .../org/apache/impala/planner/ValueRange.java   |  18 +-
 .../apache/impala/service/BackendConfig.java    |   2 +-
 .../impala/service/CatalogOpExecutor.java       | 196 +++++++++----------
 .../impala/service/DescribeResultFactory.java   |  20 +-
 .../org/apache/impala/service/FeSupport.java    |  46 ++---
 .../org/apache/impala/service/Frontend.java     | 194 +++++++++---------
 .../org/apache/impala/service/JniCatalog.java   |  60 +++---
 .../org/apache/impala/service/JniFrontend.java  | 100 +++++-----
 .../org/apache/impala/service/MetadataOp.java   |  38 ++--
 .../apache/impala/util/AvroSchemaConverter.java |  22 +--
 .../apache/impala/util/AvroSchemaParser.java    |  20 +-
 .../org/apache/impala/util/AvroSchemaUtils.java |  10 +-
 .../org/apache/impala/util/DisjointSet.java     |   2 +-
 .../org/apache/impala/util/EventSequence.java   |   4 +-
 .../apache/impala/util/FileWatchService.java    |   2 +-
 .../apache/impala/util/FsPermissionChecker.java |   2 +-
 .../org/apache/impala/util/GlogAppender.java    |  12 +-
 .../org/apache/impala/util/HdfsCachingUtil.java |  16 +-
 .../java/org/apache/impala/util/KuduUtil.java   |  16 +-
 .../java/org/apache/impala/util/ListMap.java    |   2 +-
 .../impala/util/MaxRowsProcessedVisitor.java    |  10 +-
 .../apache/impala/util/MembershipSnapshot.java  |   6 +-
 .../org/apache/impala/util/MetaStoreUtil.java   |   6 +-
 .../org/apache/impala/util/NativeLibUtil.java   |   2 +-
 .../org/apache/impala/util/NativeLogger.java    |   2 +-
 .../org/apache/impala/util/PatternMatcher.java  |   2 +-
 .../apache/impala/util/RequestPoolService.java  |  26 +--
 .../apache/impala/util/SentryPolicyService.java |  22 +--
 .../org/apache/impala/util/SentryProxy.java     |  22 +--
 .../org/apache/impala/util/StatsHelper.java     |   2 +-
 .../apache/impala/util/TAccessLevelUtil.java    |   4 +-
 .../apache/impala/util/TColumnValueUtil.java    |   4 +-
 .../apache/impala/util/TResultRowBuilder.java   |   8 +-
 .../apache/impala/util/TSessionStateUtil.java   |   4 +-
 .../java/org/apache/impala/util/UnsafeUtil.java |   2 +-
 .../java/org/apache/impala/util/Visitor.java    |   2 +-
 fe/src/main/jflex/sql-scanner.flex              |   4 +-
 .../impala/analysis/AnalyzeAuthStmtsTest.java   |  14 +-
 .../apache/impala/analysis/AnalyzeDDLTest.java  |  36 ++--
 .../impala/analysis/AnalyzeExprsTest.java       |  38 ++--
 .../impala/analysis/AnalyzeModifyStmtsTest.java |   4 +-
 .../impala/analysis/AnalyzeStmtsTest.java       |  10 +-
 .../impala/analysis/AnalyzeSubqueriesTest.java  |   6 +-
 .../apache/impala/analysis/AnalyzerTest.java    |  16 +-
 .../apache/impala/analysis/AuditingTest.java    |  24 +--
 .../impala/analysis/AuthorizationTest.java      |  60 +++---
 .../org/apache/impala/analysis/ExprTest.java    |   6 +-
 .../org/apache/impala/analysis/ParserTest.java  |   8 +-
 .../org/apache/impala/analysis/ToSqlTest.java   |  10 +-
 .../apache/impala/analysis/TypesUtilTest.java   |  16 +-
 .../catalog/CatalogObjectToFromThriftTest.java  |  24 +--
 .../org/apache/impala/catalog/CatalogTest.java  |  18 +-
 .../impala/catalog/HdfsPartitionTest.java       |   6 +-
 .../catalog/HdfsStorageDescriptorTest.java      |   8 +-
 .../apache/impala/catalog/TestSchemaUtils.java  |   2 +-
 .../apache/impala/common/FrontendTestBase.java  |  60 +++---
 .../apache/impala/hive/executor/TestUdf.java    |   2 +-
 .../impala/hive/executor/UdfExecutorTest.java   |  18 +-
 .../org/apache/impala/planner/PlannerTest.java  |  12 +-
 .../apache/impala/planner/PlannerTestBase.java  |  66 +++----
 .../apache/impala/planner/S3PlannerTest.java    |   4 +-
 .../org/apache/impala/service/FrontendTest.java |  28 +--
 .../org/apache/impala/service/JdbcTest.java     |  10 +-
 .../impala/testutil/BlockIdGenerator.java       |  18 +-
 .../testutil/CatalogServiceTestCatalog.java     |  12 +-
 .../impala/testutil/ImpalaJdbcClient.java       |   2 +-
 .../impala/testutil/ImpaladTestCatalog.java     |  18 +-
 .../impala/testutil/SentryServicePinger.java    |   8 +-
 .../apache/impala/testutil/TestFileParser.java  |   2 +-
 .../org/apache/impala/testutil/TestUtils.java   |  20 +-
 .../org/apache/impala/util/KuduUtilTest.java    |   4 +-
 .../org/apache/impala/util/TestDisjointSet.java |   2 +-
 .../impala/util/TestRequestPoolService.java     |  14 +-
 testdata/ComplexTypesTbl/nonnullable.avsc       |   2 +-
 testdata/ComplexTypesTbl/nullable.avsc          |   2 +-
 testdata/bin/create-data-source-table.sql       |   2 +-
 testdata/bin/generate-block-ids.sh              |   2 +-
 testdata/bin/generate-load-nested.sh            |   4 +-
 testdata/bin/random_avro_schema.py              |   2 +-
 testdata/bin/run-sentry-service.sh              |   2 +-
 testdata/bin/split-hbase.sh                     |   2 +-
 testdata/data/README                            |   2 +-
 testdata/data/schemas/nested/README             |   6 +-
 testdata/data/schemas/nested/nested.avsc        |   2 +-
 testdata/parquet_schema_resolution/README       |   2 +-
 .../parquet_schema_resolution/switched_map.avsc |   2 +-
 testdata/pom.xml                                |   2 +-
 .../datagenerator/CsvToHBaseConverter.java      |   2 +-
 .../HBaseTestDataRegionAssigment.java           |   2 +-
 .../datagenerator/JsonToParquetConverter.java   |   2 +-
 .../RandomNestedDataGenerator.java              |   2 +-
 .../impala/datagenerator/TestDataGenerator.java |   2 +-
 .../queries/QueryTest/load-java-udfs.test       |  34 ++--
 .../queries/QueryTest/show-data-sources.test    |  10 +-
 tests/custom_cluster/test_permanent_udfs.py     |  20 +-
 tests/metadata/test_ddl.py                      |   2 +-
 .../metadata/test_metadata_query_statements.py  |   2 +-
 tests/query_test/test_udfs.py                   |   4 +-
 tests/test-hive-udfs/pom.xml                    |   2 +-
 .../org/apache/impala/IncompatibleUdfTest.java  |   2 +-
 .../java/org/apache/impala/JavaUdfTest.java     |   2 +-
 .../main/java/org/apache/impala/TestUdf.java    |   2 +-
 .../org/apache/impala/TestUdfException.java     |   2 +-
 .../java/org/apache/impala/TestUpdateUdf.java   |   2 +-
 .../java/org/apache/impala/UnresolvedUdf.java   |   2 +-
 441 files changed, 2864 insertions(+), 2864 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/be/src/benchmarks/expr-benchmark.cc
----------------------------------------------------------------------
diff --git a/be/src/benchmarks/expr-benchmark.cc b/be/src/benchmarks/expr-benchmark.cc
index e7788be..b95e6d7 100644
--- a/be/src/benchmarks/expr-benchmark.cc
+++ b/be/src/benchmarks/expr-benchmark.cc
@@ -58,7 +58,7 @@ class Planner {
   Planner() {
     JNIEnv* jni_env = getJNIEnv();
     // create instance of java class JniFrontend
-    jclass fe_class = jni_env->FindClass("com/cloudera/impala/service/JniFrontend");
+    jclass fe_class = jni_env->FindClass("org/apache/impala/service/JniFrontend");
     jmethodID fe_ctor = jni_env->GetMethodID(fe_class, "<init>", "(Z)V");
     EXIT_IF_EXC(jni_env);
     create_exec_request_id_ =
@@ -91,7 +91,7 @@ class Planner {
   }
 
  private:
-  jobject fe_;  // instance of com.cloudera.impala.service.JniFrontend
+  jobject fe_;  // instance of org.apache.impala.service.JniFrontend
   jmethodID create_exec_request_id_;  // JniFrontend.createExecRequest()
 
   TQueryOptions query_options_;
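
The FindClass() literal changed above uses the JNI internal form of the class name, in which the '.' separators of the Java binary name become '/'; the same slash-separated strings recur in the catalog, external-data-source, scheduler, frontend, and util diffs below, which is why each of them needs the com/cloudera -> org/apache rewrite. A minimal sketch of the mapping (the JniNames helper is hypothetical and not part of the Impala sources):

    // JniNames.java -- hypothetical helper, shown only to illustrate how a Java
    // binary class name maps to the slash-separated form expected by JNI FindClass().
    public final class JniNames {
      // "org.apache.impala.service.JniFrontend" -> "org/apache/impala/service/JniFrontend"
      public static String toJniForm(String binaryName) {
        return binaryName.replace('.', '/');
      }

      public static void main(String[] args) {
        System.out.println(toJniForm("org.apache.impala.service.JniFrontend"));
      }
    }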

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/be/src/catalog/catalog.cc
----------------------------------------------------------------------
diff --git a/be/src/catalog/catalog.cc b/be/src/catalog/catalog.cc
index cef341a..44771c8 100644
--- a/be/src/catalog/catalog.cc
+++ b/be/src/catalog/catalog.cc
@@ -59,7 +59,7 @@ Catalog::Catalog() {
 
   JNIEnv* jni_env = getJNIEnv();
   // Create an instance of the java class JniCatalog
-  catalog_class_ = jni_env->FindClass("com/cloudera/impala/service/JniCatalog");
+  catalog_class_ = jni_env->FindClass("org/apache/impala/service/JniCatalog");
   EXIT_IF_EXC(jni_env);
 
   uint32_t num_methods = sizeof(methods) / sizeof(methods[0]);

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/be/src/catalog/catalog.h
----------------------------------------------------------------------
diff --git a/be/src/catalog/catalog.h b/be/src/catalog/catalog.h
index 3fb0646..ab6a2a3 100644
--- a/be/src/catalog/catalog.h
+++ b/be/src/catalog/catalog.h
@@ -104,7 +104,7 @@ class Catalog {
   /// Descriptor of Java Catalog class itself, used to create a new instance.
   jclass catalog_class_;
 
-  jobject catalog_;  // instance of com.cloudera.impala.service.JniCatalog
+  jobject catalog_;  // instance of org.apache.impala.service.JniCatalog
   jmethodID update_metastore_id_;  // JniCatalog.updateMetaastore()
   jmethodID exec_ddl_id_;  // JniCatalog.execDdl()
   jmethodID reset_metadata_id_;  // JniCatalog.resetMetdata()

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/be/src/exec/external-data-source-executor.cc
----------------------------------------------------------------------
diff --git a/be/src/exec/external-data-source-executor.cc b/be/src/exec/external-data-source-executor.cc
index 5c8d89e..df93893 100644
--- a/be/src/exec/external-data-source-executor.cc
+++ b/be/src/exec/external-data-source-executor.cc
@@ -62,7 +62,7 @@ class ExternalDataSourceExecutor::JniState {
     RETURN_IF_ERROR(jni_frame.push(env));
 
     RETURN_IF_ERROR(JniUtil::GetGlobalClassRef(env,
-        "com/cloudera/impala/extdatasource/ExternalDataSourceExecutor",
+        "org/apache/impala/extdatasource/ExternalDataSourceExecutor",
         &executor_class_));
     uint32_t num_methods = sizeof(methods) / sizeof(methods[0]);
     for (int i = 0; i < num_methods; ++i) {
@@ -100,7 +100,7 @@ class ExternalDataSourceExecutor::JniState {
     return Status::OK();
   }
 
-  /// Class reference for com.cloudera.impala.extdatasource.ExternalDataSourceExecutor
+  /// Class reference for org.apache.impala.extdatasource.ExternalDataSourceExecutor
   jclass executor_class_;
 
   jmethodID ctor_;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/be/src/exec/external-data-source-executor.h
----------------------------------------------------------------------
diff --git a/be/src/exec/external-data-source-executor.h b/be/src/exec/external-data-source-executor.h
index 5335ba5..cdeaf84 100644
--- a/be/src/exec/external-data-source-executor.h
+++ b/be/src/exec/external-data-source-executor.h
@@ -66,7 +66,7 @@ class ExternalDataSourceExecutor {
 
   bool is_initialized_; // Set true in Init() to ensure the class is initialized.
 
-  /// Instance of com.cloudera.impala.extdatasource.ExternalDataSourceExecutor
+  /// Instance of org.apache.impala.extdatasource.ExternalDataSourceExecutor
   jobject executor_;
 };
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/be/src/exprs/hive-udf-call.cc
----------------------------------------------------------------------
diff --git a/be/src/exprs/hive-udf-call.cc b/be/src/exprs/hive-udf-call.cc
index 8b8ea4d..c7a3f32 100644
--- a/be/src/exprs/hive-udf-call.cc
+++ b/be/src/exprs/hive-udf-call.cc
@@ -33,7 +33,7 @@
 
 #include "common/names.h"
 
-const char* EXECUTOR_CLASS = "com/cloudera/impala/hive/executor/UdfExecutor";
+const char* EXECUTOR_CLASS = "org/apache/impala/hive/executor/UdfExecutor";
 const char* EXECUTOR_CTOR_SIGNATURE ="([B)V";
 const char* EXECUTOR_EVALUATE_SIGNATURE = "()V";
 const char* EXECUTOR_CLOSE_SIGNATURE = "()V";

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/be/src/scheduling/request-pool-service.cc
----------------------------------------------------------------------
diff --git a/be/src/scheduling/request-pool-service.cc b/be/src/scheduling/request-pool-service.cc
index ea2553e..6c9ccdb 100644
--- a/be/src/scheduling/request-pool-service.cc
+++ b/be/src/scheduling/request-pool-service.cc
@@ -123,7 +123,7 @@ RequestPoolService::RequestPoolService(MetricGroup* metrics) :
 
   JNIEnv* jni_env = getJNIEnv();
   request_pool_service_class_ =
-    jni_env->FindClass("com/cloudera/impala/util/RequestPoolService");
+    jni_env->FindClass("org/apache/impala/util/RequestPoolService");
   EXIT_IF_EXC(jni_env);
   uint32_t num_methods = sizeof(methods) / sizeof(methods[0]);
   for (int i = 0; i < num_methods; ++i) {

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/be/src/scheduling/request-pool-service.h
----------------------------------------------------------------------
diff --git a/be/src/scheduling/request-pool-service.h b/be/src/scheduling/request-pool-service.h
index fdfe11d..935bae8 100644
--- a/be/src/scheduling/request-pool-service.h
+++ b/be/src/scheduling/request-pool-service.h
@@ -71,7 +71,7 @@ class RequestPoolService {
   /// The following members are not initialized if default_pool_only_ is true.
   /// Descriptor of Java RequestPoolService class itself, used to create a new instance.
   jclass request_pool_service_class_;
-  /// Instance of com.cloudera.impala.util.RequestPoolService
+  /// Instance of org.apache.impala.util.RequestPoolService
   jobject request_pool_service_;
   jmethodID resolve_request_pool_id_;  // RequestPoolService.resolveRequestPool()
   jmethodID get_pool_config_id_;  // RequestPoolService.getPoolConfig()

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/be/src/service/fe-support.cc
----------------------------------------------------------------------
diff --git a/be/src/service/fe-support.cc b/be/src/service/fe-support.cc
index f3fc445..996770c 100644
--- a/be/src/service/fe-support.cc
+++ b/be/src/service/fe-support.cc
@@ -56,7 +56,7 @@ using namespace apache::thrift::server;
 // This is not called when we first start up the BE.
 extern "C"
 JNIEXPORT void JNICALL
-Java_com_cloudera_impala_service_FeSupport_NativeFeTestInit(
+Java_org_apache_impala_service_FeSupport_NativeFeTestInit(
     JNIEnv* env, jclass caller_class) {
   DCHECK(ExecEnv::GetInstance() == NULL) << "This should only be called once from the FE";
   char* name = const_cast<char*>("FeSupport");
@@ -72,7 +72,7 @@ Java_com_cloudera_impala_service_FeSupport_NativeFeTestInit(
 // called.
 extern "C"
 JNIEXPORT jbyteArray JNICALL
-Java_com_cloudera_impala_service_FeSupport_NativeEvalConstExprs(
+Java_org_apache_impala_service_FeSupport_NativeEvalConstExprs(
     JNIEnv* env, jclass caller_class, jbyteArray thrift_expr_batch,
     jbyteArray thrift_query_ctx_bytes) {
   jbyteArray result_bytes = NULL;
@@ -256,7 +256,7 @@ static void ResolveSymbolLookup(const TSymbolLookupParams params,
 
 extern "C"
 JNIEXPORT jbyteArray JNICALL
-Java_com_cloudera_impala_service_FeSupport_NativeCacheJar(
+Java_org_apache_impala_service_FeSupport_NativeCacheJar(
     JNIEnv* env, jclass caller_class, jbyteArray thrift_struct) {
   TCacheJarParams params;
   DeserializeThriftMsg(env, thrift_struct, &params);
@@ -276,7 +276,7 @@ Java_com_cloudera_impala_service_FeSupport_NativeCacheJar(
 
 extern "C"
 JNIEXPORT jbyteArray JNICALL
-Java_com_cloudera_impala_service_FeSupport_NativeLookupSymbol(
+Java_org_apache_impala_service_FeSupport_NativeLookupSymbol(
     JNIEnv* env, jclass caller_class, jbyteArray thrift_struct) {
   TSymbolLookupParams lookup;
   DeserializeThriftMsg(env, thrift_struct, &lookup);
@@ -299,7 +299,7 @@ Java_com_cloudera_impala_service_FeSupport_NativeLookupSymbol(
 // specific catalog objects.
 extern "C"
 JNIEXPORT jbyteArray JNICALL
-Java_com_cloudera_impala_service_FeSupport_NativePrioritizeLoad(
+Java_org_apache_impala_service_FeSupport_NativePrioritizeLoad(
     JNIEnv* env, jclass caller_class, jbyteArray thrift_struct) {
   TPrioritizeLoadRequest request;
   DeserializeThriftMsg(env, thrift_struct, &request);
@@ -323,7 +323,7 @@ Java_com_cloudera_impala_service_FeSupport_NativePrioritizeLoad(
 
 extern "C"
 JNIEXPORT jbyteArray JNICALL
-Java_com_cloudera_impala_service_FeSupport_NativeGetStartupOptions(JNIEnv* env,
+Java_org_apache_impala_service_FeSupport_NativeGetStartupOptions(JNIEnv* env,
     jclass caller_class) {
   TStartupOptions options;
   ExecEnv* exec_env = ExecEnv::GetInstance();
@@ -340,33 +340,33 @@ namespace impala {
 static JNINativeMethod native_methods[] = {
   {
     (char*)"NativeFeTestInit", (char*)"()V",
-    (void*)::Java_com_cloudera_impala_service_FeSupport_NativeFeTestInit
+    (void*)::Java_org_apache_impala_service_FeSupport_NativeFeTestInit
   },
   {
     (char*)"NativeEvalConstExprs", (char*)"([B[B)[B",
-    (void*)::Java_com_cloudera_impala_service_FeSupport_NativeEvalConstExprs
+    (void*)::Java_org_apache_impala_service_FeSupport_NativeEvalConstExprs
   },
   {
     (char*)"NativeCacheJar", (char*)"([B)[B",
-    (void*)::Java_com_cloudera_impala_service_FeSupport_NativeCacheJar
+    (void*)::Java_org_apache_impala_service_FeSupport_NativeCacheJar
   },
   {
     (char*)"NativeLookupSymbol", (char*)"([B)[B",
-    (void*)::Java_com_cloudera_impala_service_FeSupport_NativeLookupSymbol
+    (void*)::Java_org_apache_impala_service_FeSupport_NativeLookupSymbol
   },
   {
     (char*)"NativePrioritizeLoad", (char*)"([B)[B",
-    (void*)::Java_com_cloudera_impala_service_FeSupport_NativePrioritizeLoad
+    (void*)::Java_org_apache_impala_service_FeSupport_NativePrioritizeLoad
   },
   {
     (char*)"NativeGetStartupOptions", (char*)"()[B",
-    (void*)::Java_com_cloudera_impala_service_FeSupport_NativeGetStartupOptions
+    (void*)::Java_org_apache_impala_service_FeSupport_NativeGetStartupOptions
   },
 };
 
 void InitFeSupport() {
   JNIEnv* env = getJNIEnv();
-  jclass native_backend_cl = env->FindClass("com/cloudera/impala/service/FeSupport");
+  jclass native_backend_cl = env->FindClass("org/apache/impala/service/FeSupport");
   env->RegisterNatives(native_backend_cl, native_methods,
       sizeof(native_methods) / sizeof(native_methods[0]));
   EXIT_IF_EXC(env);
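
The exported C entry points renamed in this file follow the JNI convention of embedding the fully qualified Java class name in the symbol (Java_<package components>_<class>_<method>), so moving FeSupport from com.cloudera.impala.service to org.apache.impala.service forces the Java_com_cloudera_... -> Java_org_apache_... renames along with the updated FindClass() literal passed to RegisterNatives(). A rough sketch of the Java side of such a class, as an illustration only (method set abbreviated; the declarations are assumed to be static natives to match the jclass parameters in the C signatures):

    package org.apache.impala.service;

    // Illustrative sketch only. Each static native method below resolves either to a
    // symbol named after the fully qualified class, e.g.
    //   Java_org_apache_impala_service_FeSupport_NativeFeTestInit
    // or to a function bound explicitly through env->RegisterNatives(), as done in
    // fe-support.cc above.
    public class FeSupport {
      public static native void NativeFeTestInit();                 // signature "()V"
      public static native byte[] NativeEvalConstExprs(byte[] exprBatch,
          byte[] queryCtxBytes);                                     // signature "([B[B)[B"
    }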

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/be/src/service/frontend.cc
----------------------------------------------------------------------
diff --git a/be/src/service/frontend.cc b/be/src/service/frontend.cc
index d7357f6..107354b 100644
--- a/be/src/service/frontend.cc
+++ b/be/src/service/frontend.cc
@@ -89,7 +89,7 @@ Frontend::Frontend() {
 
   JNIEnv* jni_env = getJNIEnv();
   // create instance of java class JniFrontend
-  fe_class_ = jni_env->FindClass("com/cloudera/impala/service/JniFrontend");
+  fe_class_ = jni_env->FindClass("org/apache/impala/service/JniFrontend");
   EXIT_IF_EXC(jni_env);
 
   uint32_t num_methods = sizeof(methods) / sizeof(methods[0]);

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/be/src/service/frontend.h
----------------------------------------------------------------------
diff --git a/be/src/service/frontend.h b/be/src/service/frontend.h
index 875789d..29dec68 100644
--- a/be/src/service/frontend.h
+++ b/be/src/service/frontend.h
@@ -172,7 +172,7 @@ class Frontend {
   /// Descriptor of Java Frontend class itself, used to create a new instance.
   jclass fe_class_;
 
-  jobject fe_;  // instance of com.cloudera.impala.service.JniFrontend
+  jobject fe_;  // instance of org.apache.impala.service.JniFrontend
   jmethodID create_exec_request_id_;  // JniFrontend.createExecRequest()
   jmethodID get_explain_plan_id_;  // JniFrontend.getExplainPlan()
   jmethodID get_hadoop_config_id_;  // JniFrontend.getHadoopConfig(byte[])

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/be/src/util/jni-util.cc
----------------------------------------------------------------------
diff --git a/be/src/util/jni-util.cc b/be/src/util/jni-util.cc
index 30edaf9..3d9f6a4 100644
--- a/be/src/util/jni-util.cc
+++ b/be/src/util/jni-util.cc
@@ -82,7 +82,7 @@ Status JniUtil::Init() {
   JNIEnv* env = getJNIEnv();
   if (env == NULL) return Status("Failed to get/create JVM");
   // Find JniUtil class and create a global ref.
-  jclass local_jni_util_cl = env->FindClass("com/cloudera/impala/common/JniUtil");
+  jclass local_jni_util_cl = env->FindClass("org/apache/impala/common/JniUtil");
   if (local_jni_util_cl == NULL) {
     if (env->ExceptionOccurred()) env->ExceptionDescribe();
     return Status("Failed to find JniUtil class.");
@@ -99,7 +99,7 @@ Status JniUtil::Init() {
 
   // Find InternalException class and create a global ref.
   jclass local_internal_exc_cl =
-      env->FindClass("com/cloudera/impala/common/InternalException");
+      env->FindClass("org/apache/impala/common/InternalException");
   if (local_internal_exc_cl == NULL) {
     if (env->ExceptionOccurred()) env->ExceptionDescribe();
     return Status("Failed to find JniUtil class.");

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/be/src/util/logging-support.cc
----------------------------------------------------------------------
diff --git a/be/src/util/logging-support.cc b/be/src/util/logging-support.cc
index 1e202d7..6958e43 100644
--- a/be/src/util/logging-support.cc
+++ b/be/src/util/logging-support.cc
@@ -33,7 +33,7 @@ DEFINE_int32(non_impala_java_vlog, 0, "(Advanced) The log level (equivalent to -
 // service to log messages to Glog.
 extern "C"
 JNIEXPORT void JNICALL
-Java_com_cloudera_impala_util_NativeLogger_Log(
+Java_org_apache_impala_util_NativeLogger_Log(
     JNIEnv* env, jclass caller_class, int severity, jstring msg, jstring file,
     int line_number) {
 
@@ -79,10 +79,10 @@ namespace impala {
 void InitJvmLoggingSupport() {
   JNIEnv* env = getJNIEnv();
   JNINativeMethod nm;
-  jclass native_backend_cl = env->FindClass("com/cloudera/impala/util/NativeLogger");
+  jclass native_backend_cl = env->FindClass("org/apache/impala/util/NativeLogger");
   nm.name = const_cast<char*>("Log");
   nm.signature = const_cast<char*>("(ILjava/lang/String;Ljava/lang/String;I)V");
-  nm.fnPtr = reinterpret_cast<void*>(::Java_com_cloudera_impala_util_NativeLogger_Log);
+  nm.fnPtr = reinterpret_cast<void*>(::Java_org_apache_impala_util_NativeLogger_Log);
   env->RegisterNatives(native_backend_cl, &nm, 1);
   EXIT_IF_EXC(env);
 }

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/bin/create_testdata.sh
----------------------------------------------------------------------
diff --git a/bin/create_testdata.sh b/bin/create_testdata.sh
index 57b9cc1..cc296bf 100755
--- a/bin/create_testdata.sh
+++ b/bin/create_testdata.sh
@@ -46,6 +46,6 @@ done
 # run test data generator
 echo $DATALOC
 mkdir -p $DATALOC
-"$JAVA" -cp $CP com.cloudera.impala.datagenerator.TestDataGenerator $DATALOC
-"$JAVA" -cp $CP com.cloudera.impala.datagenerator.CsvToHBaseConverter
+"$JAVA" -cp $CP org.apache.impala.datagenerator.TestDataGenerator $DATALOC
+"$JAVA" -cp $CP org.apache.impala.datagenerator.CsvToHBaseConverter
 echo "SUCCESS, data generated into $DATALOC"

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/bin/run-jdbc-client.sh
----------------------------------------------------------------------
diff --git a/bin/run-jdbc-client.sh b/bin/run-jdbc-client.sh
index 66a8afe..e944b2d 100755
--- a/bin/run-jdbc-client.sh
+++ b/bin/run-jdbc-client.sh
@@ -18,4 +18,4 @@
 # under the License.
 
 . ${IMPALA_HOME}/bin/set-classpath.sh
-"$JAVA" -cp $CLASSPATH com.cloudera.impala.testutil.ImpalaJdbcClient "$@"
+"$JAVA" -cp $CLASSPATH org.apache.impala.testutil.ImpalaJdbcClient "$@"

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/common/function-registry/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/common/function-registry/CMakeLists.txt b/common/function-registry/CMakeLists.txt
index faaa731..013632f 100644
--- a/common/function-registry/CMakeLists.txt
+++ b/common/function-registry/CMakeLists.txt
@@ -19,7 +19,7 @@
 cmake_minimum_required(VERSION 2.6)
 
 set(BE_OUTPUT_DIR ${CMAKE_SOURCE_DIR}/be/generated-sources)
-set(FE_OUTPUT_DIR ${CMAKE_SOURCE_DIR}/fe/generated-sources/gen-java/com/cloudera/impala/)
+set(FE_OUTPUT_DIR ${CMAKE_SOURCE_DIR}/fe/generated-sources/gen-java/org/apache/impala/)
 
 # Set output files for dependency tracking
 set(CODE_GEN_OUTPUT

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/common/function-registry/gen_builtins_catalog.py
----------------------------------------------------------------------
diff --git a/common/function-registry/gen_builtins_catalog.py b/common/function-registry/gen_builtins_catalog.py
index 2f94d1c..6f1cc22 100755
--- a/common/function-registry/gen_builtins_catalog.py
+++ b/common/function-registry/gen_builtins_catalog.py
@@ -44,10 +44,10 @@ java_registry_preamble = '\
 // common/function-registry/gen_builtins_catalog.py or the function list at\n\
 // common/function-registry/impala_functions.py.\n\
 \n\
-package com.cloudera.impala.builtins;\n\
+package org.apache.impala.builtins;\n\
 \n\
-import com.cloudera.impala.catalog.Type;\n\
-import com.cloudera.impala.catalog.Db;\n\
+import org.apache.impala.catalog.Type;\n\
+import org.apache.impala.catalog.Db;\n\
 \n\
 public class ScalarBuiltins { \n\
   public static void initBuiltins(Db db) { \
@@ -58,7 +58,7 @@ java_registry_epilogue = '\
 }\n'
 
 FE_PATH = os.path.expandvars(
-  "$IMPALA_HOME/fe/generated-sources/gen-java/com/cloudera/impala/builtins/")
+  "$IMPALA_HOME/fe/generated-sources/gen-java/org/apache/impala/builtins/")
 
 # This contains all the metadata to describe all the builtins.
 # Each meta data entry is itself a map to store all the meta data

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/common/thrift/CatalogInternalService.thrift
----------------------------------------------------------------------
diff --git a/common/thrift/CatalogInternalService.thrift b/common/thrift/CatalogInternalService.thrift
index ef8c320..5b68408 100644
--- a/common/thrift/CatalogInternalService.thrift
+++ b/common/thrift/CatalogInternalService.thrift
@@ -16,7 +16,7 @@
 // under the License.
 
 namespace cpp impala
-namespace java com.cloudera.impala.thrift
+namespace java org.apache.impala.thrift
 
 include "CatalogObjects.thrift"
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/common/thrift/CatalogObjects.thrift
----------------------------------------------------------------------
diff --git a/common/thrift/CatalogObjects.thrift b/common/thrift/CatalogObjects.thrift
index 46370fb..5378988 100644
--- a/common/thrift/CatalogObjects.thrift
+++ b/common/thrift/CatalogObjects.thrift
@@ -16,7 +16,7 @@
 // under the License.
 
 namespace cpp impala
-namespace java com.cloudera.impala.thrift
+namespace java org.apache.impala.thrift
 
 include "Exprs.thrift"
 include "Status.thrift"

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/common/thrift/CatalogService.thrift
----------------------------------------------------------------------
diff --git a/common/thrift/CatalogService.thrift b/common/thrift/CatalogService.thrift
index 64bd52d..ec4bde2 100644
--- a/common/thrift/CatalogService.thrift
+++ b/common/thrift/CatalogService.thrift
@@ -16,7 +16,7 @@
 // under the License.
 
 namespace cpp impala
-namespace java com.cloudera.impala.thrift
+namespace java org.apache.impala.thrift
 
 include "CatalogObjects.thrift"
 include "JniCatalog.thrift"

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/common/thrift/Data.thrift
----------------------------------------------------------------------
diff --git a/common/thrift/Data.thrift b/common/thrift/Data.thrift
index 2232b4a..61d1988 100644
--- a/common/thrift/Data.thrift
+++ b/common/thrift/Data.thrift
@@ -16,7 +16,7 @@
 // under the License.
 
 namespace cpp impala
-namespace java com.cloudera.impala.thrift
+namespace java org.apache.impala.thrift
 
 // this is a union over all possible return types
 struct TColumnValue {

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/common/thrift/DataSinks.thrift
----------------------------------------------------------------------
diff --git a/common/thrift/DataSinks.thrift b/common/thrift/DataSinks.thrift
index 922dcd8..12a75b9 100644
--- a/common/thrift/DataSinks.thrift
+++ b/common/thrift/DataSinks.thrift
@@ -16,7 +16,7 @@
 // under the License.
 
 namespace cpp impala
-namespace java com.cloudera.impala.thrift
+namespace java org.apache.impala.thrift
 
 include "Exprs.thrift"
 include "Types.thrift"

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/common/thrift/Descriptors.thrift
----------------------------------------------------------------------
diff --git a/common/thrift/Descriptors.thrift b/common/thrift/Descriptors.thrift
index 40dcd16..1077bb6 100644
--- a/common/thrift/Descriptors.thrift
+++ b/common/thrift/Descriptors.thrift
@@ -16,7 +16,7 @@
 // under the License.
 
 namespace cpp impala
-namespace java com.cloudera.impala.thrift
+namespace java org.apache.impala.thrift
 
 include "CatalogObjects.thrift"
 include "Types.thrift"

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/common/thrift/ExecStats.thrift
----------------------------------------------------------------------
diff --git a/common/thrift/ExecStats.thrift b/common/thrift/ExecStats.thrift
index 05aab55..8068b63 100644
--- a/common/thrift/ExecStats.thrift
+++ b/common/thrift/ExecStats.thrift
@@ -16,7 +16,7 @@
 // under the License.
 
 namespace cpp impala
-namespace java com.cloudera.impala.thrift
+namespace java org.apache.impala.thrift
 
 include "Status.thrift"
 include "Types.thrift"

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/common/thrift/Exprs.thrift
----------------------------------------------------------------------
diff --git a/common/thrift/Exprs.thrift b/common/thrift/Exprs.thrift
index 2441189..3f91ac3 100644
--- a/common/thrift/Exprs.thrift
+++ b/common/thrift/Exprs.thrift
@@ -16,7 +16,7 @@
 // under the License.
 
 namespace cpp impala
-namespace java com.cloudera.impala.thrift
+namespace java org.apache.impala.thrift
 
 include "Types.thrift"
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/common/thrift/ExternalDataSource.thrift
----------------------------------------------------------------------
diff --git a/common/thrift/ExternalDataSource.thrift b/common/thrift/ExternalDataSource.thrift
index 3ddf3f7..59e6617 100644
--- a/common/thrift/ExternalDataSource.thrift
+++ b/common/thrift/ExternalDataSource.thrift
@@ -16,7 +16,7 @@
 // under the License.
 
 namespace cpp impala.extdatasource
-namespace java com.cloudera.impala.extdatasource.thrift
+namespace java org.apache.impala.extdatasource.thrift
 
 include "Status.thrift"
 include "Data.thrift"

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/common/thrift/Frontend.thrift
----------------------------------------------------------------------
diff --git a/common/thrift/Frontend.thrift b/common/thrift/Frontend.thrift
index 732ea4a..95d6ba3 100644
--- a/common/thrift/Frontend.thrift
+++ b/common/thrift/Frontend.thrift
@@ -16,7 +16,7 @@
 // under the License.
 
 namespace cpp impala
-namespace java com.cloudera.impala.thrift
+namespace java org.apache.impala.thrift
 
 include "Types.thrift"
 include "ImpalaInternalService.thrift"

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/common/thrift/ImpalaInternalService.thrift
----------------------------------------------------------------------
diff --git a/common/thrift/ImpalaInternalService.thrift b/common/thrift/ImpalaInternalService.thrift
index fdb8aa8..089524d 100644
--- a/common/thrift/ImpalaInternalService.thrift
+++ b/common/thrift/ImpalaInternalService.thrift
@@ -19,7 +19,7 @@
 // This file contains the details of the protocol between coordinators and backends.
 
 namespace cpp impala
-namespace java com.cloudera.impala.thrift
+namespace java org.apache.impala.thrift
 
 include "Status.thrift"
 include "ErrorCodes.thrift"

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/common/thrift/ImpalaService.thrift
----------------------------------------------------------------------
diff --git a/common/thrift/ImpalaService.thrift b/common/thrift/ImpalaService.thrift
index ae00ea2..da41a8e 100644
--- a/common/thrift/ImpalaService.thrift
+++ b/common/thrift/ImpalaService.thrift
@@ -16,7 +16,7 @@
 // under the License.
 
 namespace cpp impala
-namespace java com.cloudera.impala.thrift
+namespace java org.apache.impala.thrift
 
 include "ExecStats.thrift"
 include "Status.thrift"

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/common/thrift/JniCatalog.thrift
----------------------------------------------------------------------
diff --git a/common/thrift/JniCatalog.thrift b/common/thrift/JniCatalog.thrift
index 8cbe039..b97e458 100644
--- a/common/thrift/JniCatalog.thrift
+++ b/common/thrift/JniCatalog.thrift
@@ -16,7 +16,7 @@
 // under the License.
 
 namespace cpp impala
-namespace java com.cloudera.impala.thrift
+namespace java org.apache.impala.thrift
 
 include "CatalogObjects.thrift"
 include "Types.thrift"

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/common/thrift/LineageGraph.thrift
----------------------------------------------------------------------
diff --git a/common/thrift/LineageGraph.thrift b/common/thrift/LineageGraph.thrift
index e4c1b0f..f0d3540 100644
--- a/common/thrift/LineageGraph.thrift
+++ b/common/thrift/LineageGraph.thrift
@@ -16,7 +16,7 @@
 // under the License.
 
 namespace cpp impala
-namespace java com.cloudera.impala.thrift
+namespace java org.apache.impala.thrift
 
 struct TVertex {
   // Vertex id

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/common/thrift/Logging.thrift
----------------------------------------------------------------------
diff --git a/common/thrift/Logging.thrift b/common/thrift/Logging.thrift
index ed2573d..cd3db98 100644
--- a/common/thrift/Logging.thrift
+++ b/common/thrift/Logging.thrift
@@ -16,7 +16,7 @@
 // under the License.
 
 namespace cpp impala
-namespace java com.cloudera.impala.thrift
+namespace java org.apache.impala.thrift
 
 // Types to support Impala log forwarding.
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/common/thrift/Metrics.thrift
----------------------------------------------------------------------
diff --git a/common/thrift/Metrics.thrift b/common/thrift/Metrics.thrift
index c2bc9c1..04f5946 100644
--- a/common/thrift/Metrics.thrift
+++ b/common/thrift/Metrics.thrift
@@ -16,7 +16,7 @@
 // under the License.
 
 namespace cpp impala
-namespace java com.cloudera.impala.thrift
+namespace java org.apache.impala.thrift
 
 // Metric and counter data types.
 enum TUnit {

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/common/thrift/Partitions.thrift
----------------------------------------------------------------------
diff --git a/common/thrift/Partitions.thrift b/common/thrift/Partitions.thrift
index a34481b..0e918d1 100644
--- a/common/thrift/Partitions.thrift
+++ b/common/thrift/Partitions.thrift
@@ -16,7 +16,7 @@
 // under the License.
 
 namespace cpp impala
-namespace java com.cloudera.impala.thrift
+namespace java org.apache.impala.thrift
 
 include "Exprs.thrift"
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/common/thrift/PlanNodes.thrift
----------------------------------------------------------------------
diff --git a/common/thrift/PlanNodes.thrift b/common/thrift/PlanNodes.thrift
index 0219bcd..4cf1357 100644
--- a/common/thrift/PlanNodes.thrift
+++ b/common/thrift/PlanNodes.thrift
@@ -22,7 +22,7 @@
 // (those are recorded in TPlanFragmentInstanceCtx).
 
 namespace cpp impala
-namespace java com.cloudera.impala.thrift
+namespace java org.apache.impala.thrift
 
 include "CatalogObjects.thrift"
 include "ExecStats.thrift"

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/common/thrift/Planner.thrift
----------------------------------------------------------------------
diff --git a/common/thrift/Planner.thrift b/common/thrift/Planner.thrift
index 81d97b2..eb95585 100644
--- a/common/thrift/Planner.thrift
+++ b/common/thrift/Planner.thrift
@@ -19,7 +19,7 @@
 // This file contains structures produced by the planner.
 
 namespace cpp impala
-namespace java com.cloudera.impala.thrift
+namespace java org.apache.impala.thrift
 
 include "Types.thrift"
 include "Exprs.thrift"

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/common/thrift/Results.thrift
----------------------------------------------------------------------
diff --git a/common/thrift/Results.thrift b/common/thrift/Results.thrift
index d4840b1..54aae75 100644
--- a/common/thrift/Results.thrift
+++ b/common/thrift/Results.thrift
@@ -16,7 +16,7 @@
 // under the License.
 
 namespace cpp impala
-namespace java com.cloudera.impala.thrift
+namespace java org.apache.impala.thrift
 
 include "Data.thrift"
 include "Types.thrift"

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/common/thrift/RuntimeProfile.thrift
----------------------------------------------------------------------
diff --git a/common/thrift/RuntimeProfile.thrift b/common/thrift/RuntimeProfile.thrift
index 71419a0..068108b 100644
--- a/common/thrift/RuntimeProfile.thrift
+++ b/common/thrift/RuntimeProfile.thrift
@@ -16,7 +16,7 @@
 // under the License.
 
 namespace cpp impala
-namespace java com.cloudera.impala.thrift
+namespace java org.apache.impala.thrift
 
 include "Metrics.thrift"
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/common/thrift/StatestoreService.thrift
----------------------------------------------------------------------
diff --git a/common/thrift/StatestoreService.thrift b/common/thrift/StatestoreService.thrift
index a1dcc24..1677635 100644
--- a/common/thrift/StatestoreService.thrift
+++ b/common/thrift/StatestoreService.thrift
@@ -16,7 +16,7 @@
 // under the License.
 
 namespace cpp impala
-namespace java com.cloudera.impala.thrift
+namespace java org.apache.impala.thrift
 
 include "Status.thrift"
 include "Types.thrift"

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/common/thrift/Status.thrift
----------------------------------------------------------------------
diff --git a/common/thrift/Status.thrift b/common/thrift/Status.thrift
index 3f7b2f2..c119875 100644
--- a/common/thrift/Status.thrift
+++ b/common/thrift/Status.thrift
@@ -18,7 +18,7 @@
 include "ErrorCodes.thrift"
 
 namespace cpp impala
-namespace java com.cloudera.impala.thrift
+namespace java org.apache.impala.thrift
 
 struct TStatus {
   1: required ErrorCodes.TErrorCode status_code

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/common/thrift/Types.thrift
----------------------------------------------------------------------
diff --git a/common/thrift/Types.thrift b/common/thrift/Types.thrift
index 1a503bb..770a414 100644
--- a/common/thrift/Types.thrift
+++ b/common/thrift/Types.thrift
@@ -16,7 +16,7 @@
 // under the License.
 
 namespace cpp impala
-namespace java com.cloudera.impala.thrift
+namespace java org.apache.impala.thrift
 
 typedef i64 TTimestamp
 typedef i32 TPlanNodeId

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/common/thrift/generate_error_codes.py
----------------------------------------------------------------------
diff --git a/common/thrift/generate_error_codes.py b/common/thrift/generate_error_codes.py
index 2554a18..216d1c1 100755
--- a/common/thrift/generate_error_codes.py
+++ b/common/thrift/generate_error_codes.py
@@ -332,7 +332,7 @@ preamble = """
 //
 
 namespace cpp impala
-namespace java com.cloudera.impala.thrift
+namespace java org.apache.impala.thrift
 
 """
 # The script will always generate the file, CMake will take care of running it only if

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/common/thrift/generate_metrics.py
----------------------------------------------------------------------
diff --git a/common/thrift/generate_metrics.py b/common/thrift/generate_metrics.py
index df21336..dae92bf 100755
--- a/common/thrift/generate_metrics.py
+++ b/common/thrift/generate_metrics.py
@@ -88,7 +88,7 @@ THRIFT_PREAMBLE = """
 //
 
 namespace cpp impala
-namespace java com.cloudera.impala.thrift
+namespace java org.apache.impala.thrift
 
 include "Metrics.thrift"
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/ext-data-source/api/pom.xml
----------------------------------------------------------------------
diff --git a/ext-data-source/api/pom.xml b/ext-data-source/api/pom.xml
index 0234972..12f1e56 100644
--- a/ext-data-source/api/pom.xml
+++ b/ext-data-source/api/pom.xml
@@ -21,14 +21,14 @@
          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
   <modelVersion>4.0.0</modelVersion>
   <parent>
-    <groupId>com.cloudera.impala</groupId>
+    <groupId>org.apache.impala</groupId>
     <artifactId>impala-data-source</artifactId>
     <version>1.0-SNAPSHOT</version>
   </parent>
-  <groupId>com.cloudera.impala</groupId>
+  <groupId>org.apache.impala</groupId>
   <artifactId>impala-data-source-api</artifactId>
   <version>1.0-SNAPSHOT</version>
-  <name>Cloudera Impala External Data Source API</name>
+  <name>Apache Impala (Incubating) External Data Source API</name>
   <description>External Data Source API interface definition</description>
   <packaging>jar</packaging>
   <url>.</url>

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/ext-data-source/api/src/main/java/org/apache/impala/extdatasource/util/SerializationUtils.java
----------------------------------------------------------------------
diff --git a/ext-data-source/api/src/main/java/org/apache/impala/extdatasource/util/SerializationUtils.java b/ext-data-source/api/src/main/java/org/apache/impala/extdatasource/util/SerializationUtils.java
index b8fd675..ce7c55a 100644
--- a/ext-data-source/api/src/main/java/org/apache/impala/extdatasource/util/SerializationUtils.java
+++ b/ext-data-source/api/src/main/java/org/apache/impala/extdatasource/util/SerializationUtils.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.extdatasource.util;
+package org.apache.impala.extdatasource.util;
 
 import java.math.BigDecimal;
 import java.nio.ByteBuffer;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/ext-data-source/api/src/main/java/org/apache/impala/extdatasource/v1/ExternalDataSource.java
----------------------------------------------------------------------
diff --git a/ext-data-source/api/src/main/java/org/apache/impala/extdatasource/v1/ExternalDataSource.java b/ext-data-source/api/src/main/java/org/apache/impala/extdatasource/v1/ExternalDataSource.java
index 66cf1e5..30109a2 100644
--- a/ext-data-source/api/src/main/java/org/apache/impala/extdatasource/v1/ExternalDataSource.java
+++ b/ext-data-source/api/src/main/java/org/apache/impala/extdatasource/v1/ExternalDataSource.java
@@ -15,16 +15,16 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.extdatasource.v1;
+package org.apache.impala.extdatasource.v1;
 
-import com.cloudera.impala.extdatasource.thrift.TCloseParams;
-import com.cloudera.impala.extdatasource.thrift.TCloseResult;
-import com.cloudera.impala.extdatasource.thrift.TGetNextParams;
-import com.cloudera.impala.extdatasource.thrift.TGetNextResult;
-import com.cloudera.impala.extdatasource.thrift.TOpenParams;
-import com.cloudera.impala.extdatasource.thrift.TOpenResult;
-import com.cloudera.impala.extdatasource.thrift.TPrepareParams;
-import com.cloudera.impala.extdatasource.thrift.TPrepareResult;
+import org.apache.impala.extdatasource.thrift.TCloseParams;
+import org.apache.impala.extdatasource.thrift.TCloseResult;
+import org.apache.impala.extdatasource.thrift.TGetNextParams;
+import org.apache.impala.extdatasource.thrift.TGetNextResult;
+import org.apache.impala.extdatasource.thrift.TOpenParams;
+import org.apache.impala.extdatasource.thrift.TOpenResult;
+import org.apache.impala.extdatasource.thrift.TPrepareParams;
+import org.apache.impala.extdatasource.thrift.TPrepareResult;
 
 /**
  * Defines an external data source. Called by Impala during planning (prepare() only)
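
The v1 interface whose imports are rewritten here is the one implemented by EchoDataSource and AllTypesDataSource further down. A minimal sketch of an implementation under the new package, assuming the interface exposes prepare/open/getNext/close methods keyed to the thrift parameter and result types imported above (the NoOpDataSource name and the empty result objects are placeholders, not part of the Impala sources):

    package org.apache.impala.extdatasource.sample;

    import org.apache.impala.extdatasource.thrift.TCloseParams;
    import org.apache.impala.extdatasource.thrift.TCloseResult;
    import org.apache.impala.extdatasource.thrift.TGetNextParams;
    import org.apache.impala.extdatasource.thrift.TGetNextResult;
    import org.apache.impala.extdatasource.thrift.TOpenParams;
    import org.apache.impala.extdatasource.thrift.TOpenResult;
    import org.apache.impala.extdatasource.thrift.TPrepareParams;
    import org.apache.impala.extdatasource.thrift.TPrepareResult;
    import org.apache.impala.extdatasource.v1.ExternalDataSource;

    // Placeholder implementation; a real data source would populate statuses, schemas
    // and row batches in these results instead of returning empty thrift objects.
    public class NoOpDataSource implements ExternalDataSource {
      @Override public TPrepareResult prepare(TPrepareParams params) { return new TPrepareResult(); }
      @Override public TOpenResult open(TOpenParams params) { return new TOpenResult(); }
      @Override public TGetNextResult getNext(TGetNextParams params) { return new TGetNextResult(); }
      @Override public TCloseResult close(TCloseParams params) { return new TCloseResult(); }
    }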

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/ext-data-source/pom.xml
----------------------------------------------------------------------
diff --git a/ext-data-source/pom.xml b/ext-data-source/pom.xml
index 63e9a99..db107f3 100644
--- a/ext-data-source/pom.xml
+++ b/ext-data-source/pom.xml
@@ -20,9 +20,9 @@
          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
   <modelVersion>4.0.0</modelVersion>
-  <groupId>com.cloudera.impala</groupId>
+  <groupId>org.apache.impala</groupId>
   <artifactId>impala-data-source</artifactId>
-  <name>Cloudera Impala External Data Source</name>
+  <name>Apache Impala (Incubating) External Data Source</name>
   <version>1.0-SNAPSHOT</version>
   <packaging>pom</packaging>
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/ext-data-source/sample/pom.xml
----------------------------------------------------------------------
diff --git a/ext-data-source/sample/pom.xml b/ext-data-source/sample/pom.xml
index e2323a9..383164b 100644
--- a/ext-data-source/sample/pom.xml
+++ b/ext-data-source/sample/pom.xml
@@ -21,14 +21,14 @@
          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
   <modelVersion>4.0.0</modelVersion>
   <parent>
-    <groupId>com.cloudera.impala</groupId>
+    <groupId>org.apache.impala</groupId>
     <artifactId>impala-data-source</artifactId>
     <version>1.0-SNAPSHOT</version>
   </parent>
-  <groupId>com.cloudera.impala</groupId>
+  <groupId>org.apache.impala</groupId>
   <artifactId>impala-data-source-sample</artifactId>
   <version>1.0-SNAPSHOT</version>
-  <name>Cloudera Impala External Data Source Sample</name>
+  <name>Apache Impala (Incubating) External Data Source Sample</name>
   <description>External Data Source Sample</description>
   <packaging>jar</packaging>
   <url>.</url>
@@ -64,7 +64,7 @@
 
   <dependencies>
     <dependency>
-      <groupId>com.cloudera.impala</groupId>
+      <groupId>org.apache.impala</groupId>
       <artifactId>impala-data-source-api</artifactId>
       <version>${project.version}</version>
     </dependency>

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/ext-data-source/sample/src/main/java/org/apache/impala/extdatasource/sample/EchoDataSource.java
----------------------------------------------------------------------
diff --git a/ext-data-source/sample/src/main/java/org/apache/impala/extdatasource/sample/EchoDataSource.java b/ext-data-source/sample/src/main/java/org/apache/impala/extdatasource/sample/EchoDataSource.java
index e3d39c4..0f4d899 100644
--- a/ext-data-source/sample/src/main/java/org/apache/impala/extdatasource/sample/EchoDataSource.java
+++ b/ext-data-source/sample/src/main/java/org/apache/impala/extdatasource/sample/EchoDataSource.java
@@ -15,21 +15,21 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.extdatasource.sample;
+package org.apache.impala.extdatasource.sample;
 
-import com.cloudera.impala.extdatasource.thrift.TCloseParams;
-import com.cloudera.impala.extdatasource.thrift.TCloseResult;
-import com.cloudera.impala.extdatasource.thrift.TGetNextParams;
-import com.cloudera.impala.extdatasource.thrift.TGetNextResult;
-import com.cloudera.impala.extdatasource.thrift.TOpenParams;
-import com.cloudera.impala.extdatasource.thrift.TOpenResult;
-import com.cloudera.impala.extdatasource.thrift.TPrepareParams;
-import com.cloudera.impala.extdatasource.thrift.TPrepareResult;
-import com.cloudera.impala.extdatasource.thrift.TRowBatch;
-import com.cloudera.impala.extdatasource.v1.ExternalDataSource;
-import com.cloudera.impala.thrift.TColumnData;
-import com.cloudera.impala.thrift.TStatus;
-import com.cloudera.impala.thrift.TErrorCode;
+import org.apache.impala.extdatasource.thrift.TCloseParams;
+import org.apache.impala.extdatasource.thrift.TCloseResult;
+import org.apache.impala.extdatasource.thrift.TGetNextParams;
+import org.apache.impala.extdatasource.thrift.TGetNextResult;
+import org.apache.impala.extdatasource.thrift.TOpenParams;
+import org.apache.impala.extdatasource.thrift.TOpenResult;
+import org.apache.impala.extdatasource.thrift.TPrepareParams;
+import org.apache.impala.extdatasource.thrift.TPrepareResult;
+import org.apache.impala.extdatasource.thrift.TRowBatch;
+import org.apache.impala.extdatasource.v1.ExternalDataSource;
+import org.apache.impala.thrift.TColumnData;
+import org.apache.impala.thrift.TStatus;
+import org.apache.impala.thrift.TErrorCode;
 import com.google.common.collect.Lists;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/ext-data-source/test/pom.xml
----------------------------------------------------------------------
diff --git a/ext-data-source/test/pom.xml b/ext-data-source/test/pom.xml
index af13940..3c3d5a1 100644
--- a/ext-data-source/test/pom.xml
+++ b/ext-data-source/test/pom.xml
@@ -21,14 +21,14 @@
          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
   <modelVersion>4.0.0</modelVersion>
   <parent>
-    <groupId>com.cloudera.impala</groupId>
+    <groupId>org.apache.impala</groupId>
     <artifactId>impala-data-source</artifactId>
     <version>1.0-SNAPSHOT</version>
   </parent>
-  <groupId>com.cloudera.impala</groupId>
+  <groupId>org.apache.impala</groupId>
   <artifactId>impala-data-source-test</artifactId>
   <version>0.1-SNAPSHOT</version>
-  <name>Cloudera Impala External Data Source Test Library</name>
+  <name>Apache Impala (Incubating) External Data Source Test Library</name>
   <description>Test External Data Source</description>
   <packaging>jar</packaging>
   <url>.</url>
@@ -68,7 +68,7 @@
 
   <dependencies>
     <dependency>
-      <groupId>com.cloudera.impala</groupId>
+      <groupId>org.apache.impala</groupId>
       <artifactId>impala-data-source-api</artifactId>
       <version>${impala.extdatasrc.api.version}</version>
     </dependency>

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/ext-data-source/test/src/main/java/org/apache/impala/extdatasource/AllTypesDataSource.java
----------------------------------------------------------------------
diff --git a/ext-data-source/test/src/main/java/org/apache/impala/extdatasource/AllTypesDataSource.java b/ext-data-source/test/src/main/java/org/apache/impala/extdatasource/AllTypesDataSource.java
index a6a731b..a80c7a4 100644
--- a/ext-data-source/test/src/main/java/org/apache/impala/extdatasource/AllTypesDataSource.java
+++ b/ext-data-source/test/src/main/java/org/apache/impala/extdatasource/AllTypesDataSource.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.extdatasource;
+package org.apache.impala.extdatasource;
 
 import java.math.BigDecimal;
 import java.math.BigInteger;
@@ -23,27 +23,27 @@ import java.sql.Timestamp;
 import java.util.List;
 import java.util.UUID;
 
-import com.cloudera.impala.extdatasource.thrift.TBinaryPredicate;
-import com.cloudera.impala.extdatasource.thrift.TCloseParams;
-import com.cloudera.impala.extdatasource.thrift.TCloseResult;
-import com.cloudera.impala.extdatasource.thrift.TColumnDesc;
-import com.cloudera.impala.extdatasource.thrift.TGetNextParams;
-import com.cloudera.impala.extdatasource.thrift.TGetNextResult;
-import com.cloudera.impala.extdatasource.thrift.TOpenParams;
-import com.cloudera.impala.extdatasource.thrift.TOpenResult;
-import com.cloudera.impala.extdatasource.thrift.TPrepareParams;
-import com.cloudera.impala.extdatasource.thrift.TPrepareResult;
-import com.cloudera.impala.extdatasource.thrift.TRowBatch;
-import com.cloudera.impala.extdatasource.thrift.TTableSchema;
-import com.cloudera.impala.extdatasource.util.SerializationUtils;
-import com.cloudera.impala.extdatasource.v1.ExternalDataSource;
-import com.cloudera.impala.thrift.TColumnData;
-import com.cloudera.impala.thrift.TColumnType;
-import com.cloudera.impala.thrift.TPrimitiveType;
-import com.cloudera.impala.thrift.TScalarType;
-import com.cloudera.impala.thrift.TStatus;
-import com.cloudera.impala.thrift.TErrorCode;
-import com.cloudera.impala.thrift.TTypeNodeType;
+import org.apache.impala.extdatasource.thrift.TBinaryPredicate;
+import org.apache.impala.extdatasource.thrift.TCloseParams;
+import org.apache.impala.extdatasource.thrift.TCloseResult;
+import org.apache.impala.extdatasource.thrift.TColumnDesc;
+import org.apache.impala.extdatasource.thrift.TGetNextParams;
+import org.apache.impala.extdatasource.thrift.TGetNextResult;
+import org.apache.impala.extdatasource.thrift.TOpenParams;
+import org.apache.impala.extdatasource.thrift.TOpenResult;
+import org.apache.impala.extdatasource.thrift.TPrepareParams;
+import org.apache.impala.extdatasource.thrift.TPrepareResult;
+import org.apache.impala.extdatasource.thrift.TRowBatch;
+import org.apache.impala.extdatasource.thrift.TTableSchema;
+import org.apache.impala.extdatasource.util.SerializationUtils;
+import org.apache.impala.extdatasource.v1.ExternalDataSource;
+import org.apache.impala.thrift.TColumnData;
+import org.apache.impala.thrift.TColumnType;
+import org.apache.impala.thrift.TPrimitiveType;
+import org.apache.impala.thrift.TScalarType;
+import org.apache.impala.thrift.TStatus;
+import org.apache.impala.thrift.TErrorCode;
+import org.apache.impala.thrift.TTypeNodeType;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Lists;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/pom.xml
----------------------------------------------------------------------
diff --git a/fe/pom.xml b/fe/pom.xml
index f7bffb9..08f6623 100644
--- a/fe/pom.xml
+++ b/fe/pom.xml
@@ -21,12 +21,12 @@ under the License.
 
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
   <modelVersion>4.0.0</modelVersion>
-  <groupId>com.cloudera.impala</groupId>
+  <groupId>org.apache.impala</groupId>
   <artifactId>impala-frontend</artifactId>
   <version>0.1-SNAPSHOT</version>
   <packaging>jar</packaging>
 
-  <name>Cloudera Impala Query Engine Frontend</name>
+  <name>Apache Impala (Incubating) Query Engine Frontend</name>
 
   <properties>
     <surefire.reports.dir>${env.IMPALA_LOGS_DIR}/fe_tests</surefire.reports.dir>
@@ -55,7 +55,7 @@ under the License.
       <version>3.0.4</version>
     </dependency>
     <dependency>
-      <groupId>com.cloudera.impala</groupId>
+      <groupId>org.apache.impala</groupId>
       <artifactId>impala-data-source-api</artifactId>
       <version>${impala.extdatasrc.api.version}</version>
     </dependency>

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/cup/sql-parser.cup
----------------------------------------------------------------------
diff --git a/fe/src/main/cup/sql-parser.cup b/fe/src/main/cup/sql-parser.cup
index 94a0a01..6fc76f9 100644
--- a/fe/src/main/cup/sql-parser.cup
+++ b/fe/src/main/cup/sql-parser.cup
@@ -15,27 +15,27 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
-
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.catalog.ScalarType;
-import com.cloudera.impala.catalog.ArrayType;
-import com.cloudera.impala.catalog.MapType;
-import com.cloudera.impala.catalog.StructType;
-import com.cloudera.impala.catalog.StructField;
-import com.cloudera.impala.catalog.RowFormat;
-import com.cloudera.impala.catalog.View;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.Pair;
-import com.cloudera.impala.analysis.ColumnDef;
-import com.cloudera.impala.analysis.UnionStmt.UnionOperand;
-import com.cloudera.impala.analysis.UnionStmt.Qualifier;
-import com.cloudera.impala.thrift.TCatalogObjectType;
-import com.cloudera.impala.thrift.TFunctionCategory;
-import com.cloudera.impala.thrift.TDescribeOutputStyle;
-import com.cloudera.impala.thrift.THdfsFileFormat;
-import com.cloudera.impala.thrift.TPrivilegeLevel;
-import com.cloudera.impala.thrift.TTablePropertyType;
+package org.apache.impala.analysis;
+
+import org.apache.impala.catalog.Type;
+import org.apache.impala.catalog.ScalarType;
+import org.apache.impala.catalog.ArrayType;
+import org.apache.impala.catalog.MapType;
+import org.apache.impala.catalog.StructType;
+import org.apache.impala.catalog.StructField;
+import org.apache.impala.catalog.RowFormat;
+import org.apache.impala.catalog.View;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.common.Pair;
+import org.apache.impala.analysis.ColumnDef;
+import org.apache.impala.analysis.UnionStmt.UnionOperand;
+import org.apache.impala.analysis.UnionStmt.Qualifier;
+import org.apache.impala.thrift.TCatalogObjectType;
+import org.apache.impala.thrift.TFunctionCategory;
+import org.apache.impala.thrift.TDescribeOutputStyle;
+import org.apache.impala.thrift.THdfsFileFormat;
+import org.apache.impala.thrift.TPrivilegeLevel;
+import org.apache.impala.thrift.TTablePropertyType;
 import java.math.BigDecimal;
 import java.math.BigInteger;
 import java.util.ArrayList;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/AggregateInfo.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AggregateInfo.java b/fe/src/main/java/org/apache/impala/analysis/AggregateInfo.java
index e8e9445..29b80ef 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AggregateInfo.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AggregateInfo.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.ArrayList;
 import java.util.List;
@@ -23,11 +23,11 @@ import java.util.List;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.planner.DataPartition;
-import com.cloudera.impala.thrift.TPartitionType;
+import org.apache.impala.catalog.Type;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.common.InternalException;
+import org.apache.impala.planner.DataPartition;
+import org.apache.impala.thrift.TPartitionType;
 import com.google.common.base.Objects;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/AggregateInfoBase.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AggregateInfoBase.java b/fe/src/main/java/org/apache/impala/analysis/AggregateInfoBase.java
index f3ad3f8..bced878 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AggregateInfoBase.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AggregateInfoBase.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.ArrayList;
 import java.util.List;
@@ -23,9 +23,9 @@ import java.util.List;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.catalog.AggregateFunction;
-import com.cloudera.impala.catalog.ColumnStats;
-import com.cloudera.impala.catalog.Type;
+import org.apache.impala.catalog.AggregateFunction;
+import org.apache.impala.catalog.ColumnStats;
+import org.apache.impala.catalog.Type;
 import com.google.common.base.Objects;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/AlterTableAddPartitionStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableAddPartitionStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableAddPartitionStmt.java
index a5cb2ca..4b5fbb4 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableAddPartitionStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableAddPartitionStmt.java
@@ -15,16 +15,16 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.FileSystemUtil;
-import com.cloudera.impala.thrift.TAlterTableAddPartitionParams;
-import com.cloudera.impala.thrift.TAlterTableParams;
-import com.cloudera.impala.thrift.TAlterTableType;
+import org.apache.impala.authorization.Privilege;
+import org.apache.impala.catalog.HdfsTable;
+import org.apache.impala.catalog.Table;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.common.FileSystemUtil;
+import org.apache.impala.thrift.TAlterTableAddPartitionParams;
+import org.apache.impala.thrift.TAlterTableParams;
+import org.apache.impala.thrift.TAlterTableType;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.fs.permission.FsAction;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/AlterTableAddReplaceColsStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableAddReplaceColsStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableAddReplaceColsStmt.java
index aaa223a..0354117 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableAddReplaceColsStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableAddReplaceColsStmt.java
@@ -15,20 +15,20 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.List;
 import java.util.Set;
 
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 
-import com.cloudera.impala.catalog.Column;
-import com.cloudera.impala.catalog.HBaseTable;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TAlterTableAddReplaceColsParams;
-import com.cloudera.impala.thrift.TAlterTableParams;
-import com.cloudera.impala.thrift.TAlterTableType;
+import org.apache.impala.catalog.Column;
+import org.apache.impala.catalog.HBaseTable;
+import org.apache.impala.catalog.Table;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.TAlterTableAddReplaceColsParams;
+import org.apache.impala.thrift.TAlterTableParams;
+import org.apache.impala.thrift.TAlterTableType;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/AlterTableChangeColStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableChangeColStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableChangeColStmt.java
index c733ca0..9130740 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableChangeColStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableChangeColStmt.java
@@ -15,16 +15,16 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 
-import com.cloudera.impala.catalog.HBaseTable;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TAlterTableChangeColParams;
-import com.cloudera.impala.thrift.TAlterTableParams;
-import com.cloudera.impala.thrift.TAlterTableType;
+import org.apache.impala.catalog.HBaseTable;
+import org.apache.impala.catalog.Table;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.TAlterTableChangeColParams;
+import org.apache.impala.thrift.TAlterTableParams;
+import org.apache.impala.thrift.TAlterTableType;
 import com.google.common.base.Preconditions;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/AlterTableDropColStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableDropColStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableDropColStmt.java
index d7f5ab5..3753dbe 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableDropColStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableDropColStmt.java
@@ -15,16 +15,16 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 
-import com.cloudera.impala.catalog.HBaseTable;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TAlterTableDropColParams;
-import com.cloudera.impala.thrift.TAlterTableParams;
-import com.cloudera.impala.thrift.TAlterTableType;
+import org.apache.impala.catalog.HBaseTable;
+import org.apache.impala.catalog.Table;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.TAlterTableDropColParams;
+import org.apache.impala.thrift.TAlterTableParams;
+import org.apache.impala.thrift.TAlterTableType;
 import com.google.common.base.Preconditions;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/AlterTableDropPartitionStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableDropPartitionStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableDropPartitionStmt.java
index f8bc09c..773fea4 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableDropPartitionStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableDropPartitionStmt.java
@@ -15,13 +15,13 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TAlterTableDropPartitionParams;
-import com.cloudera.impala.thrift.TAlterTableParams;
-import com.cloudera.impala.thrift.TAlterTableType;
+import org.apache.impala.authorization.Privilege;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.TAlterTableDropPartitionParams;
+import org.apache.impala.thrift.TAlterTableParams;
+import org.apache.impala.thrift.TAlterTableType;
 import com.google.common.base.Preconditions;
 
 /**



[10/61] [partial] incubator-impala git commit: IMPALA-3786: Replace "cloudera" with "apache" (part 1)

Posted by kw...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/util/TSessionStateUtil.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/util/TSessionStateUtil.java b/fe/src/main/java/com/cloudera/impala/util/TSessionStateUtil.java
deleted file mode 100644
index f055a58..0000000
--- a/fe/src/main/java/com/cloudera/impala/util/TSessionStateUtil.java
+++ /dev/null
@@ -1,38 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.util;
-
-import com.cloudera.impala.thrift.TSessionState;
-
-/**
- * Utility functions for working with TSessionState objects.
- */
-public class TSessionStateUtil {
-  /**
-   * Returns the actual user to perform authorisation against for the provided
-   * session. That is, returns the delegated user for a session if set, otherwise
-   * returns the connected user.
-   */
-  public static String getEffectiveUser(TSessionState session) {
-    if (session.getDelegated_user() != null &&
-        !session.getDelegated_user().isEmpty()) {
-      return session.getDelegated_user();
-    }
-    return session.getConnected_user();
-  }
-}

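To read the removed getEffectiveUser() rule concretely: a session whose connected_user is "hue" but whose delegated_user has been set to "alice" resolves to "alice", while a session with no delegated user falls back to "hue" (the user names here are hypothetical).
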
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/util/UnsafeUtil.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/util/UnsafeUtil.java b/fe/src/main/java/com/cloudera/impala/util/UnsafeUtil.java
deleted file mode 100644
index f0c69c5..0000000
--- a/fe/src/main/java/com/cloudera/impala/util/UnsafeUtil.java
+++ /dev/null
@@ -1,68 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.util;
-
-import java.lang.reflect.Field;
-import java.security.AccessController;
-import java.security.PrivilegedAction;
-
-import sun.misc.Unsafe;
-
-@SuppressWarnings("restriction")
-/**
- * Utility class to access unsafe methods.
- */
-public class UnsafeUtil {
-  // object to allow us to use unsafe APIs. This lets us read native memory without
-  // copies and not have to switch back and forth between little endian and big endian.
-  public static final Unsafe UNSAFE;
-
-  // This is the offset to the start of the array data. (There's some bytes
-  // before the array data like the size and other java stuff).
-  private static final int BYTE_ARRAY_DATA_OFFSET;
-
-  static {
-    UNSAFE = (Unsafe) AccessController.doPrivileged(
-        new PrivilegedAction<Object>() {
-          @Override
-          public Object run() {
-            try {
-              Field f = Unsafe.class.getDeclaredField("theUnsafe");
-              f.setAccessible(true);
-              return f.get(null);
-            } catch (NoSuchFieldException e) {
-              throw new Error();
-            } catch (IllegalAccessException e) {
-              throw new Error();
-            }
-          }
-        });
-
-    BYTE_ARRAY_DATA_OFFSET = UNSAFE.arrayBaseOffset(byte[].class);
-  }
-
-  // Copies src[srcOffset, srcOffset + len) into dst.
-  public static void Copy(long dst, byte[] src, int srcOffset, int len) {
-    UNSAFE.copyMemory(src, BYTE_ARRAY_DATA_OFFSET + srcOffset, null, dst, len);
-  }
-
-  // Copies src[0, len) into dst[dstOffset, dstOffset + len).
-  public static void Copy(byte[] dst, int dstOffset, long src, int len) {
-    UNSAFE.copyMemory(null, src, dst, dstOffset + BYTE_ARRAY_DATA_OFFSET, len);
-  }
-}

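A minimal usage sketch for the two Copy() overloads removed above, assuming the pre-removal UnsafeUtil class is still on the classpath; allocateMemory()/freeMemory() are standard sun.misc.Unsafe calls, and the byte values are arbitrary:

    import com.cloudera.impala.util.UnsafeUtil;
    import sun.misc.Unsafe;

    public class UnsafeCopySketch {
      public static void main(String[] args) {
        Unsafe unsafe = UnsafeUtil.UNSAFE;
        byte[] src = {1, 2, 3, 4};
        byte[] dst = new byte[src.length];
        long addr = unsafe.allocateMemory(src.length);    // native buffer
        try {
          UnsafeUtil.Copy(addr, src, 0, src.length);      // heap -> native
          UnsafeUtil.Copy(dst, 0, addr, src.length);      // native -> heap
          System.out.println(java.util.Arrays.toString(dst));  // [1, 2, 3, 4]
        } finally {
          unsafe.freeMemory(addr);
        }
      }
    }

The round trip copies the array into native memory and back with no intermediate buffering, which is what the helper exists for.
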
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/util/Visitor.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/util/Visitor.java b/fe/src/main/java/com/cloudera/impala/util/Visitor.java
deleted file mode 100644
index ac7b8e3..0000000
--- a/fe/src/main/java/com/cloudera/impala/util/Visitor.java
+++ /dev/null
@@ -1,27 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.util;
-
-/**
-* Interface for visitor pattern
-*/
-public interface Visitor<T> {
-
-  void visit(T a);
-
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/AggregateInfo.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AggregateInfo.java b/fe/src/main/java/org/apache/impala/analysis/AggregateInfo.java
new file mode 100644
index 0000000..e8e9445
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/AggregateInfo.java
@@ -0,0 +1,742 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.cloudera.impala.catalog.Type;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.common.InternalException;
+import com.cloudera.impala.planner.DataPartition;
+import com.cloudera.impala.thrift.TPartitionType;
+import com.google.common.base.Objects;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+
+/**
+ * Encapsulates all the information needed to compute the aggregate functions of a single
+ * Select block, including a possible 2nd phase aggregation step for DISTINCT aggregate
+ * functions and merge aggregation steps needed for distributed execution.
+ *
+ * The latter requires a tree structure of AggregateInfo objects which express the
+ * original aggregate computations as well as the necessary merging aggregate
+ * computations.
+ * TODO: get rid of this by transforming
+ *   SELECT COUNT(DISTINCT a, b, ..) GROUP BY x, y, ...
+ * into an equivalent query with an inline view:
+ *   SELECT COUNT(*) FROM (SELECT DISTINCT a, b, ..., x, y, ...) GROUP BY x, y, ...
+ *
+ * The tree structure looks as follows:
+ * - for non-distinct aggregation:
+ *   - aggInfo: contains the original aggregation functions and grouping exprs
+ *   - aggInfo.mergeAggInfo: contains the merging aggregation functions (grouping
+ *     exprs are identical)
+ * - for distinct aggregation (for an explanation of the phases, see
+ *   SelectStmt.createDistinctAggInfo()):
+ *   - aggInfo: contains the phase 1 aggregate functions and grouping exprs
+ *   - aggInfo.2ndPhaseDistinctAggInfo: contains the phase 2 aggregate functions and
+ *     grouping exprs
+ *   - aggInfo.mergeAggInfo: contains the merging aggregate functions for the phase 1
+ *     computation (grouping exprs are identical)
+ *   - aggInfo.2ndPhaseDistinctAggInfo.mergeAggInfo: contains the merging aggregate
+ *     functions for the phase 2 computation (grouping exprs are identical)
+ *
+ * In general, merging aggregate computations are idempotent; in other words,
+ * aggInfo.mergeAggInfo == aggInfo.mergeAggInfo.mergeAggInfo.
+ *
+ * TODO: move the merge construction logic from SelectStmt into AggregateInfo
+ * TODO: Add query tests for aggregation with intermediate tuples with num_nodes=1.
+ */
+public class AggregateInfo extends AggregateInfoBase {
+  private final static Logger LOG = LoggerFactory.getLogger(AggregateInfo.class);
+
+  public enum AggPhase {
+    FIRST,
+    FIRST_MERGE,
+    SECOND,
+    SECOND_MERGE;
+
+    public boolean isMerge() { return this == FIRST_MERGE || this == SECOND_MERGE; }
+  };
+
+  // created by createMergeAggInfo()
+  private AggregateInfo mergeAggInfo_;
+
+  // created by createDistinctAggInfo()
+  private AggregateInfo secondPhaseDistinctAggInfo_;
+
+  private final AggPhase aggPhase_;
+
+  // Map from all grouping and aggregate exprs to a SlotRef referencing the corresp. slot
+  // in the intermediate tuple. Identical to outputTupleSmap_ if no aggregateExpr has an
+  // output type that is different from its intermediate type.
+  protected ExprSubstitutionMap intermediateTupleSmap_ = new ExprSubstitutionMap();
+
+  // Map from all grouping and aggregate exprs to a SlotRef referencing the corresp. slot
+  // in the output tuple.
+  protected ExprSubstitutionMap outputTupleSmap_ = new ExprSubstitutionMap();
+
+  // Map from slots of outputTupleSmap_ to the corresponding slot in
+  // intermediateTupleSmap_.
+  protected ExprSubstitutionMap outputToIntermediateTupleSmap_ =
+      new ExprSubstitutionMap();
+
+  // if set, a subset of groupingExprs_; set and used during planning
+  private List<Expr> partitionExprs_;
+
+  // C'tor creates copies of groupingExprs and aggExprs.
+  private AggregateInfo(ArrayList<Expr> groupingExprs,
+      ArrayList<FunctionCallExpr> aggExprs, AggPhase aggPhase)  {
+    super(groupingExprs, aggExprs);
+    aggPhase_ = aggPhase;
+  }
+
+  /**
+   * C'tor for cloning.
+   */
+  private AggregateInfo(AggregateInfo other) {
+    super(other);
+    if (other.mergeAggInfo_ != null) {
+      mergeAggInfo_ = other.mergeAggInfo_.clone();
+    }
+    if (other.secondPhaseDistinctAggInfo_ != null) {
+      secondPhaseDistinctAggInfo_ = other.secondPhaseDistinctAggInfo_.clone();
+    }
+    aggPhase_ = other.aggPhase_;
+    outputTupleSmap_ = other.outputTupleSmap_.clone();
+    if (other.requiresIntermediateTuple()) {
+      intermediateTupleSmap_ = other.intermediateTupleSmap_.clone();
+    } else {
+      Preconditions.checkState(other.intermediateTupleDesc_ == other.outputTupleDesc_);
+      intermediateTupleSmap_ = outputTupleSmap_;
+    }
+    partitionExprs_ =
+        (other.partitionExprs_ != null) ? Expr.cloneList(other.partitionExprs_) : null;
+  }
+
+  public List<Expr> getPartitionExprs() { return partitionExprs_; }
+  public void setPartitionExprs(List<Expr> exprs) { partitionExprs_ = exprs; }
+
+  /**
+   * Creates complete AggregateInfo for groupingExprs and aggExprs, including
+   * aggTupleDesc and aggTupleSMap. If parameter tupleDesc != null, sets aggTupleDesc to
+   * that instead of creating a new descriptor (after verifying that the passed-in
+   * descriptor is correct for the given aggregation).
+   * Also creates mergeAggInfo and secondPhaseDistinctAggInfo, if needed.
+   * If an aggTupleDesc is created, also registers eq predicates between the
+   * grouping exprs and their respective slots with 'analyzer'.
+   */
+  static public AggregateInfo create(
+      ArrayList<Expr> groupingExprs, ArrayList<FunctionCallExpr> aggExprs,
+      TupleDescriptor tupleDesc, Analyzer analyzer)
+          throws AnalysisException {
+    Preconditions.checkState(
+        (groupingExprs != null && !groupingExprs.isEmpty())
+        || (aggExprs != null && !aggExprs.isEmpty()));
+    Expr.removeDuplicates(groupingExprs);
+    Expr.removeDuplicates(aggExprs);
+    AggregateInfo result = new AggregateInfo(groupingExprs, aggExprs, AggPhase.FIRST);
+
+    // collect agg exprs with DISTINCT clause
+    ArrayList<FunctionCallExpr> distinctAggExprs = Lists.newArrayList();
+    if (aggExprs != null) {
+      for (FunctionCallExpr aggExpr: aggExprs) {
+        if (aggExpr.isDistinct()) distinctAggExprs.add(aggExpr);
+      }
+    }
+
+    if (distinctAggExprs.isEmpty()) {
+      if (tupleDesc == null) {
+        result.createTupleDescs(analyzer);
+        result.createSmaps(analyzer);
+      } else {
+        // A tupleDesc should only be given for UNION DISTINCT.
+        Preconditions.checkState(aggExprs == null);
+        result.outputTupleDesc_ = tupleDesc;
+        result.intermediateTupleDesc_ = tupleDesc;
+      }
+      result.createMergeAggInfo(analyzer);
+    } else {
+      // we don't allow you to pass in a descriptor for distinct aggregation
+      // (we need two descriptors)
+      Preconditions.checkState(tupleDesc == null);
+      result.createDistinctAggInfo(groupingExprs, distinctAggExprs, analyzer);
+    }
+    LOG.debug("agg info:\n" + result.debugString());
+    return result;
+  }
+
+  /**
+   * Create aggregate info for select block containing aggregate exprs with
+   * DISTINCT clause.
+   * This creates:
+   * - aggTupleDesc
+   * - a complete secondPhaseDistinctAggInfo
+   * - mergeAggInfo
+   *
+   * At the moment, we require that all distinct aggregate
+   * functions be applied to the same set of exprs (ie, we can't do something
+   * like SELECT COUNT(DISTINCT id), COUNT(DISTINCT address)).
+   * Aggregation happens in two successive phases:
+   * - the first phase aggregates by all grouping exprs plus all parameter exprs
+   *   of DISTINCT aggregate functions
+   *
+   * Example:
+   *   SELECT a, COUNT(DISTINCT b, c), MIN(d), COUNT(*) FROM T GROUP BY a
+   * - 1st phase grouping exprs: a, b, c
+   * - 1st phase agg exprs: MIN(d), COUNT(*)
+   * - 2nd phase grouping exprs: a
+   * - 2nd phase agg exprs: COUNT(*), MIN(<MIN(d) from 1st phase>),
+   *     SUM(<COUNT(*) from 1st phase>)
+   *
+   * TODO: expand implementation to cover the general case; this will require
+   * a different execution strategy
+   */
+  private void createDistinctAggInfo(
+      ArrayList<Expr> origGroupingExprs,
+      ArrayList<FunctionCallExpr> distinctAggExprs, Analyzer analyzer)
+          throws AnalysisException {
+    Preconditions.checkState(!distinctAggExprs.isEmpty());
+    // make sure that all DISTINCT params are the same;
+    // ignore top-level implicit casts in the comparison, we might have inserted
+    // those during analysis
+    ArrayList<Expr> expr0Children = Lists.newArrayList();
+
+    if (distinctAggExprs.get(0).getFnName().getFunction().equalsIgnoreCase(
+        "group_concat")) {
+      // Ignore separator parameter, otherwise the same would have to be present for all
+      // other distinct aggregates as well.
+      // TODO: Deal with constant exprs more generally, instead of special-casing
+      // group_concat().
+      expr0Children.add(distinctAggExprs.get(0).getChild(0).ignoreImplicitCast());
+    } else {
+      for (Expr expr : distinctAggExprs.get(0).getChildren()) {
+        expr0Children.add(expr.ignoreImplicitCast());
+      }
+    }
+    for (int i = 1; i < distinctAggExprs.size(); ++i) {
+      ArrayList<Expr> exprIChildren = Lists.newArrayList();
+      if (distinctAggExprs.get(i).getFnName().getFunction().equalsIgnoreCase(
+          "group_concat")) {
+        exprIChildren.add(distinctAggExprs.get(i).getChild(0).ignoreImplicitCast());
+      } else {
+        for (Expr expr : distinctAggExprs.get(i).getChildren()) {
+          exprIChildren.add(expr.ignoreImplicitCast());
+        }
+      }
+      if (!Expr.equalLists(expr0Children, exprIChildren)) {
+        throw new AnalysisException(
+            "all DISTINCT aggregate functions need to have the same set of "
+            + "parameters as " + distinctAggExprs.get(0).toSql()
+            + "; deviating function: " + distinctAggExprs.get(i).toSql());
+      }
+    }
+
+    // add DISTINCT parameters to grouping exprs
+    groupingExprs_.addAll(expr0Children);
+
+    // remove DISTINCT aggregate functions from aggExprs
+    aggregateExprs_.removeAll(distinctAggExprs);
+
+    createTupleDescs(analyzer);
+    createSmaps(analyzer);
+    createMergeAggInfo(analyzer);
+    createSecondPhaseAggInfo(origGroupingExprs, distinctAggExprs, analyzer);
+  }
+
+  public AggregateInfo getMergeAggInfo() { return mergeAggInfo_; }
+  public AggregateInfo getSecondPhaseDistinctAggInfo() {
+    return secondPhaseDistinctAggInfo_;
+  }
+  public AggPhase getAggPhase() { return aggPhase_; }
+  public boolean isMerge() { return aggPhase_.isMerge(); }
+  public boolean isDistinctAgg() { return secondPhaseDistinctAggInfo_ != null; }
+  public ExprSubstitutionMap getIntermediateSmap() { return intermediateTupleSmap_; }
+  public ExprSubstitutionMap getOutputSmap() { return outputTupleSmap_; }
+  public ExprSubstitutionMap getOutputToIntermediateSmap() {
+    return outputToIntermediateTupleSmap_;
+  }
+
+  public boolean hasAggregateExprs() {
+    return !aggregateExprs_.isEmpty() ||
+        (secondPhaseDistinctAggInfo_ != null &&
+         !secondPhaseDistinctAggInfo_.getAggregateExprs().isEmpty());
+  }
+
+  /**
+   * Return the tuple id produced in the final aggregation step.
+   */
+  public TupleId getResultTupleId() {
+    if (isDistinctAgg()) return secondPhaseDistinctAggInfo_.getOutputTupleId();
+    return getOutputTupleId();
+  }
+
+  public ArrayList<FunctionCallExpr> getMaterializedAggregateExprs() {
+    ArrayList<FunctionCallExpr> result = Lists.newArrayList();
+    for (Integer i: materializedSlots_) {
+      result.add(aggregateExprs_.get(i));
+    }
+    return result;
+  }
+
+  /**
+   * Append ids of all slots that are being referenced in the process
+   * of performing the aggregate computation described by this AggregateInfo.
+   */
+  public void getRefdSlots(List<SlotId> ids) {
+    Preconditions.checkState(outputTupleDesc_ != null);
+    if (groupingExprs_ != null) {
+      Expr.getIds(groupingExprs_, null, ids);
+    }
+    Expr.getIds(aggregateExprs_, null, ids);
+    // The backend assumes that the entire aggTupleDesc is materialized
+    for (int i = 0; i < outputTupleDesc_.getSlots().size(); ++i) {
+      ids.add(outputTupleDesc_.getSlots().get(i).getId());
+    }
+  }
+
+  /**
+   * Substitute all the expressions (grouping expr, aggregate expr) and update our
+   * substitution map according to the given substitution map:
+   * - smap typically maps from tuple t1 to tuple t2 (example: the smap of an
+   *   inline view maps the virtual table ref t1 into a base table ref t2)
+   * - our grouping and aggregate exprs need to be substituted with the given
+   *   smap so that they also reference t2
+   * - aggTupleSMap needs to be recomputed to map exprs based on t2
+   *   onto our aggTupleDesc (ie, the left-hand side needs to be substituted with
+   *   smap)
+   * - mergeAggInfo: this is not affected, because
+   *   * its grouping and aggregate exprs only reference aggTupleDesc_
+   *   * its smap is identical to aggTupleSMap_
+   * - 2ndPhaseDistinctAggInfo:
+   *   * its grouping and aggregate exprs also only reference aggTupleDesc_
+   *     and are therefore not affected
+   *   * its smap needs to be recomputed to map exprs based on t2 to its own
+   *     aggTupleDesc
+   */
+  public void substitute(ExprSubstitutionMap smap, Analyzer analyzer)
+      throws InternalException {
+    groupingExprs_ = Expr.substituteList(groupingExprs_, smap, analyzer, false);
+    LOG.trace("AggInfo: grouping_exprs=" + Expr.debugString(groupingExprs_));
+
+    // The smap in this case should not substitute the aggs themselves, only
+    // their subexpressions.
+    List<Expr> substitutedAggs =
+        Expr.substituteList(aggregateExprs_, smap, analyzer, false);
+    aggregateExprs_.clear();
+    for (Expr substitutedAgg: substitutedAggs) {
+      aggregateExprs_.add((FunctionCallExpr) substitutedAgg);
+    }
+
+    LOG.trace("AggInfo: agg_exprs=" + Expr.debugString(aggregateExprs_));
+    outputTupleSmap_.substituteLhs(smap, analyzer);
+    intermediateTupleSmap_.substituteLhs(smap, analyzer);
+    if (secondPhaseDistinctAggInfo_ != null) {
+      secondPhaseDistinctAggInfo_.substitute(smap, analyzer);
+    }
+  }
+
+  /**
+   * Create the info for an aggregation node that merges its pre-aggregated inputs:
+   * - pre-aggregation is computed by 'this'
+   * - tuple desc and smap are the same as that of the input (we're materializing
+   *   the same logical tuple)
+   * - grouping exprs: slotrefs to the input's grouping slots
+   * - aggregate exprs: aggregation of the input's aggregateExprs slots
+   *
+   * The returned AggregateInfo shares its descriptor and smap with the input info;
+   * createAggTupleDesc() must not be called on it.
+   */
+  private void createMergeAggInfo(Analyzer analyzer) {
+    Preconditions.checkState(mergeAggInfo_ == null);
+    TupleDescriptor inputDesc = intermediateTupleDesc_;
+    // construct grouping exprs
+    ArrayList<Expr> groupingExprs = Lists.newArrayList();
+    for (int i = 0; i < getGroupingExprs().size(); ++i) {
+      SlotRef slotRef = new SlotRef(inputDesc.getSlots().get(i));
+      groupingExprs.add(slotRef);
+    }
+
+    // construct agg exprs
+    ArrayList<FunctionCallExpr> aggExprs = Lists.newArrayList();
+    for (int i = 0; i < getAggregateExprs().size(); ++i) {
+      FunctionCallExpr inputExpr = getAggregateExprs().get(i);
+      Preconditions.checkState(inputExpr.isAggregateFunction());
+      Expr aggExprParam =
+          new SlotRef(inputDesc.getSlots().get(i + getGroupingExprs().size()));
+      FunctionCallExpr aggExpr = FunctionCallExpr.createMergeAggCall(
+          inputExpr, Lists.newArrayList(aggExprParam));
+      aggExpr.analyzeNoThrow(analyzer);
+      aggExprs.add(aggExpr);
+    }
+
+    AggPhase aggPhase =
+        (aggPhase_ == AggPhase.FIRST) ? AggPhase.FIRST_MERGE : AggPhase.SECOND_MERGE;
+    mergeAggInfo_ = new AggregateInfo(groupingExprs, aggExprs, aggPhase);
+    mergeAggInfo_.intermediateTupleDesc_ = intermediateTupleDesc_;
+    mergeAggInfo_.outputTupleDesc_ = outputTupleDesc_;
+    mergeAggInfo_.intermediateTupleSmap_ = intermediateTupleSmap_;
+    mergeAggInfo_.outputTupleSmap_ = outputTupleSmap_;
+    mergeAggInfo_.materializedSlots_ = materializedSlots_;
+  }
+
+  /**
+   * Creates an IF function call that returns NULL if any of the slots
+   * at indexes [firstIdx, lastIdx] return NULL.
+ * For example, the resulting IF function would look like this for 3 slots:
+   * IF(IsNull(slot1), NULL, IF(IsNull(slot2), NULL, slot3))
+   * Returns null if firstIdx is greater than lastIdx.
+   * Returns a SlotRef to the last slot if there is only one slot in range.
+   */
+  private Expr createCountDistinctAggExprParam(int firstIdx, int lastIdx,
+      ArrayList<SlotDescriptor> slots) {
+    if (firstIdx > lastIdx) return null;
+
+    Expr elseExpr = new SlotRef(slots.get(lastIdx));
+    if (firstIdx == lastIdx) return elseExpr;
+
+    for (int i = lastIdx - 1; i >= firstIdx; --i) {
+      ArrayList<Expr> ifArgs = Lists.newArrayList();
+      SlotRef slotRef = new SlotRef(slots.get(i));
+      // Build expr: IF(IsNull(slotRef), NULL, elseExpr)
+      Expr isNullPred = new IsNullPredicate(slotRef, false);
+      ifArgs.add(isNullPred);
+      ifArgs.add(new NullLiteral());
+      ifArgs.add(elseExpr);
+      elseExpr = new FunctionCallExpr("if", ifArgs);
+    }
+    return elseExpr;
+  }
+
+  /**
+   * Create the info for an aggregation node that computes the second phase of
+   * DISTINCT aggregate functions.
+   * (Refer to createDistinctAggInfo() for an explanation of the phases.)
+   * - 'this' is the phase 1 aggregation
+   * - grouping exprs are those of the original query (param origGroupingExprs)
+   * - aggregate exprs for the DISTINCT agg fns: these are aggregating the grouping
+   *   slots that were added to the original grouping slots in phase 1;
+   *   count is mapped to count(*) and sum is mapped to sum
+   * - other aggregate exprs: same as the non-DISTINCT merge case
+   *   (count is mapped to sum, everything else stays the same)
+   *
+   * This call also creates the tuple descriptor and smap for the returned AggregateInfo.
+   */
+  private void createSecondPhaseAggInfo(
+      ArrayList<Expr> origGroupingExprs,
+      ArrayList<FunctionCallExpr> distinctAggExprs, Analyzer analyzer)
+      throws AnalysisException {
+    Preconditions.checkState(secondPhaseDistinctAggInfo_ == null);
+    Preconditions.checkState(!distinctAggExprs.isEmpty());
+    // The output of the 1st phase agg is the 1st phase intermediate.
+    TupleDescriptor inputDesc = intermediateTupleDesc_;
+
+    // construct agg exprs for original DISTINCT aggregate functions
+    // (these aren't part of aggExprs_)
+    ArrayList<FunctionCallExpr> secondPhaseAggExprs = Lists.newArrayList();
+    for (FunctionCallExpr inputExpr: distinctAggExprs) {
+      Preconditions.checkState(inputExpr.isAggregateFunction());
+      FunctionCallExpr aggExpr = null;
+      if (inputExpr.getFnName().getFunction().equals("count")) {
+        // COUNT(DISTINCT ...) ->
+        // COUNT(IF(IsNull(<agg slot 1>), NULL, IF(IsNull(<agg slot 2>), NULL, ...)))
+        // We need the nested IF to make sure that we do not count
+        // column-value combinations if any of the distinct columns are NULL.
+        // This behavior is consistent with MySQL.
+        Expr ifExpr = createCountDistinctAggExprParam(origGroupingExprs.size(),
+            origGroupingExprs.size() + inputExpr.getChildren().size() - 1,
+            inputDesc.getSlots());
+        Preconditions.checkNotNull(ifExpr);
+        ifExpr.analyzeNoThrow(analyzer);
+        aggExpr = new FunctionCallExpr("count", Lists.newArrayList(ifExpr));
+      } else if (inputExpr.getFnName().getFunction().equals("group_concat")) {
+        // Syntax: GROUP_CONCAT([DISTINCT] expression [, separator])
+        ArrayList<Expr> exprList = Lists.newArrayList();
+        // Add "expression" parameter. Need to get it from the inputDesc's slots so the
+        // tuple reference is correct.
+        exprList.add(new SlotRef(inputDesc.getSlots().get(origGroupingExprs.size())));
+        // Check if user provided a custom separator
+        if (inputExpr.getChildren().size() == 2) exprList.add(inputExpr.getChild(1));
+        aggExpr = new FunctionCallExpr(inputExpr.getFnName(), exprList);
+      } else {
+        // SUM(DISTINCT <expr>) -> SUM(<last grouping slot>);
+        // (MIN(DISTINCT ...) and MAX(DISTINCT ...) have their DISTINCT turned
+        // off during analysis, and AVG() is changed to SUM()/COUNT())
+        Expr aggExprParam =
+            new SlotRef(inputDesc.getSlots().get(origGroupingExprs.size()));
+        aggExpr = new FunctionCallExpr(inputExpr.getFnName(),
+            Lists.newArrayList(aggExprParam));
+      }
+      secondPhaseAggExprs.add(aggExpr);
+    }
+
+    // map all the remaining agg fns
+    for (int i = 0; i < aggregateExprs_.size(); ++i) {
+      FunctionCallExpr inputExpr = aggregateExprs_.get(i);
+      Preconditions.checkState(inputExpr.isAggregateFunction());
+      // we're aggregating an intermediate slot of the 1st agg phase
+      Expr aggExprParam =
+          new SlotRef(inputDesc.getSlots().get(i + getGroupingExprs().size()));
+      FunctionCallExpr aggExpr = FunctionCallExpr.createMergeAggCall(
+          inputExpr, Lists.newArrayList(aggExprParam));
+      secondPhaseAggExprs.add(aggExpr);
+    }
+    Preconditions.checkState(
+        secondPhaseAggExprs.size() == aggregateExprs_.size() + distinctAggExprs.size());
+
+    for (FunctionCallExpr aggExpr: secondPhaseAggExprs) {
+      aggExpr.analyzeNoThrow(analyzer);
+      Preconditions.checkState(aggExpr.isAggregateFunction());
+    }
+
+    ArrayList<Expr> substGroupingExprs =
+        Expr.substituteList(origGroupingExprs, intermediateTupleSmap_, analyzer, false);
+    secondPhaseDistinctAggInfo_ =
+        new AggregateInfo(substGroupingExprs, secondPhaseAggExprs, AggPhase.SECOND);
+    secondPhaseDistinctAggInfo_.createTupleDescs(analyzer);
+    secondPhaseDistinctAggInfo_.createSecondPhaseAggSMap(this, distinctAggExprs);
+    secondPhaseDistinctAggInfo_.createMergeAggInfo(analyzer);
+  }
+
+  /**
+   * Create smap to map original grouping and aggregate exprs onto output
+   * of secondPhaseDistinctAggInfo.
+   */
+  private void createSecondPhaseAggSMap(
+      AggregateInfo inputAggInfo, ArrayList<FunctionCallExpr> distinctAggExprs) {
+    outputTupleSmap_.clear();
+    int slotIdx = 0;
+    ArrayList<SlotDescriptor> slotDescs = outputTupleDesc_.getSlots();
+
+    int numDistinctParams = distinctAggExprs.get(0).getChildren().size();
+    // If we are counting distinct params of group_concat, we cannot include the custom
+    // separator since it is not a distinct param.
+    if (distinctAggExprs.get(0).getFnName().getFunction().equalsIgnoreCase(
+        "group_concat")
+        && numDistinctParams == 2) {
+      --numDistinctParams;
+    }
+    int numOrigGroupingExprs =
+        inputAggInfo.getGroupingExprs().size() - numDistinctParams;
+    Preconditions.checkState(slotDescs.size() ==
+        numOrigGroupingExprs + distinctAggExprs.size() +
+        inputAggInfo.getAggregateExprs().size());
+
+    // original grouping exprs -> first m slots
+    for (int i = 0; i < numOrigGroupingExprs; ++i, ++slotIdx) {
+      Expr groupingExpr = inputAggInfo.getGroupingExprs().get(i);
+      outputTupleSmap_.put(
+          groupingExpr.clone(), new SlotRef(slotDescs.get(slotIdx)));
+    }
+
+    // distinct agg exprs -> next n slots
+    for (int i = 0; i < distinctAggExprs.size(); ++i, ++slotIdx) {
+      Expr aggExpr = distinctAggExprs.get(i);
+      outputTupleSmap_.put(
+          aggExpr.clone(), (new SlotRef(slotDescs.get(slotIdx))));
+    }
+
+    // remaining agg exprs -> remaining slots
+    for (int i = 0; i < inputAggInfo.getAggregateExprs().size(); ++i, ++slotIdx) {
+      Expr aggExpr = inputAggInfo.getAggregateExprs().get(i);
+      outputTupleSmap_.put(aggExpr.clone(), new SlotRef(slotDescs.get(slotIdx)));
+    }
+  }
+
+  /**
+   * Populates the output and intermediate smaps based on the output and intermediate
+   * tuples that are assumed to be set. If an intermediate tuple is required, also
+   * populates the output-to-intermediate smap and registers auxiliary equivalence
+   * predicates between the grouping slots of the two tuples.
+   */
+  public void createSmaps(Analyzer analyzer) {
+    Preconditions.checkNotNull(outputTupleDesc_);
+    Preconditions.checkNotNull(intermediateTupleDesc_);
+
+    List<Expr> exprs = Lists.newArrayListWithCapacity(
+        groupingExprs_.size() + aggregateExprs_.size());
+    exprs.addAll(groupingExprs_);
+    exprs.addAll(aggregateExprs_);
+    for (int i = 0; i < exprs.size(); ++i) {
+      outputTupleSmap_.put(exprs.get(i).clone(),
+          new SlotRef(outputTupleDesc_.getSlots().get(i)));
+      if (!requiresIntermediateTuple()) continue;
+      intermediateTupleSmap_.put(exprs.get(i).clone(),
+          new SlotRef(intermediateTupleDesc_.getSlots().get(i)));
+      outputToIntermediateTupleSmap_.put(
+          new SlotRef(outputTupleDesc_.getSlots().get(i)),
+          new SlotRef(intermediateTupleDesc_.getSlots().get(i)));
+      if (i < groupingExprs_.size()) {
+        analyzer.createAuxEquivPredicate(
+            new SlotRef(outputTupleDesc_.getSlots().get(i)),
+            new SlotRef(intermediateTupleDesc_.getSlots().get(i)));
+      }
+    }
+    if (!requiresIntermediateTuple()) intermediateTupleSmap_ = outputTupleSmap_;
+
+    LOG.trace("output smap=" + outputTupleSmap_.debugString());
+    LOG.trace("intermediate smap=" + intermediateTupleSmap_.debugString());
+  }
+
+  /**
+   * Mark slots required for this aggregation as materialized:
+   * - all grouping output slots as well as grouping exprs
+   * - for non-distinct aggregation: the aggregate exprs of materialized aggregate slots;
+   *   this assumes that the output slots corresponding to aggregate exprs have already
+   *   been marked by the consumer of this select block
+   * - for distinct aggregation, we mark all aggregate output slots in order to keep
+   *   things simple
+   * Also computes materializedAggregateExprs.
+   * This call must be idempotent because it may be called more than once for Union stmt.
+   */
+  @Override
+  public void materializeRequiredSlots(Analyzer analyzer, ExprSubstitutionMap smap) {
+    for (int i = 0; i < groupingExprs_.size(); ++i) {
+      outputTupleDesc_.getSlots().get(i).setIsMaterialized(true);
+      intermediateTupleDesc_.getSlots().get(i).setIsMaterialized(true);
+    }
+
+    // collect input exprs: grouping exprs plus aggregate exprs that need to be
+    // materialized
+    materializedSlots_.clear();
+    List<Expr> exprs = Lists.newArrayList();
+    exprs.addAll(groupingExprs_);
+    for (int i = 0; i < aggregateExprs_.size(); ++i) {
+      SlotDescriptor slotDesc =
+          outputTupleDesc_.getSlots().get(groupingExprs_.size() + i);
+      SlotDescriptor intermediateSlotDesc =
+          intermediateTupleDesc_.getSlots().get(groupingExprs_.size() + i);
+      if (isDistinctAgg()) {
+        slotDesc.setIsMaterialized(true);
+        intermediateSlotDesc.setIsMaterialized(true);
+      }
+      if (!slotDesc.isMaterialized()) continue;
+      intermediateSlotDesc.setIsMaterialized(true);
+      exprs.add(aggregateExprs_.get(i));
+      materializedSlots_.add(i);
+    }
+    List<Expr> resolvedExprs = Expr.substituteList(exprs, smap, analyzer, false);
+    analyzer.materializeSlots(resolvedExprs);
+
+    if (isDistinctAgg()) {
+      secondPhaseDistinctAggInfo_.materializeRequiredSlots(analyzer, null);
+    }
+  }
+
+  /**
+   * Checks if all materialized aggregate expressions have distinct semantics.
+   * It returns true if either of the following is true:
+   * (1) all materialized aggregate expressions have distinct semantics
+   *     (e.g. MIN, MAX, NDV). In other words, this optimization will work
+   *     for COUNT(DISTINCT c) but not COUNT(c).
+   * (2) there are no aggregate expressions but only grouping expressions.
+   */
+  public boolean hasAllDistinctAgg() {
+    if (hasAggregateExprs()) {
+      for (FunctionCallExpr aggExpr : getMaterializedAggregateExprs()) {
+        if (!aggExpr.isDistinct() && !aggExpr.ignoresDistinct()) return false;
+      }
+    } else {
+      Preconditions.checkState(!groupingExprs_.isEmpty());
+    }
+    return true;
+  }
+
+  /**
+   * Validates the internal state of this agg info: Checks that the number of
+   * materialized slots of the output tuple corresponds to the number of materialized
+   * aggregate functions plus the number of grouping exprs. Also checks that the return
+   * types of the aggregate and grouping exprs correspond to the slots in the output
+   * tuple.
+   */
+  public void checkConsistency() {
+    ArrayList<SlotDescriptor> slots = outputTupleDesc_.getSlots();
+
+    // Check materialized slots.
+    int numMaterializedSlots = 0;
+    for (SlotDescriptor slotDesc: slots) {
+      if (slotDesc.isMaterialized()) ++numMaterializedSlots;
+    }
+    Preconditions.checkState(numMaterializedSlots ==
+        materializedSlots_.size() + groupingExprs_.size());
+
+    // Check that grouping expr return types match the slot descriptors.
+    int slotIdx = 0;
+    for (int i = 0; i < groupingExprs_.size(); ++i) {
+      Expr groupingExpr = groupingExprs_.get(i);
+      Type slotType = slots.get(slotIdx).getType();
+      Preconditions.checkState(groupingExpr.getType().equals(slotType),
+          String.format("Grouping expr %s returns type %s but its output tuple " +
+              "slot has type %s", groupingExpr.toSql(),
+              groupingExpr.getType().toString(), slotType.toString()));
+      ++slotIdx;
+    }
+    // Check that aggregate expr return types match the slot descriptors.
+    for (int i = 0; i < aggregateExprs_.size(); ++i) {
+      Expr aggExpr = aggregateExprs_.get(i);
+      Type slotType = slots.get(slotIdx).getType();
+      Preconditions.checkState(aggExpr.getType().equals(slotType),
+          String.format("Agg expr %s returns type %s but its output tuple " +
+              "slot has type %s", aggExpr.toSql(), aggExpr.getType().toString(),
+              slotType.toString()));
+      ++slotIdx;
+    }
+  }
+
+  /**
+   * Returns DataPartition derived from grouping exprs.
+   * Returns unpartitioned spec if no grouping.
+   * TODO: this won't work when we start supporting range partitions,
+   * because we could derive both hash and order-based partitions
+   */
+  public DataPartition getPartition() {
+    if (groupingExprs_.isEmpty()) {
+      return DataPartition.UNPARTITIONED;
+    } else {
+      return DataPartition.hashPartitioned(groupingExprs_);
+    }
+  }
+
+  @Override
+  public String debugString() {
+    StringBuilder out = new StringBuilder(super.debugString());
+    out.append(Objects.toStringHelper(this)
+        .add("phase", aggPhase_)
+        .add("intermediate_smap", intermediateTupleSmap_.debugString())
+        .add("output_smap", outputTupleSmap_.debugString())
+        .toString());
+    if (mergeAggInfo_ != this && mergeAggInfo_ != null) {
+      out.append("\nmergeAggInfo:\n" + mergeAggInfo_.debugString());
+    }
+    if (secondPhaseDistinctAggInfo_ != null) {
+      out.append("\nsecondPhaseDistinctAggInfo:\n"
+          + secondPhaseDistinctAggInfo_.debugString());
+    }
+    return out.toString();
+  }
+
+  @Override
+  protected String tupleDebugName() { return "agg-tuple"; }
+
+  @Override
+  public AggregateInfo clone() { return new AggregateInfo(this); }
+}

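The class comment and createCountDistinctAggExprParam() above describe how COUNT(DISTINCT ...) is split into two aggregation phases and how the phase-2 COUNT parameter is assembled as a nested IF chain. Below is a self-contained sketch of that chain, using plain strings instead of Impala's Expr tree (SlotRef, IsNullPredicate, FunctionCallExpr) and hypothetical slot names:

    import java.util.List;

    public class DistinctAggRewriteSketch {
      // Builds if(isnull(slot_i), NULL, ...) over the distinct-expr slots,
      // ending in a bare reference to the last slot, mirroring the loop in
      // createCountDistinctAggExprParam().
      static String countDistinctParam(List<String> distinctSlots) {
        String expr = distinctSlots.get(distinctSlots.size() - 1);
        for (int i = distinctSlots.size() - 2; i >= 0; --i) {
          expr = "if(isnull(" + distinctSlots.get(i) + "), NULL, " + expr + ")";
        }
        return expr;
      }

      public static void main(String[] args) {
        // SELECT a, COUNT(DISTINCT b, c), MIN(d), COUNT(*) FROM T GROUP BY a
        // Phase 1 groups by a, b, c and computes MIN(d), COUNT(*).
        // Phase 2 groups by a; the DISTINCT count becomes COUNT over the IF chain
        // (so combinations containing a NULL are not counted), MIN(d) merges as MIN,
        // and COUNT(*) merges as SUM.
        System.out.println("count(" + countDistinctParam(List.of("b", "c")) + ")");
        // -> count(if(isnull(b), NULL, c))
      }
    }
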
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/AggregateInfoBase.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AggregateInfoBase.java b/fe/src/main/java/org/apache/impala/analysis/AggregateInfoBase.java
new file mode 100644
index 0000000..f3ad3f8
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/AggregateInfoBase.java
@@ -0,0 +1,221 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.cloudera.impala.catalog.AggregateFunction;
+import com.cloudera.impala.catalog.ColumnStats;
+import com.cloudera.impala.catalog.Type;
+import com.google.common.base.Objects;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+
+/**
+ * Base class for AggregateInfo and AnalyticInfo containing the intermediate and output
+ * tuple descriptors as well as their smaps for evaluating aggregate functions.
+ */
+public abstract class AggregateInfoBase {
+  private final static Logger LOG =
+      LoggerFactory.getLogger(AggregateInfoBase.class);
+
+  // For aggregations: All unique grouping expressions from a select block.
+  // For analytics: Empty.
+  protected ArrayList<Expr> groupingExprs_;
+
+  // For aggregations: All unique aggregate expressions from a select block.
+  // For analytics: The results of AnalyticExpr.getFnCall() for the unique
+  // AnalyticExprs of a select block.
+  protected ArrayList<FunctionCallExpr> aggregateExprs_;
+
+  // The tuple into which the intermediate output of an aggregation is materialized.
+  // Contains groupingExprs.size() + aggregateExprs.size() slots, the first of which
+  // contain the values of the grouping exprs, followed by slots into which the
+  // aggregateExprs' update()/merge() symbols materialize their output, i.e., slots
+  // of the aggregate functions' intermediate types.
+  // Identical to outputTupleDesc_ if no aggregateExpr has an output type that is
+  // different from its intermediate type.
+  protected TupleDescriptor intermediateTupleDesc_;
+
+  // The tuple into which the final output of the aggregation is materialized.
+  // Contains groupingExprs.size() + aggregateExprs.size() slots, the first of which
+  // contain the values of the grouping exprs, followed by slots into which the
+// aggregateExprs' finalize() symbols write their results, i.e., slots of the aggregate
+  // functions' output types.
+  protected TupleDescriptor outputTupleDesc_;
+
+// For aggregation: indices into the aggregate exprs that need to be materialized.
+  // For analytics: indices into the analytic exprs and their corresponding aggregate
+  // exprs that need to be materialized.
+  // Populated in materializeRequiredSlots() which must be implemented by subclasses.
+  protected ArrayList<Integer> materializedSlots_ = Lists.newArrayList();
+
+  protected AggregateInfoBase(ArrayList<Expr> groupingExprs,
+      ArrayList<FunctionCallExpr> aggExprs)  {
+    Preconditions.checkState(groupingExprs != null || aggExprs != null);
+    groupingExprs_ =
+        groupingExprs != null ? Expr.cloneList(groupingExprs) : new ArrayList<Expr>();
+    Preconditions.checkState(aggExprs != null || !(this instanceof AnalyticInfo));
+    aggregateExprs_ =
+        aggExprs != null ? Expr.cloneList(aggExprs) : new ArrayList<FunctionCallExpr>();
+  }
+
+  /**
+   * C'tor for cloning.
+   */
+  protected AggregateInfoBase(AggregateInfoBase other) {
+    groupingExprs_ =
+        (other.groupingExprs_ != null) ? Expr.cloneList(other.groupingExprs_) : null;
+    aggregateExprs_ =
+        (other.aggregateExprs_ != null) ? Expr.cloneList(other.aggregateExprs_) : null;
+    intermediateTupleDesc_ = other.intermediateTupleDesc_;
+    outputTupleDesc_ = other.outputTupleDesc_;
+    materializedSlots_ = Lists.newArrayList(other.materializedSlots_);
+  }
+
+  /**
+   * Creates the intermediate and output tuple descriptors. If no agg expr has an
+   * intermediate type different from its output type, then only the output tuple
+   * descriptor is created and the intermediate tuple is set to the output tuple.
+   */
+  protected void createTupleDescs(Analyzer analyzer) {
+    // Create the intermediate tuple desc first, so that the tuple ids are increasing
+    // from bottom to top in the plan tree.
+    intermediateTupleDesc_ = createTupleDesc(analyzer, false);
+    if (requiresIntermediateTuple(aggregateExprs_)) {
+      outputTupleDesc_ = createTupleDesc(analyzer, true);
+    } else {
+      outputTupleDesc_ = intermediateTupleDesc_;
+    }
+  }
+
+  /**
+   * Returns a tuple descriptor for the aggregation/analytic's intermediate or final
+   * result, depending on whether isOutputTuple is true or false.
+   * Also updates the appropriate substitution map, and creates and registers auxiliary
+   * equality predicates between the grouping slots and the grouping exprs.
+   */
+  private TupleDescriptor createTupleDesc(Analyzer analyzer, boolean isOutputTuple) {
+    TupleDescriptor result =
+        analyzer.getDescTbl().createTupleDescriptor(
+          tupleDebugName() + (isOutputTuple ? "-out" : "-intermed"));
+    List<Expr> exprs = Lists.newArrayListWithCapacity(
+        groupingExprs_.size() + aggregateExprs_.size());
+    exprs.addAll(groupingExprs_);
+    exprs.addAll(aggregateExprs_);
+
+    int aggregateExprStartIndex = groupingExprs_.size();
+    for (int i = 0; i < exprs.size(); ++i) {
+      Expr expr = exprs.get(i);
+      SlotDescriptor slotDesc = analyzer.addSlotDescriptor(result);
+      slotDesc.initFromExpr(expr);
+      if (i < aggregateExprStartIndex) {
+        // register equivalence between grouping slot and grouping expr;
+        // do this only when the grouping expr isn't a constant, otherwise
+        // it'll simply show up as a gratuitous HAVING predicate
+        // (which would actually be incorrect if the constant happens to be NULL)
+        if (!expr.isConstant()) {
+          analyzer.createAuxEquivPredicate(new SlotRef(slotDesc), expr.clone());
+        }
+      } else {
+        Preconditions.checkArgument(expr instanceof FunctionCallExpr);
+        FunctionCallExpr aggExpr = (FunctionCallExpr)expr;
+        if (aggExpr.isMergeAggFn()) {
+          slotDesc.setLabel(aggExpr.getChild(0).toSql());
+          slotDesc.setSourceExpr(aggExpr.getChild(0));
+        } else {
+          slotDesc.setLabel(aggExpr.toSql());
+          slotDesc.setSourceExpr(aggExpr);
+        }
+
+        // count(*) is non-nullable.
+        if (aggExpr.getFnName().getFunction().equals("count")) {
+          // TODO: Consider making nullability a property of types or of builtin agg fns.
+          // row_number, rank, and dense_rank are non-nullable as well.
+          slotDesc.setIsNullable(false);
+        }
+        if (!isOutputTuple) {
+          Type intermediateType = ((AggregateFunction)aggExpr.fn_).getIntermediateType();
+          if (intermediateType != null) {
+            // Use the output type as intermediate if the function has a wildcard decimal.
+            if (!intermediateType.isWildcardDecimal()) {
+              slotDesc.setType(intermediateType);
+            } else {
+              Preconditions.checkState(expr.getType().isDecimal());
+            }
+          }
+        }
+      }
+    }
+    String prefix = (isOutputTuple ? "result " : "intermediate ");
+    LOG.trace(prefix + " tuple=" + result.debugString());
+    return result;
+  }
+
+  /**
+   * Marks the slots required for evaluating an Analytic/AggregateInfo by
+   * resolving the materialized aggregate/analytic exprs against smap,
+   * and then marking their slots.
+   */
+  public abstract void materializeRequiredSlots(Analyzer analyzer,
+      ExprSubstitutionMap smap);
+
+  public ArrayList<Expr> getGroupingExprs() { return groupingExprs_; }
+  public ArrayList<FunctionCallExpr> getAggregateExprs() { return aggregateExprs_; }
+  public TupleDescriptor getOutputTupleDesc() { return outputTupleDesc_; }
+  public TupleDescriptor getIntermediateTupleDesc() { return intermediateTupleDesc_; }
+  public TupleId getIntermediateTupleId() { return intermediateTupleDesc_.getId(); }
+  public TupleId getOutputTupleId() { return outputTupleDesc_.getId(); }
+  public boolean requiresIntermediateTuple() {
+    Preconditions.checkNotNull(intermediateTupleDesc_);
+    Preconditions.checkNotNull(outputTupleDesc_);
+    return intermediateTupleDesc_ != outputTupleDesc_;
+  }
+
+  /**
+   * Returns true if evaluating the given aggregate exprs requires an intermediate tuple,
+   * i.e., whether one of the aggregate functions has an intermediate type different from
+   * its output type.
+   */
+  public static <T extends Expr> boolean requiresIntermediateTuple(List<T> aggExprs) {
+    for (Expr aggExpr: aggExprs) {
+      Type intermediateType = ((AggregateFunction) aggExpr.fn_).getIntermediateType();
+      if (intermediateType != null) return true;
+    }
+    return false;
+  }
+
+  public String debugString() {
+    StringBuilder out = new StringBuilder();
+    out.append(Objects.toStringHelper(this)
+        .add("grouping_exprs", Expr.debugString(groupingExprs_))
+        .add("aggregate_exprs", Expr.debugString(aggregateExprs_))
+        .add("intermediate_tuple", (intermediateTupleDesc_ == null)
+            ? "null" : intermediateTupleDesc_.debugString())
+        .add("output_tuple", (outputTupleDesc_ == null)
+            ? "null" : outputTupleDesc_.debugString())
+        .toString());
+    return out.toString();
+  }
+
+  protected abstract String tupleDebugName();
+}

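The intermediate-versus-output tuple distinction above reduces to one check: does any aggregate function have an intermediate type that differs from its output type? A small sketch of that rule follows, with hypothetical function names and type strings; in the real code the answer comes from the catalog's AggregateFunction.getIntermediateType():

    import java.util.List;
    import java.util.Objects;

    public class IntermediateTupleRuleSketch {
      record AggFn(String name, String intermediateType, String outputType) {}

      // Mirrors the shape of AggregateInfoBase.requiresIntermediateTuple(List<T>):
      // true if any function's intermediate type differs from its output type.
      static boolean requiresIntermediateTuple(List<AggFn> fns) {
        return fns.stream()
            .anyMatch(f -> !Objects.equals(f.intermediateType(), f.outputType()));
      }

      public static void main(String[] args) {
        List<AggFn> sameTypes = List.of(
            new AggFn("count", "BIGINT", "BIGINT"),
            new AggFn("min", "DOUBLE", "DOUBLE"));
        List<AggFn> differentTypes = List.of(
            new AggFn("ndv", "CHAR(256)", "BIGINT"));  // fixed-width buffer; size illustrative
        System.out.println(requiresIntermediateTuple(sameTypes));       // false: output tuple is reused
        System.out.println(requiresIntermediateTuple(differentTypes));  // true: separate intermediate tuple
      }
    }
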
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/AlterTableAddPartitionStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableAddPartitionStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableAddPartitionStmt.java
new file mode 100644
index 0000000..a5cb2ca
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableAddPartitionStmt.java
@@ -0,0 +1,117 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import com.cloudera.impala.authorization.Privilege;
+import com.cloudera.impala.catalog.HdfsTable;
+import com.cloudera.impala.catalog.Table;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.common.FileSystemUtil;
+import com.cloudera.impala.thrift.TAlterTableAddPartitionParams;
+import com.cloudera.impala.thrift.TAlterTableParams;
+import com.cloudera.impala.thrift.TAlterTableType;
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.fs.permission.FsAction;
+
+/**
+ * Represents an ALTER TABLE ADD PARTITION statement.
+ */
+public class AlterTableAddPartitionStmt extends AlterTableStmt {
+  private final HdfsUri location_;
+  private final boolean ifNotExists_;
+  private final PartitionSpec partitionSpec_;
+  private final HdfsCachingOp cacheOp_;
+
+  public AlterTableAddPartitionStmt(TableName tableName,
+      PartitionSpec partitionSpec, HdfsUri location, boolean ifNotExists,
+      HdfsCachingOp cacheOp) {
+    super(tableName);
+    Preconditions.checkState(partitionSpec != null);
+    location_ = location;
+    ifNotExists_ = ifNotExists;
+    partitionSpec_ = partitionSpec;
+    partitionSpec_.setTableName(tableName);
+    cacheOp_ = cacheOp;
+  }
+
+  public boolean getIfNotExists() { return ifNotExists_; }
+  public HdfsUri getLocation() { return location_; }
+
+  @Override
+  public String toSql() {
+    StringBuilder sb = new StringBuilder("ALTER TABLE " + getTbl());
+    sb.append(" ADD ");
+    if (ifNotExists_) {
+      sb.append("IF NOT EXISTS ");
+    }
+    sb.append(partitionSpec_.toSql());
+    if (location_ != null) {
+      sb.append(String.format(" LOCATION '%s'", location_));
+    }
+    if (cacheOp_ != null) sb.append(cacheOp_.toSql());
+    return sb.toString();
+  }
+
+  @Override
+  public TAlterTableParams toThrift() {
+    TAlterTableParams params = super.toThrift();
+    params.setAlter_type(TAlterTableType.ADD_PARTITION);
+    TAlterTableAddPartitionParams addPartParams = new TAlterTableAddPartitionParams();
+    addPartParams.setPartition_spec(partitionSpec_.toThrift());
+    addPartParams.setLocation(location_ == null ? null : location_.toString());
+    addPartParams.setIf_not_exists(ifNotExists_);
+    if (cacheOp_ != null) addPartParams.setCache_op(cacheOp_.toThrift());
+    params.setAdd_partition_params(addPartParams);
+    return params;
+  }
+
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    super.analyze(analyzer);
+    if (!ifNotExists_) partitionSpec_.setPartitionShouldNotExist();
+    partitionSpec_.setPrivilegeRequirement(Privilege.ALTER);
+    partitionSpec_.analyze(analyzer);
+
+    if (location_ != null) {
+      location_.analyze(analyzer, Privilege.ALL, FsAction.READ_WRITE);
+    }
+
+    boolean shouldCache = false;
+    Table table = getTargetTable();
+    if (cacheOp_ != null) {
+      cacheOp_.analyze(analyzer);
+      shouldCache = cacheOp_.shouldCache();
+    } else if (table instanceof HdfsTable) {
+      shouldCache = ((HdfsTable)table).isMarkedCached();
+    }
+    if (shouldCache) {
+      if (!(table instanceof HdfsTable)) {
+        throw new AnalysisException("Caching must target an HDFS table: " +
+            table.getFullName());
+      }
+      HdfsTable hdfsTable = (HdfsTable)table;
+      if ((location_ != null && !FileSystemUtil.isPathCacheable(location_.getPath())) ||
+          (location_ == null && !hdfsTable.isLocationCacheable())) {
+        throw new AnalysisException(String.format("Location '%s' cannot be cached. " +
+            "Please retry without caching: ALTER TABLE %s ADD PARTITION ... UNCACHED",
+            (location_ != null) ? location_.toString() : hdfsTable.getLocation(),
+            table.getFullName()));
+      }
+    }
+  }
+}
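
For illustration, a sketch of constructing this statement outside the parser. The TableName constructor and the parsedPartitionSpec() helper are assumptions (the SQL parser normally supplies the PartitionSpec); toSql() is the only call below that is safe before analyze().

    // Models: ALTER TABLE alltypes ADD IF NOT EXISTS PARTITION (year=2016, month=9)
    PartitionSpec partSpec = parsedPartitionSpec("year=2016, month=9");  // hypothetical
    AlterTableAddPartitionStmt stmt = new AlterTableAddPartitionStmt(
        new TableName("functional", "alltypes"), partSpec,
        /*location*/ null, /*ifNotExists*/ true, /*cacheOp*/ null);
    System.out.println(stmt.toSql());
    // analyze() additionally checks ALTER privileges, that the partition does not
    // already exist, and that any cached location is actually cacheable.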

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/AlterTableAddReplaceColsStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableAddReplaceColsStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableAddReplaceColsStmt.java
new file mode 100644
index 0000000..aaa223a
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableAddReplaceColsStmt.java
@@ -0,0 +1,108 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import java.util.List;
+import java.util.Set;
+
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+
+import com.cloudera.impala.catalog.Column;
+import com.cloudera.impala.catalog.HBaseTable;
+import com.cloudera.impala.catalog.Table;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.thrift.TAlterTableAddReplaceColsParams;
+import com.cloudera.impala.thrift.TAlterTableParams;
+import com.cloudera.impala.thrift.TAlterTableType;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
+
+/**
+ * Represents an ALTER TABLE ADD|REPLACE COLUMNS (colDef1, colDef2, ...) statement.
+ */
+public class AlterTableAddReplaceColsStmt extends AlterTableStmt {
+  private final List<ColumnDef> columnDefs_;
+  private final boolean replaceExistingCols_;
+
+  public AlterTableAddReplaceColsStmt(TableName tableName, List<ColumnDef> columnDefs,
+      boolean replaceExistingCols) {
+    super(tableName);
+    Preconditions.checkState(columnDefs != null && columnDefs.size() > 0);
+    columnDefs_ = Lists.newArrayList(columnDefs);
+    replaceExistingCols_ = replaceExistingCols;
+  }
+
+  public List<ColumnDef> getColumnDescs() { return columnDefs_; }
+
+  // Replace columns instead of appending new columns.
+  public boolean getReplaceExistingCols() {
+    return replaceExistingCols_;
+  }
+
+  @Override
+  public TAlterTableParams toThrift() {
+    TAlterTableParams params = super.toThrift();
+    params.setAlter_type(TAlterTableType.ADD_REPLACE_COLUMNS);
+    TAlterTableAddReplaceColsParams colParams = new TAlterTableAddReplaceColsParams();
+    for (ColumnDef col: getColumnDescs()) {
+      colParams.addToColumns(col.toThrift());
+    }
+    colParams.setReplace_existing_cols(replaceExistingCols_);
+    params.setAdd_replace_cols_params(colParams);
+    return params;
+  }
+
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    super.analyze(analyzer);
+    Table t = getTargetTable();
+    // TODO: Support column-level DDL on HBase tables. Requires updating the column
+    // mappings along with the table columns.
+    if (t instanceof HBaseTable) {
+      throw new AnalysisException("ALTER TABLE ADD|REPLACE COLUMNS not currently " +
+          "supported on HBase tables.");
+    }
+
+    // Build a set of the partition keys for the table.
+    Set<String> existingPartitionKeys = Sets.newHashSet();
+    for (FieldSchema fs: t.getMetaStoreTable().getPartitionKeys()) {
+      existingPartitionKeys.add(fs.getName().toLowerCase());
+    }
+
+    // Make sure the new columns don't already exist in the table, that the names
+    // are all valid and unique, and that none of the columns conflict with
+    // partition columns.
+    Set<String> colNames = Sets.newHashSet();
+    for (ColumnDef c: columnDefs_) {
+      c.analyze();
+      String colName = c.getColName().toLowerCase();
+      if (existingPartitionKeys.contains(colName)) {
+        throw new AnalysisException(
+            "Column name conflicts with existing partition column: " + colName);
+      }
+
+      Column col = t.getColumn(colName);
+      if (col != null && !replaceExistingCols_) {
+        throw new AnalysisException("Column already exists: " + colName);
+      } else if (!colNames.add(colName)) {
+        throw new AnalysisException("Duplicate column name: " + colName);
+      }
+    }
+  }
+}
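
For illustration, a sketch of the ADD COLUMNS flavor. The TableName constructor and parsedColumnDef() are assumptions standing in for what the parser produces.

    // Models: ALTER TABLE functional.alltypes ADD COLUMNS (c1 INT, c2 STRING)
    List<ColumnDef> cols = Lists.newArrayList(
        parsedColumnDef("c1 INT"), parsedColumnDef("c2 STRING"));  // hypothetical
    AlterTableAddReplaceColsStmt stmt = new AlterTableAddReplaceColsStmt(
        new TableName("functional", "alltypes"), cols, /*replaceExistingCols*/ false);
    // analyze() rejects HBase tables, names that clash with partition columns,
    // duplicates within the new column list, and (for ADD) columns that already exist.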

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/AlterTableChangeColStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableChangeColStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableChangeColStmt.java
new file mode 100644
index 0000000..c733ca0
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableChangeColStmt.java
@@ -0,0 +1,101 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+
+import com.cloudera.impala.catalog.HBaseTable;
+import com.cloudera.impala.catalog.Table;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.thrift.TAlterTableChangeColParams;
+import com.cloudera.impala.thrift.TAlterTableParams;
+import com.cloudera.impala.thrift.TAlterTableType;
+import com.google.common.base.Preconditions;
+
+/**
+ * Represents an ALTER TABLE CHANGE COLUMN colName newColDef statement.
+ * Note: It would be fairly simple to reuse this class to support ALTER TABLE MODIFY
+ * newColDef statements in the future by making colName optional.
+ */
+public class AlterTableChangeColStmt extends AlterTableStmt {
+  private final String colName_;
+  private final ColumnDef newColDef_;
+
+  public AlterTableChangeColStmt(TableName tableName, String colName,
+      ColumnDef newColDef) {
+    super(tableName);
+    Preconditions.checkNotNull(newColDef);
+    Preconditions.checkState(colName != null && !colName.isEmpty());
+    colName_ = colName;
+    newColDef_ = newColDef;
+  }
+
+  public String getColName() { return colName_; }
+  public ColumnDef getNewColDef() { return newColDef_; }
+
+  @Override
+  public TAlterTableParams toThrift() {
+    TAlterTableParams params = super.toThrift();
+    params.setAlter_type(TAlterTableType.CHANGE_COLUMN);
+    TAlterTableChangeColParams colParams = new TAlterTableChangeColParams();
+    colParams.setCol_name(colName_);
+    colParams.setNew_col_def(newColDef_.toThrift());
+    params.setChange_col_params(colParams);
+    return params;
+  }
+
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    super.analyze(analyzer);
+    Table t = getTargetTable();
+    // TODO: Support column-level DDL on HBase tables. Requires updating the column
+    // mappings along with the table columns.
+    if (t instanceof HBaseTable) {
+      throw new AnalysisException("ALTER TABLE CHANGE COLUMN not currently supported " +
+          "on HBase tables.");
+    }
+    String tableName = getDb() + "." + getTbl();
+
+    // Verify there are no conflicts with partition columns.
+    for (FieldSchema fs: t.getMetaStoreTable().getPartitionKeys()) {
+      if (fs.getName().toLowerCase().equals(colName_.toLowerCase())) {
+        throw new AnalysisException("Cannot modify partition column: " + colName_);
+      }
+      if (fs.getName().toLowerCase().equals(newColDef_.getColName().toLowerCase())) {
+        throw new AnalysisException(
+            "Column name conflicts with existing partition column: " +
+            newColDef_.getColName());
+      }
+    }
+
+    // Verify the column being modified exists in the table
+    if (t.getColumn(colName_) == null) {
+      throw new AnalysisException(String.format(
+          "Column '%s' does not exist in table: %s", colName_, tableName));
+    }
+
+    // Check that the new column def's name is valid.
+    newColDef_.analyze();
+    // Verify that if the column name is being changed, the new name doesn't conflict
+    // with an existing column.
+    if (!colName_.toLowerCase().equals(newColDef_.getColName().toLowerCase()) &&
+        t.getColumn(newColDef_.getColName()) != null) {
+      throw new AnalysisException("Column already exists: " + newColDef_.getColName());
+    }
+  }
+}
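
For illustration, a sketch of a column rename plus type change. TableName and parsedColumnDef() are assumptions standing in for the parser.

    // Models: ALTER TABLE functional.alltypes CHANGE int_col int_col2 BIGINT
    AlterTableChangeColStmt stmt = new AlterTableChangeColStmt(
        new TableName("functional", "alltypes"), "int_col",
        parsedColumnDef("int_col2 BIGINT"));  // hypothetical parser stand-in
    // analyze() rejects HBase tables, partition columns (as either the old or the new
    // name), a source column that does not exist, and a rename onto an existing column.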

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/AlterTableDropColStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableDropColStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableDropColStmt.java
new file mode 100644
index 0000000..d7f5ab5
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableDropColStmt.java
@@ -0,0 +1,84 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+
+import com.cloudera.impala.catalog.HBaseTable;
+import com.cloudera.impala.catalog.Table;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.thrift.TAlterTableDropColParams;
+import com.cloudera.impala.thrift.TAlterTableParams;
+import com.cloudera.impala.thrift.TAlterTableType;
+import com.google.common.base.Preconditions;
+
+/**
+ * Represents an ALTER TABLE DROP COLUMN statement.
+ * Note: Hive does not support this syntax for dropping columns, but it is supported
+ * by MySQL.
+ */
+public class AlterTableDropColStmt extends AlterTableStmt {
+  private final String colName_;
+
+  public AlterTableDropColStmt(TableName tableName, String colName) {
+    super(tableName);
+    Preconditions.checkState(colName != null && !colName.isEmpty());
+    colName_ = colName;
+  }
+
+  public String getColName() { return colName_; }
+
+  @Override
+  public TAlterTableParams toThrift() {
+    TAlterTableParams params = super.toThrift();
+    params.setAlter_type(TAlterTableType.DROP_COLUMN);
+    TAlterTableDropColParams dropColParams = new TAlterTableDropColParams(colName_);
+    params.setDrop_col_params(dropColParams);
+    return params;
+  }
+
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    super.analyze(analyzer);
+    Table t = getTargetTable();
+    // TODO: Support column-level DDL on HBase tables. Requires updating the column
+    // mappings along with the table columns.
+    if (t instanceof HBaseTable) {
+      throw new AnalysisException("ALTER TABLE DROP COLUMN not currently supported " +
+          "on HBase tables.");
+    }
+    String tableName = getDb() + "." + getTbl();
+
+    for (FieldSchema fs: t.getMetaStoreTable().getPartitionKeys()) {
+      if (fs.getName().toLowerCase().equals(colName_.toLowerCase())) {
+        throw new AnalysisException("Cannot drop partition column: " + fs.getName());
+      }
+    }
+
+    if (t.getColumns().size() - t.getMetaStoreTable().getPartitionKeysSize() <= 1) {
+      throw new AnalysisException(String.format(
+          "Cannot drop column '%s' from %s. Tables must contain at least 1 column.",
+          colName_, tableName));
+    }
+
+    if (t.getColumn(colName_) == null) {
+      throw new AnalysisException(String.format(
+          "Column '%s' does not exist in table: %s", colName_, tableName));
+    }
+  }
+}
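
For illustration, a sketch of the drop-column statement; only the TableName constructor and the functional.alltypes example table are assumptions here.

    // Models: ALTER TABLE functional.alltypes DROP COLUMN string_col
    AlterTableDropColStmt stmt = new AlterTableDropColStmt(
        new TableName("functional", "alltypes"), "string_col");
    System.out.println(stmt.getColName());  // string_col
    // analyze() rejects HBase tables, partition columns, dropping the last remaining
    // non-partition column, and columns that do not exist.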

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/AlterTableDropPartitionStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableDropPartitionStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableDropPartitionStmt.java
new file mode 100644
index 0000000..f8bc09c
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableDropPartitionStmt.java
@@ -0,0 +1,79 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import com.cloudera.impala.authorization.Privilege;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.thrift.TAlterTableDropPartitionParams;
+import com.cloudera.impala.thrift.TAlterTableParams;
+import com.cloudera.impala.thrift.TAlterTableType;
+import com.google.common.base.Preconditions;
+
+/**
+ * Represents an ALTER TABLE DROP PARTITION statement.
+ */
+public class AlterTableDropPartitionStmt extends AlterTableStmt {
+  private final boolean ifExists_;
+  private final PartitionSpec partitionSpec_;
+
+  // Setting this value causes dropped partition(s) to be permanently
+  // deleted. For example, for HDFS tables it skips the trash mechanism
+  private final boolean purgePartition_;
+
+  public AlterTableDropPartitionStmt(TableName tableName,
+      PartitionSpec partitionSpec, boolean ifExists, boolean purgePartition) {
+    super(tableName);
+    Preconditions.checkNotNull(partitionSpec);
+    partitionSpec_ = partitionSpec;
+    partitionSpec_.setTableName(tableName);
+    ifExists_ = ifExists;
+    purgePartition_ = purgePartition;
+  }
+
+  public boolean getIfNotExists() { return ifExists_; }
+
+  @Override
+  public String toSql() {
+    StringBuilder sb = new StringBuilder("ALTER TABLE " + getTbl());
+    sb.append(" DROP ");
+    if (ifExists_) sb.append("IF EXISTS ");
+    sb.append(partitionSpec_.toSql());
+    if (purgePartition_) sb.append(" PURGE");
+    return sb.toString();
+  }
+
+  @Override
+  public TAlterTableParams toThrift() {
+    TAlterTableParams params = super.toThrift();
+    params.setAlter_type(TAlterTableType.DROP_PARTITION);
+    TAlterTableDropPartitionParams addPartParams = new TAlterTableDropPartitionParams();
+    addPartParams.setPartition_spec(partitionSpec_.toThrift());
+    addPartParams.setIf_exists(ifExists_);
+    addPartParams.setPurge(purgePartition_);
+    params.setDrop_partition_params(addPartParams);
+    return params;
+  }
+
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    super.analyze(analyzer);
+    if (!ifExists_) partitionSpec_.setPartitionShouldExist();
+    partitionSpec_.setPrivilegeRequirement(Privilege.ALTER);
+    partitionSpec_.analyze(analyzer);
+  }
+}
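
For illustration, a sketch of the drop-partition statement. TableName and parsedPartitionSpec() are assumptions standing in for the parser.

    // Models: ALTER TABLE alltypes DROP IF EXISTS PARTITION (year=2009) PURGE
    AlterTableDropPartitionStmt stmt = new AlterTableDropPartitionStmt(
        new TableName("functional", "alltypes"),
        parsedPartitionSpec("year=2009"),  // hypothetical parser stand-in
        /*ifExists*/ true, /*purgePartition*/ true);
    System.out.println(stmt.toSql());
    // Without IF EXISTS, analyze() requires that the named partition actually exists.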

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/AlterTableOrViewRenameStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableOrViewRenameStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableOrViewRenameStmt.java
new file mode 100644
index 0000000..009535c
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableOrViewRenameStmt.java
@@ -0,0 +1,90 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import com.cloudera.impala.authorization.Privilege;
+import com.cloudera.impala.catalog.View;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.thrift.TAccessEvent;
+import com.cloudera.impala.thrift.TAlterTableOrViewRenameParams;
+import com.cloudera.impala.thrift.TAlterTableParams;
+import com.cloudera.impala.thrift.TAlterTableType;
+import com.cloudera.impala.thrift.TTableName;
+import com.google.common.base.Preconditions;
+
+/**
+ * Represents an ALTER TABLE/VIEW RENAME statement.
+ */
+public class AlterTableOrViewRenameStmt extends AlterTableStmt {
+  protected final TableName newTableName_;
+
+  // Set during analysis
+  protected String newDbName_;
+
+  //  True if we are renaming a table. False if we are renaming a view.
+  protected final boolean renameTable_;
+
+  public AlterTableOrViewRenameStmt(TableName oldTableName, TableName newTableName,
+      boolean renameTable) {
+    super(oldTableName);
+    Preconditions.checkState(newTableName != null && !newTableName.isEmpty());
+    newTableName_ = newTableName;
+    renameTable_ = renameTable;
+  }
+
+  public String getNewTbl() {
+    return newTableName_.getTbl();
+  }
+
+  public String getNewDb() {
+    Preconditions.checkNotNull(newDbName_);
+    return newDbName_;
+  }
+
+  @Override
+  public TAlterTableParams toThrift() {
+    TAlterTableParams params = super.toThrift();
+    params.setAlter_type(
+        (renameTable_) ? TAlterTableType.RENAME_TABLE : TAlterTableType.RENAME_VIEW);
+    TAlterTableOrViewRenameParams renameParams =
+        new TAlterTableOrViewRenameParams(new TTableName(getNewDb(), getNewTbl()));
+    params.setRename_params(renameParams);
+    return params;
+  }
+
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    newTableName_.analyze();
+    table_ = analyzer.getTable(tableName_, Privilege.ALTER);
+    if (table_ instanceof View && renameTable_) {
+      throw new AnalysisException(String.format(
+          "ALTER TABLE not allowed on a view: %s", table_.getFullName()));
+    }
+    if (!(table_ instanceof View) && !renameTable_) {
+      throw new AnalysisException(String.format(
+          "ALTER VIEW not allowed on a table: %s", table_.getFullName()));
+    }
+    newDbName_ = analyzer.getTargetDbName(newTableName_);
+    if (analyzer.dbContainsTable(newDbName_, newTableName_.getTbl(), Privilege.CREATE)) {
+      throw new AnalysisException(Analyzer.TBL_ALREADY_EXISTS_ERROR_MSG +
+          String.format("%s.%s", newDbName_, getNewTbl()));
+    }
+    analyzer.addAccessEvent(new TAccessEvent(newDbName_ + "." + newTableName_.getTbl(),
+        table_.getCatalogObjectType(), Privilege.CREATE.toString()));
+  }
+}
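
For illustration, a sketch of a table rename; only the TableName constructor and the example names are assumptions.

    // Models: ALTER TABLE functional.alltypes RENAME TO functional.alltypes2
    AlterTableOrViewRenameStmt stmt = new AlterTableOrViewRenameStmt(
        new TableName("functional", "alltypes"),
        new TableName("functional", "alltypes2"), /*renameTable*/ true);
    System.out.println(stmt.getNewTbl());  // alltypes2
    // analyze() rejects ALTER TABLE on a view (and ALTER VIEW on a table), fails if
    // the target name already exists, and records a CREATE access event.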

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/AlterTableRecoverPartitionsStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableRecoverPartitionsStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableRecoverPartitionsStmt.java
new file mode 100644
index 0000000..c7e796c
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableRecoverPartitionsStmt.java
@@ -0,0 +1,56 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import com.cloudera.impala.catalog.HdfsTable;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.thrift.TAlterTableParams;
+import com.cloudera.impala.thrift.TAlterTableType;
+
+/**
+ * Represents an ALTER TABLE RECOVER PARTITIONS statement.
+ */
+public class AlterTableRecoverPartitionsStmt extends AlterTableStmt {
+
+  public AlterTableRecoverPartitionsStmt(TableName tableName) {
+    super(tableName);
+  }
+
+  @Override
+  public TAlterTableParams toThrift() {
+    TAlterTableParams params = super.toThrift();
+    params.setAlter_type(TAlterTableType.RECOVER_PARTITIONS);
+    return params;
+  }
+
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    super.analyze(analyzer);
+
+    // Make sure the target table is HdfsTable.
+    if (!(table_ instanceof HdfsTable)) {
+      throw new AnalysisException("ALTER TABLE RECOVER PARTITIONS " +
+          "must target an HDFS table: " + tableName_);
+    }
+
+    // Make sure the target table is partitioned.
+    if (table_.getMetaStoreTable().getPartitionKeysSize() == 0) {
+      throw new AnalysisException("Table is not partitioned: " + tableName_);
+    }
+  }
+}
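
For illustration, a sketch of the recover-partitions statement; only the TableName constructor and the example table are assumptions.

    // Models: ALTER TABLE functional.alltypes RECOVER PARTITIONS
    AlterTableRecoverPartitionsStmt stmt =
        new AlterTableRecoverPartitionsStmt(new TableName("functional", "alltypes"));
    // analyze() requires the target to be a partitioned HDFS table; non-HDFS or
    // unpartitioned tables are rejected with an AnalysisException.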

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/AlterTableSetCachedStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetCachedStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetCachedStmt.java
new file mode 100644
index 0000000..f63cf3e
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetCachedStmt.java
@@ -0,0 +1,92 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import com.cloudera.impala.catalog.HdfsTable;
+import com.cloudera.impala.catalog.HdfsPartition;
+import com.cloudera.impala.catalog.Table;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.thrift.TAlterTableParams;
+import com.cloudera.impala.thrift.TAlterTableSetCachedParams;
+import com.cloudera.impala.thrift.TAlterTableType;
+import com.google.common.base.Preconditions;
+
+/**
+ * Represents an ALTER TABLE [PARTITION partitionSpec] SET [UNCACHED|CACHED 'pool'].
+ */
+public class AlterTableSetCachedStmt extends AlterTableSetStmt {
+  private final HdfsCachingOp cacheOp_;
+
+  public AlterTableSetCachedStmt(TableName tableName,
+      PartitionSpec partitionSpec, HdfsCachingOp cacheOp) {
+    super(tableName, partitionSpec);
+    Preconditions.checkNotNull(cacheOp);
+    cacheOp_ = cacheOp;
+  }
+
+  @Override
+  public TAlterTableParams toThrift() {
+    TAlterTableParams params = super.toThrift();
+    params.setAlter_type(TAlterTableType.SET_CACHED);
+    TAlterTableSetCachedParams cachingParams =
+        new TAlterTableSetCachedParams();
+    if (getPartitionSpec() != null) {
+      cachingParams.setPartition_spec(getPartitionSpec().toThrift());
+    }
+    cachingParams.setCache_op(cacheOp_.toThrift());
+    params.setSet_cached_params(cachingParams);
+    return params;
+  }
+
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    super.analyze(analyzer);
+    cacheOp_.analyze(analyzer);
+
+    Table table = getTargetTable();
+    Preconditions.checkNotNull(table);
+    if (!(table instanceof HdfsTable)) {
+      throw new AnalysisException("ALTER TABLE SET [CACHED|UNCACHED] must target an " +
+          "HDFS table: " + table.getFullName());
+    }
+
+    if (cacheOp_.shouldCache()) {
+      boolean isCacheable;
+      PartitionSpec partSpec = getPartitionSpec();
+      HdfsTable hdfsTable = (HdfsTable)table;
+      StringBuilder nameSb = new StringBuilder();
+      if (partSpec != null) {
+        HdfsPartition part = hdfsTable.getPartition(partSpec.getPartitionSpecKeyValues());
+        if (part == null) {
+          throw new AnalysisException("Partition spec does not exist: " +
+              partSpec.toSql());
+        }
+        isCacheable = part.isCacheable();
+        nameSb.append("Partition (" + part.getPartitionName() + ")");
+      } else {
+        isCacheable = hdfsTable.isCacheable();
+        nameSb.append("Table ").append(table.getFullName());
+      }
+      if (!isCacheable) {
+        throw new AnalysisException(nameSb.toString() + " cannot be cached. Please " +
+            "check if the table or partitions are on a filesystem which supports " +
+            "caching.");
+      }
+    }
+  }
+}
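
For illustration, a sketch of caching a single partition. TableName, parsedPartitionSpec() and parsedCachingOp() are assumptions standing in for the parser.

    // Models: ALTER TABLE alltypes PARTITION (year=2010) SET CACHED IN 'testPool'
    AlterTableSetCachedStmt stmt = new AlterTableSetCachedStmt(
        new TableName("functional", "alltypes"),
        parsedPartitionSpec("year=2010"),          // hypothetical parser stand-in
        parsedCachingOp("CACHED IN 'testPool'"));  // hypothetical parser stand-in
    // analyze() requires an HDFS table, an existing partition when one is named, and
    // a table/partition location on a filesystem that supports HDFS caching.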


[22/61] [partial] incubator-impala git commit: IMPALA-3786: Replace "cloudera" with "apache" (part 1)

Posted by kw...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/planner/DataPartition.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/planner/DataPartition.java b/fe/src/main/java/com/cloudera/impala/planner/DataPartition.java
deleted file mode 100644
index 3320c2b..0000000
--- a/fe/src/main/java/com/cloudera/impala/planner/DataPartition.java
+++ /dev/null
@@ -1,131 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.planner;
-
-import java.util.List;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.analysis.ExprSubstitutionMap;
-import com.cloudera.impala.thrift.TDataPartition;
-import com.cloudera.impala.thrift.TPartitionType;
-import com.google.common.base.Joiner;
-import com.google.common.base.Objects;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-/**
- * Specification of the partition of a single stream of data.
- * Examples of those streams of data are: the scan of a table; the output
- * of a plan fragment; etc. (ie, this is not restricted to direct exchanges
- * between two fragments, which in the backend is facilitated by the classes
- * DataStreamSender/DataStreamMgr/DataStreamRecvr).
- */
-public class DataPartition {
-  private final static Logger LOG = LoggerFactory.getLogger(DataPartition.class);
-
-  private final TPartitionType type_;
-
-  // for hash partition: exprs used to compute hash value
-  private List<Expr> partitionExprs_;
-
-  private DataPartition(TPartitionType type, List<Expr> exprs) {
-    Preconditions.checkNotNull(exprs);
-    Preconditions.checkState(!exprs.isEmpty());
-    Preconditions.checkState(type == TPartitionType.HASH_PARTITIONED
-        || type == TPartitionType.RANGE_PARTITIONED);
-    type_ = type;
-    partitionExprs_ = exprs;
-  }
-
-  private DataPartition(TPartitionType type) {
-    Preconditions.checkState(type == TPartitionType.UNPARTITIONED
-        || type == TPartitionType.RANDOM);
-    type_ = type;
-    partitionExprs_ = Lists.newArrayList();
-  }
-
-  public final static DataPartition UNPARTITIONED =
-      new DataPartition(TPartitionType.UNPARTITIONED);
-
-  public final static DataPartition RANDOM =
-      new DataPartition(TPartitionType.RANDOM);
-
-  public static DataPartition hashPartitioned(List<Expr> exprs) {
-    return new DataPartition(TPartitionType.HASH_PARTITIONED, exprs);
-  }
-
-  public boolean isPartitioned() { return type_ != TPartitionType.UNPARTITIONED; }
-  public boolean isHashPartitioned() { return type_ == TPartitionType.HASH_PARTITIONED; }
-  public TPartitionType getType() { return type_; }
-  public List<Expr> getPartitionExprs() { return partitionExprs_; }
-
-  public void substitute(ExprSubstitutionMap smap, Analyzer analyzer) {
-    partitionExprs_ = Expr.substituteList(partitionExprs_, smap, analyzer, false);
-  }
-
-  public TDataPartition toThrift() {
-    TDataPartition result = new TDataPartition(type_);
-    if (partitionExprs_ != null) {
-      result.setPartition_exprs(Expr.treesToThrift(partitionExprs_));
-    }
-    return result;
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (obj == null) return false;
-    if (obj.getClass() != this.getClass()) return false;
-    DataPartition other = (DataPartition) obj;
-    if (type_ != other.type_) return false;
-    return Expr.equalLists(partitionExprs_, other.partitionExprs_);
-  }
-
-  public String debugString() {
-    return Objects.toStringHelper(this)
-        .add("type_", type_)
-        .addValue(Expr.debugString(partitionExprs_))
-        .toString();
-  }
-
-  public String getExplainString() {
-    StringBuilder str = new StringBuilder();
-    str.append(getPartitionShortName(type_));
-    if (!partitionExprs_.isEmpty()) {
-      List<String> strings = Lists.newArrayList();
-      for (Expr expr: partitionExprs_) {
-        strings.add(expr.toSql());
-      }
-      str.append("(" + Joiner.on(",").join(strings) +")");
-    }
-    return str.toString();
-  }
-
-  private String getPartitionShortName(TPartitionType partition) {
-    switch (partition) {
-      case RANDOM: return "RANDOM";
-      case HASH_PARTITIONED: return "HASH";
-      case RANGE_PARTITIONED: return "RANGE";
-      case UNPARTITIONED: return "UNPARTITIONED";
-      default: return "";
-    }
-  }
-}
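
For illustration, a small self-contained use of the class being moved here, written against the pre-rename package and touching only the singletons and methods shown above.

    import com.cloudera.impala.planner.DataPartition;

    public class DataPartitionExample {
      public static void main(String[] args) {
        System.out.println(DataPartition.UNPARTITIONED.isPartitioned());     // false
        System.out.println(DataPartition.RANDOM.isPartitioned());            // true
        System.out.println(DataPartition.UNPARTITIONED.getExplainString());  // UNPARTITIONED
        System.out.println(DataPartition.RANDOM.getExplainString());         // RANDOM
      }
    }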

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/planner/DataSink.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/planner/DataSink.java b/fe/src/main/java/com/cloudera/impala/planner/DataSink.java
deleted file mode 100644
index ff81b50..0000000
--- a/fe/src/main/java/com/cloudera/impala/planner/DataSink.java
+++ /dev/null
@@ -1,64 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.planner;
-
-import java.util.List;
-
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.catalog.HBaseTable;
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.catalog.KuduTable;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.thrift.TDataSink;
-import com.cloudera.impala.thrift.TExplainLevel;
-
-/**
- * A DataSink describes the destination of a plan fragment's output rows.
- * The destination could be another plan fragment on a remote machine,
- * or a table into which the rows are to be inserted
- * (i.e., the destination of the last fragment of an INSERT statement).
- */
-public abstract class DataSink {
-
-  // estimated per-host memory requirement for sink;
-  // set in computeCosts(); invalid: -1
-  protected long perHostMemCost_ = -1;
-
-  // Fragment that this DataSink belongs to. Set by the PlanFragment enclosing this sink.
-  protected PlanFragment fragment_;
-
-  /**
-   * Return an explain string for the DataSink. Each line of the explain will be prefixed
-   * by "prefix".
-   */
-  public abstract String getExplainString(String prefix, String detailPrefix,
-      TExplainLevel explainLevel);
-
-  protected abstract TDataSink toThrift();
-
-  public void setFragment(PlanFragment fragment) { fragment_ = fragment; }
-  public PlanFragment getFragment() { return fragment_; }
-  public long getPerHostMemCost() { return perHostMemCost_; }
-
-  /**
-   * Estimates the cost of executing this DataSink. Currently only sets perHostMemCost.
-   */
-  public void computeCosts() {
-    perHostMemCost_ = 0;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/planner/DataSourceScanNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/planner/DataSourceScanNode.java b/fe/src/main/java/com/cloudera/impala/planner/DataSourceScanNode.java
deleted file mode 100644
index ab92605..0000000
--- a/fe/src/main/java/com/cloudera/impala/planner/DataSourceScanNode.java
+++ /dev/null
@@ -1,371 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.planner;
-
-import java.util.List;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.BinaryPredicate;
-import com.cloudera.impala.analysis.BoolLiteral;
-import com.cloudera.impala.analysis.CompoundPredicate;
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.analysis.LiteralExpr;
-import com.cloudera.impala.analysis.NumericLiteral;
-import com.cloudera.impala.analysis.SlotRef;
-import com.cloudera.impala.analysis.StringLiteral;
-import com.cloudera.impala.analysis.TupleDescriptor;
-import com.cloudera.impala.catalog.DataSource;
-import com.cloudera.impala.catalog.DataSourceTable;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.extdatasource.ExternalDataSourceExecutor;
-import com.cloudera.impala.extdatasource.thrift.TBinaryPredicate;
-import com.cloudera.impala.extdatasource.thrift.TColumnDesc;
-import com.cloudera.impala.extdatasource.thrift.TComparisonOp;
-import com.cloudera.impala.extdatasource.thrift.TPrepareParams;
-import com.cloudera.impala.extdatasource.thrift.TPrepareResult;
-import com.cloudera.impala.service.FeSupport;
-import com.cloudera.impala.thrift.TCacheJarResult;
-import com.cloudera.impala.thrift.TColumnValue;
-import com.cloudera.impala.thrift.TDataSourceScanNode;
-import com.cloudera.impala.thrift.TErrorCode;
-import com.cloudera.impala.thrift.TExplainLevel;
-import com.cloudera.impala.thrift.TNetworkAddress;
-import com.cloudera.impala.thrift.TPlanNode;
-import com.cloudera.impala.thrift.TPlanNodeType;
-import com.cloudera.impala.thrift.TQueryOptions;
-import com.cloudera.impala.thrift.TScanRange;
-import com.cloudera.impala.thrift.TScanRangeLocation;
-import com.cloudera.impala.thrift.TScanRangeLocations;
-import com.cloudera.impala.thrift.TStatus;
-import com.google.common.base.Joiner;
-import com.google.common.base.Objects;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Lists;
-
-/**
- * Scan of a table provided by an external data source.
- */
-public class DataSourceScanNode extends ScanNode {
-  private final static Logger LOG = LoggerFactory.getLogger(DataSourceScanNode.class);
-  private final TupleDescriptor desc_;
-  private final DataSourceTable table_;
-
-  // The converted conjuncts_ that were accepted by the data source. A conjunct can
-  // be converted if it contains only disjunctive predicates of the form
-  // <slotref> <op> <constant>.
-  private List<List<TBinaryPredicate>> acceptedPredicates_;
-
-  // The conjuncts that were accepted by the data source and removed from conjuncts_ in
-  // removeAcceptedConjuncts(). Only used in getNodeExplainString() to print the
-  // conjuncts applied by the data source.
-  private List<Expr> acceptedConjuncts_;
-
-  // The number of rows estimate as returned by prepare().
-  private long numRowsEstimate_;
-
-  public DataSourceScanNode(PlanNodeId id, TupleDescriptor desc) {
-    super(id, desc, "SCAN DATA SOURCE");
-    desc_ = desc;
-    table_ = (DataSourceTable) desc_.getTable();
-    acceptedPredicates_ = null;
-    acceptedConjuncts_ = null;
-  }
-
-  @Override
-  public void init(Analyzer analyzer) throws ImpalaException {
-    checkForSupportedFileFormats();
-    assignConjuncts(analyzer);
-    analyzer.createEquivConjuncts(tupleIds_.get(0), conjuncts_);
-    prepareDataSource();
-    conjuncts_ = orderConjunctsByCost(conjuncts_);
-    computeStats(analyzer);
-    // materialize slots in remaining conjuncts_
-    analyzer.materializeSlots(conjuncts_);
-    computeMemLayout(analyzer);
-    computeScanRangeLocations(analyzer);
-  }
-
-  /**
-   * Returns a thrift TColumnValue representing the literal from a binary
-   * predicate, or null if the type cannot be represented.
-   */
-  public static TColumnValue literalToColumnValue(LiteralExpr expr) {
-    switch (expr.getType().getPrimitiveType()) {
-      case BOOLEAN:
-        return new TColumnValue().setBool_val(((BoolLiteral) expr).getValue());
-      case TINYINT:
-        return new TColumnValue().setByte_val(
-            (byte) ((NumericLiteral) expr).getLongValue());
-      case SMALLINT:
-        return new TColumnValue().setShort_val(
-            (short) ((NumericLiteral) expr).getLongValue());
-      case INT:
-        return new TColumnValue().setInt_val(
-            (int) ((NumericLiteral) expr).getLongValue());
-      case BIGINT:
-        return new TColumnValue().setLong_val(((NumericLiteral) expr).getLongValue());
-      case FLOAT:
-      case DOUBLE:
-        return new TColumnValue().setDouble_val(
-            ((NumericLiteral) expr).getDoubleValue());
-      case STRING:
-        return new TColumnValue().setString_val(((StringLiteral) expr).getValue());
-      case DECIMAL:
-      case DATE:
-      case DATETIME:
-      case TIMESTAMP:
-        // TODO: we support DECIMAL and TIMESTAMP but no way to specify it in SQL.
-        return null;
-      default:
-        Preconditions.checkState(false);
-        return null;
-    }
-  }
-
-  /**
-   * Calls prepare() on the data source to determine accepted predicates and get
-   * stats. The accepted predicates are moved from conjuncts_ into acceptedConjuncts_
-   * and the associated TBinaryPredicates are set in acceptedPredicates_.
-   */
-  private void prepareDataSource() throws InternalException {
-    // Binary predicates that will be offered to the data source.
-    List<List<TBinaryPredicate>> offeredPredicates = Lists.newArrayList();
-    // The index into conjuncts_ for each element in offeredPredicates.
-    List<Integer> conjunctsIdx = Lists.newArrayList();
-    for (int i = 0; i < conjuncts_.size(); ++i) {
-      Expr conjunct = conjuncts_.get(i);
-      List<TBinaryPredicate> disjuncts = getDisjuncts(conjunct);
-      if (disjuncts != null) {
-        offeredPredicates.add(disjuncts);
-        conjunctsIdx.add(i);
-      }
-    }
-
-    String hdfsLocation = table_.getDataSource().getHdfs_location();
-    TCacheJarResult cacheResult = FeSupport.CacheJar(hdfsLocation);
-    TStatus cacheJarStatus = cacheResult.getStatus();
-    if (cacheJarStatus.getStatus_code() != TErrorCode.OK) {
-      throw new InternalException(String.format(
-          "Unable to cache data source library at location '%s'. Check that the file " +
-          "exists and is readable. Message: %s",
-          hdfsLocation, Joiner.on("\n").join(cacheJarStatus.getError_msgs())));
-    }
-    String localPath = cacheResult.getLocal_path();
-    String className = table_.getDataSource().getClass_name();
-    String apiVersion = table_.getDataSource().getApi_version();
-    TPrepareResult prepareResult;
-    TStatus prepareStatus;
-    try {
-      ExternalDataSourceExecutor executor = new ExternalDataSourceExecutor(
-          localPath, className, apiVersion, table_.getInitString());
-      TPrepareParams prepareParams = new TPrepareParams();
-      prepareParams.setInit_string(table_.getInitString());
-      prepareParams.setPredicates(offeredPredicates);
-      // TODO: Include DB (i.e. getFullName())?
-      prepareParams.setTable_name(table_.getName());
-      prepareResult = executor.prepare(prepareParams);
-      prepareStatus = prepareResult.getStatus();
-    } catch (Exception e) {
-      throw new InternalException(String.format(
-          "Error calling prepare() on data source %s",
-          DataSource.debugString(table_.getDataSource())), e);
-    }
-    if (prepareStatus.getStatus_code() != TErrorCode.OK) {
-      throw new InternalException(String.format(
-          "Data source %s returned an error from prepare(): %s",
-          DataSource.debugString(table_.getDataSource()),
-          Joiner.on("\n").join(prepareStatus.getError_msgs())));
-    }
-
-    numRowsEstimate_ = prepareResult.getNum_rows_estimate();
-    acceptedPredicates_ = Lists.newArrayList();
-    List<Integer> acceptedPredicatesIdx = prepareResult.isSetAccepted_conjuncts() ?
-        prepareResult.getAccepted_conjuncts() : ImmutableList.<Integer>of();
-    for (Integer acceptedIdx: acceptedPredicatesIdx) {
-      acceptedPredicates_.add(offeredPredicates.get(acceptedIdx));
-    }
-    removeAcceptedConjuncts(acceptedPredicatesIdx, conjunctsIdx);
-  }
-
-  /**
-   * Converts the conjunct to a list of TBinaryPredicates if it contains only
-   * disjunctive predicates of the form {slotref} {op} {constant} that can be represented
-   * by TBinaryPredicates. If the Expr cannot be converted, null is returned.
-   * TODO: Move this to Expr.
-   */
-  private List<TBinaryPredicate> getDisjuncts(Expr conjunct) {
-    List<TBinaryPredicate> disjuncts = Lists.newArrayList();
-    if (getDisjunctsHelper(conjunct, disjuncts)) return disjuncts;
-    return null;
-  }
-
-  // Recursive helper method for getDisjuncts().
-  private boolean getDisjunctsHelper(Expr conjunct,
-      List<TBinaryPredicate> predicates) {
-    if (conjunct instanceof BinaryPredicate) {
-      if (conjunct.getChildren().size() != 2) return false;
-      SlotRef slotRef = null;
-      LiteralExpr literalExpr = null;
-      TComparisonOp op = null;
-      if ((conjunct.getChild(0).unwrapSlotRef(true) instanceof SlotRef) &&
-          (conjunct.getChild(1) instanceof LiteralExpr)) {
-        slotRef = conjunct.getChild(0).unwrapSlotRef(true);
-        literalExpr = (LiteralExpr) conjunct.getChild(1);
-        op = ((BinaryPredicate) conjunct).getOp().getThriftOp();
-      } else if ((conjunct.getChild(1).unwrapSlotRef(true) instanceof SlotRef) &&
-                 (conjunct.getChild(0) instanceof LiteralExpr)) {
-        slotRef = conjunct.getChild(1).unwrapSlotRef(true);
-        literalExpr = (LiteralExpr) conjunct.getChild(0);
-        op = ((BinaryPredicate) conjunct).getOp().converse().getThriftOp();
-      } else {
-        return false;
-      }
-
-      TColumnValue val = literalToColumnValue(literalExpr);
-      if (val == null) return false; // false if unsupported type, e.g.
-
-      String colName = Joiner.on(".").join(slotRef.getResolvedPath().getRawPath());
-      TColumnDesc col = new TColumnDesc().setName(colName).setType(
-          slotRef.getType().toThrift());
-      predicates.add(new TBinaryPredicate().setCol(col).setOp(op).setValue(val));
-      return true;
-    } else if (conjunct instanceof CompoundPredicate) {
-      CompoundPredicate compoundPredicate = ((CompoundPredicate) conjunct);
-      if (compoundPredicate.getOp() != CompoundPredicate.Operator.OR) return false;
-      if (!getDisjunctsHelper(conjunct.getChild(0), predicates)) return false;
-      if (!getDisjunctsHelper(conjunct.getChild(1), predicates)) return false;
-      return true;
-    } else {
-      return false;
-    }
-  }
-
-  @Override
-  public void computeStats(Analyzer analyzer) {
-    super.computeStats(analyzer);
-    inputCardinality_ = numRowsEstimate_;
-    cardinality_ = numRowsEstimate_;
-    cardinality_ *= computeSelectivity();
-    cardinality_ = Math.max(1, cardinality_);
-    cardinality_ = capAtLimit(cardinality_);
-
-    LOG.debug("computeStats DataSourceScan: cardinality=" + Long.toString(cardinality_));
-
-    numNodes_ = table_.getNumNodes();
-    LOG.debug("computeStats DataSourceScan: #nodes=" + Integer.toString(numNodes_));
-  }
-
-  @Override
-  protected String debugString() {
-    return Objects.toStringHelper(this)
-        .add("tid", desc_.getId().asInt())
-        .add("tblName", table_.getFullName())
-        .add("dataSource", DataSource.debugString(table_.getDataSource()))
-        .add("initString", table_.getInitString())
-        .addValue(super.debugString())
-        .toString();
-  }
-
-  /**
-   * Removes the predicates from conjuncts_ that were accepted by the data source.
-   * Stores the accepted conjuncts in acceptedConjuncts_.
-   */
-  private void removeAcceptedConjuncts(List<Integer> acceptedPredicatesIdx,
-      List<Integer> conjunctsIdx) {
-    acceptedConjuncts_ = Lists.newArrayList();
-    // Because conjuncts_ is modified in place using positional indexes from
-    // conjunctsIdx, we remove the accepted predicates in reverse order.
-    for (int i = acceptedPredicatesIdx.size() - 1; i >= 0; --i) {
-      int acceptedPredIdx = acceptedPredicatesIdx.get(i);
-      int conjunctIdx = conjunctsIdx.get(acceptedPredIdx);
-      acceptedConjuncts_.add(conjuncts_.remove(conjunctIdx));
-    }
-    // Returns a view of the list in the original order as we will print these
-    // in the explain string and it's convenient to have predicates printed
-    // in the same order that they're specified.
-    acceptedConjuncts_ = Lists.reverse(acceptedConjuncts_);
-  }
-
-  @Override
-  protected void toThrift(TPlanNode msg) {
-    Preconditions.checkNotNull(acceptedPredicates_);
-    msg.node_type = TPlanNodeType.DATA_SOURCE_NODE;
-    msg.data_source_node = new TDataSourceScanNode(desc_.getId().asInt(),
-        table_.getDataSource(), table_.getInitString(), acceptedPredicates_);
-  }
-
-  /**
-   * Create a single scan range for the localhost.
-   */
-  private void computeScanRangeLocations(Analyzer analyzer) {
-    // TODO: Does the port matter?
-    TNetworkAddress networkAddress = addressToTNetworkAddress("localhost:12345");
-    Integer hostIndex = analyzer.getHostIndex().getIndex(networkAddress);
-    scanRanges_ = Lists.newArrayList(
-        new TScanRangeLocations(
-            new TScanRange(), Lists.newArrayList(new TScanRangeLocation(hostIndex))));
-  }
-
-  @Override
-  public void computeCosts(TQueryOptions queryOptions) {
-    // TODO: What's a good estimate of memory consumption?
-    perHostMemCost_ = 1024L * 1024L * 1024L;
-  }
-
-  /**
-   * Returns the per-host upper bound of memory that any number of concurrent scan nodes
-   * will use. Used for estimating the per-host memory requirement of queries.
-   */
-  public static long getPerHostMemUpperBound() {
-    // TODO: What's a good estimate of memory consumption?
-    return 1024L * 1024L * 1024L;
-  }
-
-  @Override
-  protected String getNodeExplainString(String prefix, String detailPrefix,
-      TExplainLevel detailLevel) {
-    StringBuilder output = new StringBuilder();
-    String aliasStr = "";
-    if (!table_.getFullName().equalsIgnoreCase(desc_.getAlias()) &&
-        !table_.getName().equalsIgnoreCase(desc_.getAlias())) {
-      aliasStr = " " + desc_.getAlias();
-    }
-    output.append(String.format("%s%s:%s [%s%s]\n", prefix, id_.toString(),
-        displayName_, table_.getFullName(), aliasStr));
-
-    if (!acceptedConjuncts_.isEmpty()) {
-      output.append(prefix + "data source predicates: " +
-          getExplainString(acceptedConjuncts_) + "\n");
-    }
-    if (!conjuncts_.isEmpty()) {
-      output.append(prefix + "predicates: " + getExplainString(conjuncts_) + "\n");
-    }
-
-    // Add table and column stats in verbose mode.
-    if (detailLevel == TExplainLevel.VERBOSE) {
-      output.append(getStatsExplainString(prefix, detailLevel));
-      output.append("\n");
-    }
-    return output.toString();
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/planner/DataStreamSink.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/planner/DataStreamSink.java b/fe/src/main/java/com/cloudera/impala/planner/DataStreamSink.java
deleted file mode 100644
index 514a791..0000000
--- a/fe/src/main/java/com/cloudera/impala/planner/DataStreamSink.java
+++ /dev/null
@@ -1,61 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.planner;
-
-import com.cloudera.impala.thrift.TDataSink;
-import com.cloudera.impala.thrift.TDataSinkType;
-import com.cloudera.impala.thrift.TDataStreamSink;
-import com.cloudera.impala.thrift.TExplainLevel;
-import com.google.common.base.Preconditions;
-
-/**
- * Data sink that forwards data to an exchange node.
- */
-public class DataStreamSink extends DataSink {
-  private final ExchangeNode exchNode_;
-  private final DataPartition outputPartition_;
-
-  public DataStreamSink(ExchangeNode exchNode, DataPartition partition) {
-    Preconditions.checkNotNull(exchNode);
-    Preconditions.checkNotNull(partition);
-    exchNode_ = exchNode;
-    outputPartition_ = partition;
-  }
-
-  @Override
-  public String getExplainString(String prefix, String detailPrefix,
-      TExplainLevel detailLevel) {
-    StringBuilder output = new StringBuilder();
-    output.append(
-        String.format("%sDATASTREAM SINK [FRAGMENT=%s, EXCHANGE=%s, %s]",
-        prefix, exchNode_.getFragment().getId().toString(),
-        exchNode_.getId().toString(), exchNode_.getDisplayLabelDetail()));
-    return output.toString();
-  }
-
-  @Override
-  protected TDataSink toThrift() {
-    TDataSink result = new TDataSink(TDataSinkType.DATA_STREAM_SINK);
-    TDataStreamSink tStreamSink =
-        new TDataStreamSink(exchNode_.getId().asInt(), outputPartition_.toThrift());
-    result.setStream_sink(tStreamSink);
-    return result;
-  }
-
-  public DataPartition getOutputPartition() { return outputPartition_; }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/planner/DistributedPlanner.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/planner/DistributedPlanner.java b/fe/src/main/java/com/cloudera/impala/planner/DistributedPlanner.java
deleted file mode 100644
index b38b018..0000000
--- a/fe/src/main/java/com/cloudera/impala/planner/DistributedPlanner.java
+++ /dev/null
@@ -1,1019 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.planner;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.analysis.AggregateInfo;
-import com.cloudera.impala.analysis.AnalysisContext;
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.analysis.InsertStmt;
-import com.cloudera.impala.analysis.JoinOperator;
-import com.cloudera.impala.analysis.QueryStmt;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.planner.JoinNode.DistributionMode;
-import com.cloudera.impala.planner.RuntimeFilterGenerator.RuntimeFilter;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-
-/**
- * The distributed planner is responsible for creating an executable, distributed plan
- * from a single-node plan that can be sent to the backend.
- */
-public class DistributedPlanner {
-  private final static Logger LOG = LoggerFactory.getLogger(DistributedPlanner.class);
-
-  private final PlannerContext ctx_;
-
-  public DistributedPlanner(PlannerContext ctx) {
-    ctx_ = ctx;
-  }
-
-  /**
-   * Create plan fragments for a single-node plan considering a set of execution options.
-   * The fragments are returned in a list such that element i of that list can
-   * only consume output of the following fragments j > i.
-   *
-   * TODO: take data partition of the plan fragments into account; in particular,
-   * coordinate between hash partitioning for aggregation and hash partitioning
-   * for analytic computation more generally than what createQueryPlan() does
-   * right now (the coordination only happens if the same select block does both
-   * the aggregation and analytic computation).
-   */
-  public ArrayList<PlanFragment> createPlanFragments(
-      PlanNode singleNodePlan) throws ImpalaException {
-    Preconditions.checkState(!ctx_.isSingleNodeExec());
-    AnalysisContext.AnalysisResult analysisResult = ctx_.getAnalysisResult();
-    QueryStmt queryStmt = ctx_.getQueryStmt();
-    ArrayList<PlanFragment> fragments = Lists.newArrayList();
-    // For inserts, CTAS, updates, or deletes, unless there is a limit, leave the
-    // root fragment partitioned; otherwise merge everything into a single
-    // coordinator fragment so we can pass it back to the client.
-    boolean isPartitioned = false;
-    if ((analysisResult.isInsertStmt() || analysisResult.isCreateTableAsSelectStmt()
-        || analysisResult.isUpdateStmt() || analysisResult.isDeleteStmt())
-        && !singleNodePlan.hasLimit()) {
-      Preconditions.checkState(!queryStmt.hasOffset());
-      isPartitioned = true;
-    }
-    LOG.debug("create plan fragments");
-    long perNodeMemLimit = ctx_.getQueryOptions().mem_limit;
-    LOG.debug("memlimit=" + Long.toString(perNodeMemLimit));
-    createPlanFragments(singleNodePlan, isPartitioned, perNodeMemLimit, fragments);
-    return fragments;
-  }
-
-  /**
-   * Return plan fragment that produces result of 'root'; recursively creates
-   * all input fragments to the returned fragment.
-   * If a new fragment is created, it is appended to 'fragments', so that
-   * each fragment is preceded by those from which it consumes the output.
-   * If 'isPartitioned' is false, the returned fragment is unpartitioned;
-   * otherwise it may be partitioned, depending on whether its inputs are
-   * partitioned; the partition function is derived from the inputs.
-   */
-  private PlanFragment createPlanFragments(
-      PlanNode root, boolean isPartitioned,
-      long perNodeMemLimit, ArrayList<PlanFragment> fragments)
-      throws ImpalaException {
-    ArrayList<PlanFragment> childFragments = Lists.newArrayList();
-    for (PlanNode child: root.getChildren()) {
-      // allow child fragments to be partitioned, unless they contain a limit clause
-      // (the result set with the limit constraint needs to be computed centrally);
-      // merge later if needed
-      boolean childIsPartitioned = !child.hasLimit();
-      // Do not fragment the subplan of a SubplanNode since it is executed locally.
-      if (root instanceof SubplanNode && child == root.getChild(1)) continue;
-      childFragments.add(
-          createPlanFragments(
-            child, childIsPartitioned, perNodeMemLimit, fragments));
-    }
-
-    PlanFragment result = null;
-    if (root instanceof ScanNode) {
-      result = createScanFragment(root);
-      fragments.add(result);
-    } else if (root instanceof HashJoinNode) {
-      Preconditions.checkState(childFragments.size() == 2);
-      result = createHashJoinFragment(
-          (HashJoinNode) root, childFragments.get(1), childFragments.get(0),
-          perNodeMemLimit, fragments);
-    } else if (root instanceof NestedLoopJoinNode) {
-      Preconditions.checkState(childFragments.size() == 2);
-      result = createNestedLoopJoinFragment(
-          (NestedLoopJoinNode) root, childFragments.get(1), childFragments.get(0),
-          perNodeMemLimit, fragments);
-    } else if (root instanceof SubplanNode) {
-      Preconditions.checkState(childFragments.size() == 1);
-      result = createSubplanNodeFragment((SubplanNode) root, childFragments.get(0));
-    } else if (root instanceof SelectNode) {
-      result = createSelectNodeFragment((SelectNode) root, childFragments);
-    } else if (root instanceof UnionNode) {
-      result = createUnionNodeFragment((UnionNode) root, childFragments, fragments);
-    } else if (root instanceof AggregationNode) {
-      result = createAggregationFragment(
-          (AggregationNode) root, childFragments.get(0), fragments);
-    } else if (root instanceof SortNode) {
-      if (((SortNode) root).isAnalyticSort()) {
-        // don't parallelize this like a regular SortNode
-        result = createAnalyticFragment(
-            root, childFragments.get(0), fragments);
-      } else {
-        result = createOrderByFragment(
-            (SortNode) root, childFragments.get(0), fragments);
-      }
-    } else if (root instanceof AnalyticEvalNode) {
-      result = createAnalyticFragment(root, childFragments.get(0), fragments);
-    } else if (root instanceof EmptySetNode) {
-      result = new PlanFragment(
-          ctx_.getNextFragmentId(), root, DataPartition.UNPARTITIONED);
-    } else {
-      throw new InternalException(
-          "Cannot create plan fragment for this node type: " + root.getExplainString());
-    }
-    // move 'result' to end, it depends on all of its children
-    fragments.remove(result);
-    fragments.add(result);
-
-    if (!isPartitioned && result.isPartitioned()) {
-      result = createMergeFragment(result);
-      fragments.add(result);
-    }
-
-    return result;
-  }
-
-  /**
-   * Returns the product of the distinct value estimates of the individual exprs
-   * or -1 if any of them doesn't have a distinct value estimate.
-   */
-  private long getNumDistinctValues(List<Expr> exprs) {
-    long result = 1;
-    for (Expr expr: exprs) {
-      result *= expr.getNumDistinctValues();
-      if (result < 0) return -1;
-    }
-    return result;
-  }
-
-  /**
-   * Decides whether to repartition the output of 'inputFragment' before feeding its
-   * data into the table sink of the given 'insertStmt'. The decision obeys the
-   * shuffle/noshuffle plan hints if present. Otherwise, returns a plan fragment that
-   * partitions the output of 'inputFragment' on the partition exprs of 'insertStmt',
-   * unless the expected number of partitions is less than the number of nodes on which
-   * inputFragment runs, or the target table is unpartitioned.
-   * For inserts into unpartitioned tables or inserts with only constant partition exprs,
-   * the shuffle hint leads to a plan that merges all rows at the coordinator where
-   * the table sink is executed.
-   * If this function ends up creating a new fragment, appends it to 'fragments'.
-   */
-  public PlanFragment createInsertFragment(
-      PlanFragment inputFragment, InsertStmt insertStmt, Analyzer analyzer,
-      ArrayList<PlanFragment> fragments)
-      throws ImpalaException {
-    if (insertStmt.hasNoShuffleHint()) return inputFragment;
-
-    List<Expr> partitionExprs = Lists.newArrayList(insertStmt.getPartitionKeyExprs());
-    // Ignore constants for the sake of partitioning.
-    Expr.removeConstants(partitionExprs);
-
-    // Do nothing if the input fragment is already appropriately partitioned.
-    DataPartition inputPartition = inputFragment.getDataPartition();
-    if (!partitionExprs.isEmpty() &&
-        analyzer.equivSets(inputPartition.getPartitionExprs(), partitionExprs)) {
-      return inputFragment;
-    }
-
-    // Make a cost-based decision only if no user hint was supplied.
-    if (!insertStmt.hasShuffleHint()) {
-      // If the existing partition exprs are a subset of the table partition exprs, check
-      // if it is distributed across all nodes. If so, don't repartition.
-      if (Expr.isSubset(inputPartition.getPartitionExprs(), partitionExprs)) {
-        long numPartitions = getNumDistinctValues(inputPartition.getPartitionExprs());
-        if (numPartitions >= inputFragment.getNumNodes()) return inputFragment;
-      }
-
-      // Don't repartition if we know we have fewer partitions than nodes
-      // (i.e., default to repartitioning if col stats are missing).
-      // TODO: We want to repartition if the resulting files would otherwise
-      // be very small (less than some reasonable multiple of the recommended block size).
-      // In order to do that, we need to come up with an estimate of the avg row size
-      // in the particular file format of the output table/partition.
-      // We should always know how many nodes our input is running on.
-      long numPartitions = getNumDistinctValues(partitionExprs);
-      Preconditions.checkState(inputFragment.getNumNodes() != -1);
-      if (numPartitions > 0 && numPartitions <= inputFragment.getNumNodes()) {
-        return inputFragment;
-      }
-    }
-
-    ExchangeNode exchNode =
-        new ExchangeNode(ctx_.getNextNodeId(), inputFragment.getPlanRoot());
-    exchNode.init(analyzer);
-    Preconditions.checkState(exchNode.hasValidStats());
-    DataPartition partition;
-    if (partitionExprs.isEmpty()) {
-      partition = DataPartition.UNPARTITIONED;
-    } else {
-      partition = DataPartition.hashPartitioned(partitionExprs);
-    }
-    PlanFragment fragment =
-        new PlanFragment(ctx_.getNextFragmentId(), exchNode, partition);
-    inputFragment.setDestination(exchNode);
-    inputFragment.setOutputPartition(partition);
-    fragments.add(fragment);
-    return fragment;
-  }
-
-  /**
-   * Return unpartitioned fragment that merges the input fragment's output via
-   * an ExchangeNode.
-   * Requires that input fragment be partitioned.
-   */
-  private PlanFragment createMergeFragment(PlanFragment inputFragment)
-      throws ImpalaException {
-    Preconditions.checkState(inputFragment.isPartitioned());
-    ExchangeNode mergePlan =
-        new ExchangeNode(ctx_.getNextNodeId(), inputFragment.getPlanRoot());
-    mergePlan.init(ctx_.getRootAnalyzer());
-    Preconditions.checkState(mergePlan.hasValidStats());
-    PlanFragment fragment = new PlanFragment(ctx_.getNextFragmentId(), mergePlan,
-        DataPartition.UNPARTITIONED);
-    inputFragment.setDestination(mergePlan);
-    return fragment;
-  }
-
-  /**
-   * Create new randomly-partitioned fragment containing a single scan node.
-   * TODO: take bucketing into account to produce a naturally hash-partitioned
-   * fragment
-   * TODO: hbase scans are range-partitioned on the row key
-   */
-  private PlanFragment createScanFragment(PlanNode node) {
-    return new PlanFragment(ctx_.getNextFragmentId(), node, DataPartition.RANDOM);
-  }
-
-  /**
-   * Adds the SubplanNode as the new plan root to the child fragment and returns
-   * the child fragment.
-   */
-  private PlanFragment createSubplanNodeFragment(SubplanNode node,
-      PlanFragment childFragment) {
-    node.setChild(0, childFragment.getPlanRoot());
-    childFragment.setPlanRoot(node);
-    return childFragment;
-  }
-
-  /**
-   * Modifies the leftChildFragment to execute a cross join. The right child input is
-   * provided by an ExchangeNode, which is the destination of the rightChildFragment's
-   * output.
-   */
-  private PlanFragment createNestedLoopJoinFragment(NestedLoopJoinNode node,
-      PlanFragment rightChildFragment, PlanFragment leftChildFragment,
-      long perNodeMemLimit, ArrayList<PlanFragment> fragments)
-      throws ImpalaException {
-    node.setDistributionMode(DistributionMode.BROADCAST);
-    node.setChild(0, leftChildFragment.getPlanRoot());
-    connectChildFragment(node, 1, leftChildFragment, rightChildFragment);
-    leftChildFragment.setPlanRoot(node);
-    return leftChildFragment;
-  }
-
-  /**
-   * Helper function to produce a partitioning hash-join fragment
-   */
-  private PlanFragment createPartitionedHashJoinFragment(HashJoinNode node,
-      Analyzer analyzer, boolean lhsHasCompatPartition, boolean rhsHasCompatPartition,
-      PlanFragment leftChildFragment, PlanFragment rightChildFragment,
-      List<Expr> lhsJoinExprs, List<Expr> rhsJoinExprs,
-      ArrayList<PlanFragment> fragments) throws ImpalaException {
-    node.setDistributionMode(HashJoinNode.DistributionMode.PARTITIONED);
-    // The lhs and rhs input fragments are already partitioned on the join exprs.
-    // Combine the lhs/rhs input fragments into leftChildFragment by placing the join
-    // node into leftChildFragment and setting its lhs/rhs children to the plan root of
-    // the lhs/rhs child fragment, respectively. No new child fragments or exchanges
-    // are created, and the rhs fragment is removed.
-    // TODO: Relax the isCompatPartition() check below. The check is conservative and
-    // may reject partitions that could be made physically compatible. Fix this by
-    // removing equivalent duplicates from partition exprs and impose a canonical order
-    // on partition exprs (both using the canonical equivalence class representatives).
-    if (lhsHasCompatPartition
-        && rhsHasCompatPartition
-        && isCompatPartition(
-            leftChildFragment.getDataPartition(),
-            rightChildFragment.getDataPartition(),
-            lhsJoinExprs, rhsJoinExprs, analyzer)) {
-      node.setChild(0, leftChildFragment.getPlanRoot());
-      node.setChild(1, rightChildFragment.getPlanRoot());
-      // fix up PlanNode.fragment_ for the migrated PlanNode tree of the rhs child
-      leftChildFragment.setFragmentInPlanTree(node.getChild(1));
-      // Relocate input fragments of rightChildFragment to leftChildFragment.
-      for (PlanFragment rhsInput: rightChildFragment.getChildren()) {
-        leftChildFragment.getChildren().add(rhsInput);
-      }
-      // Remove right fragment because its plan tree has been merged into leftFragment.
-      fragments.remove(rightChildFragment);
-      leftChildFragment.setPlanRoot(node);
-      return leftChildFragment;
-    }
-
-    // The lhs input fragment is already partitioned on the join exprs.
-    // Make the HashJoin the new root of leftChildFragment and set the join's
-    // first child to the lhs plan root. The second child of the join is an
-    // ExchangeNode that is fed by the rhsInputFragment whose sink repartitions
-    // its data by the rhs join exprs.
-    DataPartition rhsJoinPartition = null;
-    if (lhsHasCompatPartition) {
-      rhsJoinPartition = getCompatPartition(lhsJoinExprs,
-          leftChildFragment.getDataPartition(), rhsJoinExprs, analyzer);
-      if (rhsJoinPartition != null) {
-        node.setChild(0, leftChildFragment.getPlanRoot());
-        connectChildFragment(node, 1, leftChildFragment, rightChildFragment);
-        rightChildFragment.setOutputPartition(rhsJoinPartition);
-        leftChildFragment.setPlanRoot(node);
-        return leftChildFragment;
-      }
-    }
-
-    // Same as above but with rhs and lhs reversed.
-    DataPartition lhsJoinPartition = null;
-    if (rhsHasCompatPartition) {
-      lhsJoinPartition = getCompatPartition(rhsJoinExprs,
-          rightChildFragment.getDataPartition(), lhsJoinExprs, analyzer);
-      if (lhsJoinPartition != null) {
-        node.setChild(1, rightChildFragment.getPlanRoot());
-        connectChildFragment(node, 0, rightChildFragment, leftChildFragment);
-        leftChildFragment.setOutputPartition(lhsJoinPartition);
-        rightChildFragment.setPlanRoot(node);
-        return rightChildFragment;
-      }
-    }
-
-    Preconditions.checkState(lhsJoinPartition == null);
-    Preconditions.checkState(rhsJoinPartition == null);
-    lhsJoinPartition = DataPartition.hashPartitioned(Expr.cloneList(lhsJoinExprs));
-    rhsJoinPartition = DataPartition.hashPartitioned(Expr.cloneList(rhsJoinExprs));
-
-    // Neither lhs nor rhs are already partitioned on the join exprs.
-    // Create a new parent fragment containing a HashJoin node with two
-    // ExchangeNodes as inputs; the latter are the destinations of the
-    // left- and rightChildFragments, which now partition their output
-    // on their respective join exprs.
-    // The new fragment is hash-partitioned on the lhs input join exprs.
-    ExchangeNode lhsExchange =
-        new ExchangeNode(ctx_.getNextNodeId(), leftChildFragment.getPlanRoot());
-    lhsExchange.computeStats(null);
-    node.setChild(0, lhsExchange);
-    ExchangeNode rhsExchange =
-        new ExchangeNode(ctx_.getNextNodeId(), rightChildFragment.getPlanRoot());
-    rhsExchange.computeStats(null);
-    node.setChild(1, rhsExchange);
-
-    // Connect the child fragments in a new fragment, and set the data partition
-    // of the new fragment and its child fragments.
-    PlanFragment joinFragment =
-        new PlanFragment(ctx_.getNextFragmentId(), node, lhsJoinPartition);
-    leftChildFragment.setDestination(lhsExchange);
-    leftChildFragment.setOutputPartition(lhsJoinPartition);
-    rightChildFragment.setDestination(rhsExchange);
-    rightChildFragment.setOutputPartition(rhsJoinPartition);
-    return joinFragment;
-  }
-
-  /**
-   * Creates either a broadcast join or a repartitioning join, depending on the
-   * expected cost.
-   * If any of the inputs to the cost computation is unknown, it assumes the cost
-   * will be 0. Costs being equal, the '<=' comparison below favors broadcast joins.
-   * If perNodeMemLimit > 0 and the size of the hash table for a broadcast join is
-   * expected to exceed that mem limit, switches to partitioned join instead.
-   * TODO: revisit the choice of broadcast as the default
-   * TODO: don't create a broadcast join if we already anticipate that this will
-   * exceed the query's memory budget.
-   */
-  private PlanFragment createHashJoinFragment(
-      HashJoinNode node, PlanFragment rightChildFragment,
-      PlanFragment leftChildFragment, long perNodeMemLimit,
-      ArrayList<PlanFragment> fragments)
-      throws ImpalaException {
-    // For both join types, the total cost is calculated as the amount of data
-    // sent over the network, plus the amount of data inserted into the hash table.
-    // broadcast: send the rightChildFragment's output to each node executing
-    // the leftChildFragment, and build a hash table with it on each node.
-    Analyzer analyzer = ctx_.getRootAnalyzer();
-    PlanNode rhsTree = rightChildFragment.getPlanRoot();
-    long rhsDataSize = 0;
-    long broadcastCost = Long.MAX_VALUE;
-    if (rhsTree.getCardinality() != -1) {
-      rhsDataSize = Math.round(
-          rhsTree.getCardinality() * ExchangeNode.getAvgSerializedRowSize(rhsTree));
-      if (leftChildFragment.getNumNodes() != -1) {
-        broadcastCost = 2 * rhsDataSize * leftChildFragment.getNumNodes();
-      }
-    }
-    LOG.debug("broadcast: cost=" + Long.toString(broadcastCost));
-    LOG.debug("card=" + Long.toString(rhsTree.getCardinality()) + " row_size="
-        + Float.toString(rhsTree.getAvgRowSize()) + " #nodes="
-        + Integer.toString(leftChildFragment.getNumNodes()));
-
-    // repartition: both left- and rightChildFragment are partitioned on the
-    // join exprs, and a hash table is built with the rightChildFragment's output.
-    PlanNode lhsTree = leftChildFragment.getPlanRoot();
-    long partitionCost = Long.MAX_VALUE;
-    List<Expr> lhsJoinExprs = Lists.newArrayList();
-    List<Expr> rhsJoinExprs = Lists.newArrayList();
-    for (Expr joinConjunct: node.getEqJoinConjuncts()) {
-      // no remapping necessary
-      lhsJoinExprs.add(joinConjunct.getChild(0).clone());
-      rhsJoinExprs.add(joinConjunct.getChild(1).clone());
-    }
-    boolean lhsHasCompatPartition = false;
-    boolean rhsHasCompatPartition = false;
-    if (lhsTree.getCardinality() != -1 && rhsTree.getCardinality() != -1) {
-      lhsHasCompatPartition = analyzer.equivSets(lhsJoinExprs,
-          leftChildFragment.getDataPartition().getPartitionExprs());
-      rhsHasCompatPartition = analyzer.equivSets(rhsJoinExprs,
-          rightChildFragment.getDataPartition().getPartitionExprs());
-
-      double lhsNetworkCost = (lhsHasCompatPartition) ? 0.0 :
-        Math.round(
-            lhsTree.getCardinality() * ExchangeNode.getAvgSerializedRowSize(lhsTree));
-      double rhsNetworkCost = (rhsHasCompatPartition) ? 0.0 : rhsDataSize;
-      partitionCost = Math.round(lhsNetworkCost + rhsNetworkCost + rhsDataSize);
-    }
-    LOG.debug("partition: cost=" + Long.toString(partitionCost));
-    LOG.debug("lhs card=" + Long.toString(lhsTree.getCardinality()) + " row_size="
-        + Float.toString(lhsTree.getAvgRowSize()));
-    LOG.debug("rhs card=" + Long.toString(rhsTree.getCardinality()) + " row_size="
-        + Float.toString(rhsTree.getAvgRowSize()));
-    LOG.debug(rhsTree.getExplainString());
-
-    boolean doBroadcast = false;
-    // we do a broadcast join if
-    // - we're explicitly told to do so
-    // - or if it's cheaper and we weren't explicitly told to do a partitioned join
-    // - and we're not doing a full outer or right outer/semi join (those require the
-    //   left-hand side to be partitioned for correctness)
-    // - and the expected size of the hash tbl doesn't exceed perNodeMemLimit
-    // - or we are doing a null-aware left anti join (broadcast is required for
-    //   correctness)
-    // we do a "<=" comparison of the costs so that we default to broadcast joins if
-    // we're unable to estimate the cost
-    if ((node.getJoinOp() != JoinOperator.RIGHT_OUTER_JOIN
-        && node.getJoinOp() != JoinOperator.FULL_OUTER_JOIN
-        && node.getJoinOp() != JoinOperator.RIGHT_SEMI_JOIN
-        && node.getJoinOp() != JoinOperator.RIGHT_ANTI_JOIN
-        // a broadcast join hint overrides the check to see if the hash table
-        // size is less than the per-node mem limit
-        && (node.getDistributionModeHint() == DistributionMode.BROADCAST
-            || perNodeMemLimit == 0
-            || Math.round(rhsDataSize * PlannerContext.HASH_TBL_SPACE_OVERHEAD)
-                <= perNodeMemLimit)
-        // a broadcast join hint overrides the check to see if performing a broadcast
-        // join is more costly than a partitioned join
-        && (node.getDistributionModeHint() == DistributionMode.BROADCAST
-            || (node.getDistributionModeHint() != DistributionMode.PARTITIONED
-                && broadcastCost <= partitionCost)))
-        || node.getJoinOp().isNullAwareLeftAntiJoin()) {
-      doBroadcast = true;
-    }
-
-    PlanFragment hjFragment = null;
-    if (doBroadcast) {
-      node.setDistributionMode(HashJoinNode.DistributionMode.BROADCAST);
-      // Doesn't create a new fragment, but modifies leftChildFragment to execute
-      // the join; the build input is provided by an ExchangeNode, which is the
-      // destination of the rightChildFragment's output
-      node.setChild(0, leftChildFragment.getPlanRoot());
-      connectChildFragment(node, 1, leftChildFragment, rightChildFragment);
-      leftChildFragment.setPlanRoot(node);
-      hjFragment = leftChildFragment;
-    } else {
-      hjFragment = createPartitionedHashJoinFragment(node, analyzer,
-          lhsHasCompatPartition, rhsHasCompatPartition, leftChildFragment,
-          rightChildFragment, lhsJoinExprs, rhsJoinExprs, fragments);
-    }
-
-    for (RuntimeFilter filter: node.getRuntimeFilters()) {
-      filter.setIsBroadcast(doBroadcast);
-      filter.computeHasLocalTargets();
-      // Work around IMPALA-3450, where cardinalities might be wrong in single-node plans
-      // with UNION and LIMITs.
-      // TODO: Remove.
-      filter.computeNdvEstimate();
-    }
-    return hjFragment;
- }
-
-  /**
-   * Returns true if the lhs and rhs partitions are physically compatible for executing
-   * a partitioned join with the given lhs/rhs join exprs. Physical compatibility means
-   * that lhs/rhs exchange nodes hashing on exactly those partition expressions are
-   * guaranteed to send two rows with identical partition-expr values to the same node.
-   * The requirements for physical compatibility are:
-   * 1. Number of exprs must be the same
-   * 2. The lhs partition exprs are identical to the lhs join exprs and the rhs partition
-   *    exprs are identical to the rhs join exprs
-   * 3. Or for each expr in the lhs partition, there must be an equivalent expr in the
-   *    rhs partition at the same ordinal position within the expr list
-   * (4. The expr types must be identical, but that is enforced later in PlanFragment)
-   * Conditions 2 and 3 are similar but not the same due to outer joins, e.g., for full
-   * outer joins condition 3 can never be met, but condition 2 can.
-   * TODO: Move parts of this function into DataPartition as appropriate.
-   */
-  private boolean isCompatPartition(DataPartition lhsPartition,
-      DataPartition rhsPartition, List<Expr> lhsJoinExprs, List<Expr> rhsJoinExprs,
-      Analyzer analyzer) {
-    List<Expr> lhsPartExprs = lhsPartition.getPartitionExprs();
-    List<Expr> rhsPartExprs = rhsPartition.getPartitionExprs();
-    // 1. Sizes must be equal.
-    if (lhsPartExprs.size() != rhsPartExprs.size()) return false;
-    // 2. Lhs/rhs join exprs are identical to lhs/rhs partition exprs.
-    Preconditions.checkState(lhsJoinExprs.size() == rhsJoinExprs.size());
-    if (lhsJoinExprs.size() == lhsPartExprs.size()) {
-      if (lhsJoinExprs.equals(lhsPartExprs) && rhsJoinExprs.equals(rhsPartExprs)) {
-        return true;
-      }
-    }
-    // 3. Each lhs part expr must have an equivalent expr at the same position
-    // in the rhs part exprs.
-    for (int i = 0; i < lhsPartExprs.size(); ++i) {
-      if (!analyzer.equivExprs(lhsPartExprs.get(i), rhsPartExprs.get(i))) return false;
-    }
-    return true;
-  }
-
-  /**
-   * Returns a new data partition that is suitable for creating an exchange node to feed
-   * a partitioned hash join. The hash join is assumed to be placed in a fragment with an
-   * existing data partition that is compatible with either the lhs or rhs join exprs
-   * (srcPartition belongs to the fragment and srcJoinExprs are the compatible exprs).
-   * The returned partition uses the given joinExprs which are assumed to be the lhs or
-   * rhs join exprs, whichever srcJoinExprs are not.
-   * The returned data partition has two important properties to ensure correctness:
-   * 1. It has exactly the same number of hash exprs as the srcPartition (IMPALA-1307),
-   *    possibly by removing redundant exprs from joinExprs or adding some joinExprs
-   *    multiple times to match the srcPartition
-   * 2. The hash exprs are ordered based on their corresponding 'matches' in
-   *    the existing srcPartition (IMPALA-1324).
-   * Returns null if no compatible data partition could be constructed.
-   * TODO: Move parts of this function into DataPartition as appropriate.
-   * TODO: Make comment less operational and more semantic.
-   */
-  private DataPartition getCompatPartition(List<Expr> srcJoinExprs,
-      DataPartition srcPartition, List<Expr> joinExprs, Analyzer analyzer) {
-    Preconditions.checkState(srcPartition.isHashPartitioned());
-    List<Expr> srcPartExprs = srcPartition.getPartitionExprs();
-    List<Expr> resultPartExprs = Lists.newArrayList();
-    for (int i = 0; i < srcPartExprs.size(); ++i) {
-      for (int j = 0; j < srcJoinExprs.size(); ++j) {
-        if (analyzer.equivExprs(srcPartExprs.get(i), srcJoinExprs.get(j))) {
-          resultPartExprs.add(joinExprs.get(j).clone());
-          break;
-        }
-      }
-    }
-    if (resultPartExprs.size() != srcPartExprs.size()) return null;
-    return DataPartition.hashPartitioned(resultPartExprs);
-  }
-
-  /**
-   * Returns a new fragment with a UnionNode as its root. The data partition of the
-   * returned fragment and how the data of the child fragments is consumed depends on the
-   * data partitions of the child fragments:
-   * - All child fragments are unpartitioned or partitioned: The returned fragment has an
-   *   UNPARTITIONED or RANDOM data partition, respectively. The UnionNode absorbs the
-   *   plan trees of all child fragments.
-   * - Mixed partitioned/unpartitioned child fragments: The returned fragment is
-   *   RANDOM partitioned. The plan trees of all partitioned child fragments are absorbed
-   *   into the UnionNode. All unpartitioned child fragments are connected to the
-   *   UnionNode via a RANDOM exchange, and remain unchanged otherwise.
-   */
-  private PlanFragment createUnionNodeFragment(UnionNode unionNode,
-      ArrayList<PlanFragment> childFragments, ArrayList<PlanFragment> fragments)
-      throws ImpalaException {
-    Preconditions.checkState(unionNode.getChildren().size() == childFragments.size());
-
-    // A UnionNode could have no children or constant selects if all of its operands
-    // were dropped because of constant predicates that evaluated to false.
-    if (unionNode.getChildren().isEmpty()) {
-      return new PlanFragment(
-          ctx_.getNextFragmentId(), unionNode, DataPartition.UNPARTITIONED);
-    }
-
-    Preconditions.checkState(!childFragments.isEmpty());
-    int numUnpartitionedChildFragments = 0;
-    for (int i = 0; i < childFragments.size(); ++i) {
-      if (!childFragments.get(i).isPartitioned()) ++numUnpartitionedChildFragments;
-    }
-
-    // remove all children to avoid them being tagged with the wrong
-    // fragment (in the PlanFragment c'tor; we haven't created ExchangeNodes yet)
-    unionNode.clearChildren();
-
-    // If all child fragments are unpartitioned, return a single unpartitioned fragment
-    // with a UnionNode that merges all child fragments.
-    if (numUnpartitionedChildFragments == childFragments.size()) {
-      PlanFragment unionFragment = new PlanFragment(ctx_.getNextFragmentId(),
-          unionNode, DataPartition.UNPARTITIONED);
-      // Absorb the plan trees of all childFragments into unionNode
-      // and fix up the fragment tree in the process.
-      for (int i = 0; i < childFragments.size(); ++i) {
-        unionNode.addChild(childFragments.get(i).getPlanRoot());
-        unionFragment.setFragmentInPlanTree(unionNode.getChild(i));
-        unionFragment.addChildren(childFragments.get(i).getChildren());
-      }
-      unionNode.init(ctx_.getRootAnalyzer());
-      // All child fragments have been absorbed into unionFragment.
-      fragments.removeAll(childFragments);
-      return unionFragment;
-    }
-
-    // There is at least one partitioned child fragment.
-    PlanFragment unionFragment = new PlanFragment(
-        ctx_.getNextFragmentId(), unionNode, DataPartition.RANDOM);
-    for (int i = 0; i < childFragments.size(); ++i) {
-      PlanFragment childFragment = childFragments.get(i);
-      if (childFragment.isPartitioned()) {
-        // absorb the plan trees of all partitioned child fragments into unionNode
-        unionNode.addChild(childFragment.getPlanRoot());
-        unionFragment.setFragmentInPlanTree(unionNode.getChild(i));
-        unionFragment.addChildren(childFragment.getChildren());
-        fragments.remove(childFragment);
-      } else {
-        // dummy entry for subsequent addition of the ExchangeNode
-        unionNode.addChild(null);
-        // Connect the unpartitioned child fragments to unionNode via a random exchange.
-        connectChildFragment(unionNode, i, unionFragment, childFragment);
-        childFragment.setOutputPartition(DataPartition.RANDOM);
-      }
-    }
-    unionNode.reorderOperands(ctx_.getRootAnalyzer());
-    unionNode.init(ctx_.getRootAnalyzer());
-    return unionFragment;
-  }
-
-  /**
-   * Adds the SelectNode as the new plan root to the child fragment and returns
-   * the child fragment.
-   */
-  private PlanFragment createSelectNodeFragment(SelectNode selectNode,
-      ArrayList<PlanFragment> childFragments) {
-    Preconditions.checkState(selectNode.getChildren().size() == childFragments.size());
-    PlanFragment childFragment = childFragments.get(0);
-    // set the child explicitly, an ExchangeNode might have been inserted
-    // (whereas selectNode.child[0] would point to the original child)
-    selectNode.setChild(0, childFragment.getPlanRoot());
-    childFragment.setPlanRoot(selectNode);
-    return childFragment;
-  }
-
-  /**
-   * Replace node's child at index childIdx with an ExchangeNode that receives its
-   * input from childFragment. ParentFragment contains node and the new ExchangeNode.
-   */
-  private void connectChildFragment(PlanNode node, int childIdx,
-      PlanFragment parentFragment, PlanFragment childFragment) throws ImpalaException {
-    ExchangeNode exchangeNode =
-        new ExchangeNode(ctx_.getNextNodeId(), childFragment.getPlanRoot());
-    exchangeNode.init(ctx_.getRootAnalyzer());
-    exchangeNode.setFragment(parentFragment);
-    node.setChild(childIdx, exchangeNode);
-    childFragment.setDestination(exchangeNode);
-  }
-
-  /**
-   * Create a new fragment containing a single ExchangeNode that consumes the output
-   * of childFragment, set the destination of childFragment to the new parent
-   * and the output partition of childFragment to that of the new parent.
-   * TODO: the output partition of a child isn't necessarily the same as the data
-   * partition of the receiving parent (if there is more materialization happening
-   * in the parent, such as during distinct aggregation). Do we care about the data
-   * partition of the parent being applicable to the *output* of the parent (it's
-   * correct for the input)?
-   */
-  private PlanFragment createParentFragment(
-      PlanFragment childFragment, DataPartition parentPartition)
-      throws ImpalaException {
-    ExchangeNode exchangeNode =
-        new ExchangeNode(ctx_.getNextNodeId(), childFragment.getPlanRoot());
-    exchangeNode.init(ctx_.getRootAnalyzer());
-    PlanFragment parentFragment = new PlanFragment(ctx_.getNextFragmentId(),
-        exchangeNode, parentPartition);
-    childFragment.setDestination(exchangeNode);
-    childFragment.setOutputPartition(parentPartition);
-    return parentFragment;
-  }
-
-  /**
-   * Returns a fragment that materializes the aggregation result of 'node'.
-   * If the child fragment is partitioned, the result fragment will be partitioned on
-   * the grouping exprs of 'node'.
-   * If 'node' is phase 1 of a 2-phase DISTINCT aggregation, this will simply
-   * add 'node' to the child fragment and return the child fragment; the new
-   * fragment will be created by the subsequent call of createAggregationFragment()
-   * for the phase 2 AggregationNode.
-   */
-  private PlanFragment createAggregationFragment(AggregationNode node,
-      PlanFragment childFragment, ArrayList<PlanFragment> fragments)
-      throws ImpalaException {
-    if (!childFragment.isPartitioned()) {
-      // nothing to distribute; do full aggregation directly within childFragment
-      childFragment.addPlanRoot(node);
-      return childFragment;
-    }
-
-    if (node.getAggInfo().isDistinctAgg()) {
-      // 'node' is phase 1 of a DISTINCT aggregation; the actual agg fragment
-      // will get created in the next createAggregationFragment() call
-      // for the parent AggregationNode
-      childFragment.addPlanRoot(node);
-      return childFragment;
-    }
-
-    // Check if 'node' is phase 2 of a DISTINCT aggregation.
-    boolean isDistinct = node.getChild(0) instanceof AggregationNode
-          && ((AggregationNode)(node.getChild(0))).getAggInfo().isDistinctAgg();
-    if (isDistinct) {
-      return createPhase2DistinctAggregationFragment(node, childFragment, fragments);
-    } else {
-      return createMergeAggregationFragment(node, childFragment);
-    }
-  }
-
-  /**
-   * Returns a fragment that materializes the final result of an aggregation where
-   * 'childFragment' is a partitioned fragment and 'node' is not part of a distinct
-   * aggregation.
-   */
-  private PlanFragment createMergeAggregationFragment(
-      AggregationNode node, PlanFragment childFragment)
-      throws ImpalaException {
-    Preconditions.checkArgument(childFragment.isPartitioned());
-    ArrayList<Expr> groupingExprs = node.getAggInfo().getGroupingExprs();
-    boolean hasGrouping = !groupingExprs.isEmpty();
-
-    DataPartition parentPartition = null;
-    if (hasGrouping) {
-      // the parent fragment is partitioned on the grouping exprs;
-      // substitute grouping exprs to reference the *output* of the agg, not the input
-      List<Expr> partitionExprs = node.getAggInfo().getPartitionExprs();
-      if (partitionExprs == null) partitionExprs = groupingExprs;
-      partitionExprs = Expr.substituteList(partitionExprs,
-          node.getAggInfo().getIntermediateSmap(), ctx_.getRootAnalyzer(), false);
-      boolean childHasCompatPartition = ctx_.getRootAnalyzer().equivSets(partitionExprs,
-            childFragment.getDataPartition().getPartitionExprs());
-      if (childHasCompatPartition) {
-        // The data is already partitioned on the required expressions, we can just do
-        // the aggregation in the child fragment without an extra merge step.
-        childFragment.addPlanRoot(node);
-        return childFragment;
-      }
-      parentPartition = DataPartition.hashPartitioned(partitionExprs);
-    } else {
-      // the parent fragment is unpartitioned
-      parentPartition = DataPartition.UNPARTITIONED;
-    }
-
-    // the original aggregation materializes the intermediate agg tuple and goes
-    // into the child fragment; merge aggregation materializes the output agg tuple
-    // and goes into a parent fragment
-    childFragment.addPlanRoot(node);
-    node.setIntermediateTuple();
-    node.setIsPreagg(ctx_);
-
-    // if there is a limit, we need to transfer it from the pre-aggregation
-    // node in the child fragment to the merge aggregation node in the parent
-    long limit = node.getLimit();
-    node.unsetLimit();
-    node.unsetNeedsFinalize();
-
-    // place a merge aggregation step in a new fragment
-    PlanFragment mergeFragment = createParentFragment(childFragment, parentPartition);
-    AggregationNode mergeAggNode = new AggregationNode(ctx_.getNextNodeId(),
-        mergeFragment.getPlanRoot(), node.getAggInfo().getMergeAggInfo());
-    mergeAggNode.init(ctx_.getRootAnalyzer());
-    mergeAggNode.setLimit(limit);
-
-    // HAVING predicates can only be evaluated after the merge agg step
-    node.transferConjuncts(mergeAggNode);
-    // Recompute stats after transferring the conjuncts_ (order is important).
-    node.computeStats(ctx_.getRootAnalyzer());
-    mergeFragment.getPlanRoot().computeStats(ctx_.getRootAnalyzer());
-    mergeAggNode.computeStats(ctx_.getRootAnalyzer());
-    // Set new plan root after updating stats.
-    mergeFragment.addPlanRoot(mergeAggNode);
-
-    return mergeFragment;
-  }
-
-  /**
-   * Returns a fragment that materializes the final result of a distinct aggregation
-   * where 'childFragment' is a partitioned fragment with the first phase aggregation
-   * as its root and 'node' is the second phase of the distinct aggregation.
-   */
-  private PlanFragment createPhase2DistinctAggregationFragment(AggregationNode node,
-      PlanFragment childFragment, ArrayList<PlanFragment> fragments)
-      throws ImpalaException {
-    ArrayList<Expr> groupingExprs = node.getAggInfo().getGroupingExprs();
-    boolean hasGrouping = !groupingExprs.isEmpty();
-
-    // The first-phase aggregation node is already in the child fragment.
-    Preconditions.checkState(node.getChild(0) == childFragment.getPlanRoot());
-
-    AggregateInfo firstPhaseAggInfo = ((AggregationNode) node.getChild(0)).getAggInfo();
-    List<Expr> partitionExprs = null;
-    if (hasGrouping) {
-      // We need to do
-      // - child fragment:
-      //   * phase-1 aggregation
-      // - merge fragment, hash-partitioned on grouping exprs:
-      //   * merge agg of phase 1
-      //   * phase 2 agg
-      // The output partition exprs of the child are the (input) grouping exprs of the
-      // parent. The grouping exprs reference the output tuple of the 1st phase, but the
-      // partitioning happens on the intermediate tuple of the 1st phase.
-      partitionExprs = Expr.substituteList(
-          groupingExprs, firstPhaseAggInfo.getOutputToIntermediateSmap(),
-          ctx_.getRootAnalyzer(), false);
-    } else {
-      // We need to do
-      // - child fragment:
-      //   * phase-1 aggregation
-      // - merge fragment 1, hash-partitioned on distinct exprs:
-      //   * merge agg of phase 1
-      //   * phase 2 agg
-      // - merge fragment 2, unpartitioned:
-      //   * merge agg of phase 2
-      partitionExprs = Expr.substituteList(firstPhaseAggInfo.getGroupingExprs(),
-          firstPhaseAggInfo.getIntermediateSmap(), ctx_.getRootAnalyzer(), false);
-    }
-
-    PlanFragment mergeFragment = null;
-    boolean childHasCompatPartition = ctx_.getRootAnalyzer().equivSets(partitionExprs,
-        childFragment.getDataPartition().getPartitionExprs());
-    if (childHasCompatPartition) {
-      // The data is already partitioned on the required expressions, we can skip the
-      // phase 1 merge step.
-      childFragment.addPlanRoot(node);
-      mergeFragment = childFragment;
-    } else {
-      DataPartition mergePartition = DataPartition.hashPartitioned(partitionExprs);
-      // Convert the existing node to a preaggregation.
-      AggregationNode preaggNode = (AggregationNode)node.getChild(0);
-      preaggNode.setIsPreagg(ctx_);
-
-      // place a merge aggregation step for the 1st phase in a new fragment
-      mergeFragment = createParentFragment(childFragment, mergePartition);
-      AggregateInfo phase1MergeAggInfo = firstPhaseAggInfo.getMergeAggInfo();
-      AggregationNode phase1MergeAggNode =
-          new AggregationNode(ctx_.getNextNodeId(), preaggNode, phase1MergeAggInfo);
-      phase1MergeAggNode.init(ctx_.getRootAnalyzer());
-      phase1MergeAggNode.unsetNeedsFinalize();
-      phase1MergeAggNode.setIntermediateTuple();
-      mergeFragment.addPlanRoot(phase1MergeAggNode);
-
-      // the 2nd-phase aggregation consumes the output of the merge agg;
-      // if there is a limit, it had already been placed with the 2nd aggregation
-      // step (which is where it should be)
-      mergeFragment.addPlanRoot(node);
-    }
-
-    if (!hasGrouping) {
-      // place the merge aggregation of the 2nd phase in an unpartitioned fragment;
-      // add preceding merge fragment at end
-      if (mergeFragment != childFragment) fragments.add(mergeFragment);
-
-      node.unsetNeedsFinalize();
-      node.setIntermediateTuple();
-      // Any limit should be placed in the final merge aggregation node
-      long limit = node.getLimit();
-      node.unsetLimit();
-      mergeFragment = createParentFragment(mergeFragment, DataPartition.UNPARTITIONED);
-      AggregateInfo phase2MergeAggInfo = node.getAggInfo().getMergeAggInfo();
-      AggregationNode phase2MergeAggNode = new AggregationNode(ctx_.getNextNodeId(), node,
-          phase2MergeAggInfo);
-      phase2MergeAggNode.init(ctx_.getRootAnalyzer());
-      // Transfer having predicates. If hasGrouping == true, the predicates should
-      // instead be evaluated by the 2nd phase agg (the predicates are already there).
-      node.transferConjuncts(phase2MergeAggNode);
-      phase2MergeAggNode.setLimit(limit);
-      mergeFragment.addPlanRoot(phase2MergeAggNode);
-    }
-    return mergeFragment;
-  }
-
-  /**
-   * Returns a fragment that produces the output of either an AnalyticEvalNode
-   * or of the SortNode that provides the input to an AnalyticEvalNode.
-   * ('node' can be either an AnalyticEvalNode or a SortNode).
-   * The returned fragment is either partitioned on the Partition By exprs or
-   * unpartitioned in the absence of such exprs.
-   */
-  private PlanFragment createAnalyticFragment(PlanNode node,
-      PlanFragment childFragment, ArrayList<PlanFragment> fragments)
-      throws ImpalaException {
-    Preconditions.checkState(
-        node instanceof SortNode || node instanceof AnalyticEvalNode);
-    if (node instanceof AnalyticEvalNode) {
-      AnalyticEvalNode analyticNode = (AnalyticEvalNode) node;
-      if (analyticNode.getPartitionExprs().isEmpty()
-          && analyticNode.getOrderByElements().isEmpty()) {
-        // no Partition-By/Order-By exprs: compute analytic exprs in single
-        // unpartitioned fragment
-        PlanFragment fragment = childFragment;
-        if (childFragment.isPartitioned()) {
-          fragment = createParentFragment(childFragment, DataPartition.UNPARTITIONED);
-        }
-        fragment.addPlanRoot(analyticNode);
-        return fragment;
-      } else {
-        childFragment.addPlanRoot(analyticNode);
-        return childFragment;
-      }
-    }
-
-    SortNode sortNode = (SortNode) node;
-    Preconditions.checkState(sortNode.isAnalyticSort());
-    PlanFragment analyticFragment = childFragment;
-    if (sortNode.getInputPartition() != null) {
-      // make sure the childFragment's output is partitioned as required by the sortNode
-      sortNode.getInputPartition().substitute(
-          childFragment.getPlanRoot().getOutputSmap(), ctx_.getRootAnalyzer());
-      if (!childFragment.getDataPartition().equals(sortNode.getInputPartition())) {
-        analyticFragment =
-            createParentFragment(childFragment, sortNode.getInputPartition());
-      }
-    }
-    analyticFragment.addPlanRoot(sortNode);
-    return analyticFragment;
-  }
-
-  /**
-   * Returns a new unpartitioned fragment that materializes the result of the given
-   * SortNode. If the child fragment is partitioned, returns a new fragment with a
-   * sort-merging exchange that merges the results of the partitioned sorts.
-   * The offset and limit are adjusted in the child and parent plan nodes to produce
-   * the correct result.
-   */
-  private PlanFragment createOrderByFragment(SortNode node,
-      PlanFragment childFragment, ArrayList<PlanFragment> fragments)
-      throws ImpalaException {
-    node.setChild(0, childFragment.getPlanRoot());
-    childFragment.addPlanRoot(node);
-    if (!childFragment.isPartitioned()) return childFragment;
-
-    // Remember original offset and limit.
-    boolean hasLimit = node.hasLimit();
-    long limit = node.getLimit();
-    long offset = node.getOffset();
-
-    // Create a new fragment for a sort-merging exchange.
-    PlanFragment mergeFragment =
-        createParentFragment(childFragment, DataPartition.UNPARTITIONED);
-    ExchangeNode exchNode = (ExchangeNode) mergeFragment.getPlanRoot();
-
-    // Set limit, offset and merge parameters in the exchange node.
-    exchNode.unsetLimit();
-    if (hasLimit) exchNode.setLimit(limit);
-    exchNode.setMergeInfo(node.getSortInfo(), offset);
-
-    // Child nodes should not process the offset. If there is a limit,
-    // the child nodes need only return (offset + limit) rows.
-    SortNode childSortNode = (SortNode) childFragment.getPlanRoot();
-    Preconditions.checkState(node == childSortNode);
-    if (hasLimit) {
-      childSortNode.unsetLimit();
-      childSortNode.setLimit(limit + offset);
-    }
-    childSortNode.setOffset(0);
-    childSortNode.computeStats(ctx_.getRootAnalyzer());
-    exchNode.computeStats(ctx_.getRootAnalyzer());
-
-    return mergeFragment;
-  }
-}
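
The broadcast-versus-partitioned choice in createHashJoinFragment compares two rough estimates: broadcasting ships the build side to every node running the probe fragment and builds a full hash table on each of them (roughly 2 * |rhs| * numNodes), while repartitioning ships each side over the network at most once and builds the hash table only once in aggregate (roughly |lhs| + 2 * |rhs|). A small worked sketch of that arithmetic with made-up sizes (an illustration only, not Impala's API):

// Toy illustration of the broadcast-vs-partitioned cost comparison; not Impala code.
public class JoinCostSketch {
  public static void main(String[] args) {
    // Hypothetical estimates: bytes produced by each join input and the
    // number of nodes executing the probe-side (lhs) fragment.
    long lhsDataSize = 40L * 1024 * 1024 * 1024;  // ~40 GB probe side
    long rhsDataSize = 1L * 1024 * 1024 * 1024;   //  ~1 GB build side
    int numLhsNodes = 20;

    // Broadcast: ship the build side to every lhs node and build a hash
    // table with it on each of them.
    long broadcastCost = 2 * rhsDataSize * numLhsNodes;           // ~40 GB
    // Partitioned: ship both sides across the network once (assuming neither
    // is already partitioned on the join exprs) and build one hash table.
    long partitionCost = lhsDataSize + rhsDataSize + rhsDataSize; // ~42 GB

    // Mirrors the planner's tie-breaking: '<=' defaults to broadcast.
    boolean doBroadcast = broadcastCost <= partitionCost;
    System.out.println("broadcast=" + broadcastCost + " partitioned=" + partitionCost
        + " -> " + (doBroadcast ? "BROADCAST" : "PARTITIONED"));
  }
}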

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/planner/EmptySetNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/planner/EmptySetNode.java b/fe/src/main/java/com/cloudera/impala/planner/EmptySetNode.java
deleted file mode 100644
index ed9dc70..0000000
--- a/fe/src/main/java/com/cloudera/impala/planner/EmptySetNode.java
+++ /dev/null
@@ -1,71 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.planner;
-
-import java.util.ArrayList;
-
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.TupleId;
-import com.cloudera.impala.thrift.TExplainLevel;
-import com.cloudera.impala.thrift.TPlanNode;
-import com.cloudera.impala.thrift.TPlanNodeType;
-import com.google.common.base.Preconditions;
-
-/**
- * Node that returns an empty result set. Used for planning query blocks with a constant
- * predicate evaluating to false or a limit 0. The result set will have zero rows, but
- * the row descriptor must still include a materialized tuple so that the backend can
-   * construct a valid empty row batch.
- */
-public class EmptySetNode extends PlanNode {
-  public EmptySetNode(PlanNodeId id, ArrayList<TupleId> tupleIds) {
-    super(id, tupleIds, "EMPTYSET");
-    Preconditions.checkArgument(tupleIds.size() > 0);
-  }
-
-  @Override
-  public void computeStats(Analyzer analyzer) {
-    avgRowSize_ = 0;
-    cardinality_ = 0;
-    perHostMemCost_ = 0;
-    numNodes_ = 1;
-  }
-
-  @Override
-  public void init(Analyzer analyzer) {
-    Preconditions.checkState(conjuncts_.isEmpty());
-    // If the physical output tuple produced by an AnalyticEvalNode wasn't created,
-    // the logical output tuple is returned by getMaterializedTupleIds(). It needs
-    // to be set as materialized (even though it isn't) to avoid failing precondition
-    // checks when generating the thrift for slot refs that may reference this tuple.
-    for (TupleId id: tupleIds_) analyzer.getTupleDesc(id).setIsMaterialized(true);
-    computeMemLayout(analyzer);
-    computeStats(analyzer);
-  }
-
-  @Override
-  protected String getNodeExplainString(String prefix, String detailPrefix,
-      TExplainLevel detailLevel) {
-    return String.format("%s%s:%s\n", prefix, id_.toString(), displayName_);
-  }
-
-  @Override
-  protected void toThrift(TPlanNode msg) {
-    msg.node_type = TPlanNodeType.EMPTY_SET_NODE;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/planner/ExchangeNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/planner/ExchangeNode.java b/fe/src/main/java/com/cloudera/impala/planner/ExchangeNode.java
deleted file mode 100644
index eeef5fe..0000000
--- a/fe/src/main/java/com/cloudera/impala/planner/ExchangeNode.java
+++ /dev/null
@@ -1,204 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.planner;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.analysis.SortInfo;
-import com.cloudera.impala.analysis.TupleId;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.thrift.TExchangeNode;
-import com.cloudera.impala.thrift.TExplainLevel;
-import com.cloudera.impala.thrift.TPlanNode;
-import com.cloudera.impala.thrift.TPlanNodeType;
-import com.cloudera.impala.thrift.TSortInfo;
-import com.google.common.base.Preconditions;
-
-/**
- * Receiver side of a 1:n data stream. Logically, an ExchangeNode consumes the data
- * produced by its children. For each of the sending child nodes the actual data
- * transmission is performed by the DataStreamSink of the PlanFragment housing
- * that child node. Typically, an ExchangeNode only has a single sender child but,
- * e.g., for distributed union queries an ExchangeNode may have one sender child per
- * union operand.
- *
- * If an (optional) SortInfo field is set, the ExchangeNode will merge its
- * inputs on the parameters specified in the SortInfo object. It is assumed that the
- * inputs are also sorted individually on the same SortInfo parameters.
- */
-public class ExchangeNode extends PlanNode {
-  private final static Logger LOG = LoggerFactory.getLogger(ExchangeNode.class);
-
-  // The serialization overhead per tuple in bytes when sent over an exchange.
-  // Currently it accounts only for the tuple_offset entry per tuple (4B) in a
-  // BE TRowBatch. If we modify the RowBatch serialization, then we need to
-  // update this constant as well.
-  private static final double PER_TUPLE_SERIALIZATION_OVERHEAD = 4.0;
-
-  // The parameters based on which sorted input streams are merged by this
-  // exchange node. Null if this exchange does not merge sorted streams
-  private SortInfo mergeInfo_;
-
-  // Offset after which the exchange begins returning rows. Currently valid
-  // only if mergeInfo_ is non-null, i.e. this is a merging exchange node.
-  private long offset_;
-
-  public ExchangeNode(PlanNodeId id, PlanNode input) {
-    super(id, "EXCHANGE");
-    offset_ = 0;
-    children_.add(input);
-    // Only apply the limit at the receiver if there are multiple senders.
-    if (input.getFragment().isPartitioned()) limit_ = input.limit_;
-    computeTupleIds();
-  }
-
-  @Override
-  public void computeTupleIds() {
-    clearTupleIds();
-    tupleIds_.addAll(getChild(0).getTupleIds());
-    tblRefIds_.addAll(getChild(0).getTblRefIds());
-    nullableTupleIds_.addAll(getChild(0).getNullableTupleIds());
-  }
-
-  @Override
-  public void init(Analyzer analyzer) throws ImpalaException {
-    super.init(analyzer);
-    Preconditions.checkState(conjuncts_.isEmpty());
-  }
-
-  @Override
-  public void computeStats(Analyzer analyzer) {
-    Preconditions.checkState(!children_.isEmpty(),
-        "ExchangeNode must have at least one child");
-    cardinality_ = 0;
-    for (PlanNode child: children_) {
-      if (child.getCardinality() == -1) {
-        cardinality_ = -1;
-        break;
-      }
-      cardinality_ = addCardinalities(cardinality_, child.getCardinality());
-    }
-
-    if (hasLimit()) {
-      if (cardinality_ == -1) {
-        cardinality_ = limit_;
-      } else {
-        cardinality_ = Math.min(limit_, cardinality_);
-      }
-    }
-
-    // Apply the offset correction if there's a valid cardinality
-    if (cardinality_ > -1) {
-      cardinality_ = Math.max(0, cardinality_ - offset_);
-    }
-
-    // Pick the max numNodes_ and avgRowSize_ of all children.
-    numNodes_ = Integer.MIN_VALUE;
-    avgRowSize_ = Integer.MIN_VALUE;
-    for (PlanNode child: children_) {
-      numNodes_ = Math.max(child.numNodes_, numNodes_);
-      avgRowSize_ = Math.max(child.avgRowSize_, avgRowSize_);
-    }
-  }
-
-  /**
-   * Set the parameters used to merge sorted input streams. This can be called
-   * after init().
-   */
-  public void setMergeInfo(SortInfo info, long offset) {
-    mergeInfo_ = info;
-    offset_ = offset;
-    displayName_ = "MERGING-EXCHANGE";
-  }
-
-  @Override
-  protected String getNodeExplainString(String prefix, String detailPrefix,
-      TExplainLevel detailLevel) {
-    StringBuilder output = new StringBuilder();
-    output.append(String.format("%s%s [%s]\n", prefix,
-        getDisplayLabel(), getDisplayLabelDetail()));
-
-    if (offset_ > 0) {
-      output.append(detailPrefix + "offset: ").append(offset_).append("\n");
-    }
-
-    if (mergeInfo_ != null && detailLevel.ordinal() > TExplainLevel.MINIMAL.ordinal()) {
-      output.append(detailPrefix + "order by: ");
-      for (int i = 0; i < mergeInfo_.getOrderingExprs().size(); ++i) {
-        if (i > 0) output.append(", ");
-        output.append(mergeInfo_.getOrderingExprs().get(i).toSql() + " ");
-        output.append(mergeInfo_.getIsAscOrder().get(i) ? "ASC" : "DESC");
-
-        Boolean nullsFirstParam = mergeInfo_.getNullsFirstParams().get(i);
-        if (nullsFirstParam != null) {
-          output.append(nullsFirstParam ? " NULLS FIRST" : " NULLS LAST");
-        }
-      }
-      output.append("\n");
-    }
-    return output.toString();
-  }
-
-  @Override
-  protected String getDisplayLabelDetail() {
-    // For the non-fragmented explain levels, print the data partition
-    // of the data stream sink that sends to this exchange node.
-    Preconditions.checkState(!children_.isEmpty());
-    DataSink sink = getChild(0).getFragment().getSink();
-    if (sink == null) return "";
-    Preconditions.checkState(sink instanceof DataStreamSink);
-    DataStreamSink streamSink = (DataStreamSink) sink;
-    if (!streamSink.getOutputPartition().isPartitioned() &&
-        fragment_.isPartitioned()) {
-      // If the output of the sink is not partitioned but the target fragment is
-      // partitioned, then the data exchange is broadcast.
-      return "BROADCAST";
-    } else {
-      return streamSink.getOutputPartition().getExplainString();
-    }
-  }
-
-  /**
-   * Returns the average size of rows produced by 'exchInput' when serialized for
-   * transmission through an exchange.
-   */
-  public static double getAvgSerializedRowSize(PlanNode exchInput) {
-    return exchInput.getAvgRowSize() +
-        (exchInput.getTupleIds().size() * PER_TUPLE_SERIALIZATION_OVERHEAD);
-  }
-
-  @Override
-  protected void toThrift(TPlanNode msg) {
-    msg.node_type = TPlanNodeType.EXCHANGE_NODE;
-    msg.exchange_node = new TExchangeNode();
-    for (TupleId tid: tupleIds_) {
-      msg.exchange_node.addToInput_row_tuples(tid.asInt());
-    }
-
-    if (mergeInfo_ != null) {
-      TSortInfo sortInfo = new TSortInfo(
-          Expr.treesToThrift(mergeInfo_.getOrderingExprs()), mergeInfo_.getIsAscOrder(),
-          mergeInfo_.getNullsFirst());
-      msg.exchange_node.setSort_info(sortInfo);
-      msg.exchange_node.setOffset(offset_);
-    }
-  }
-}
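
As an aside (not from the original file): a self-contained sketch of the
cardinality rule that ExchangeNode.computeStats() above applies: child
cardinalities are summed, -1 propagates as unknown, a limit caps the estimate,
and the merging-exchange offset is subtracted. The class and method names below
are made up for illustration.

    public class ExchangeCardinalitySketch {
      // Mirrors the arithmetic in ExchangeNode.computeStats(); limit < 0 means "no limit".
      static long estimate(long[] childCardinalities, long limit, long offset) {
        long cardinality = 0;
        for (long c : childCardinalities) {
          if (c == -1) { cardinality = -1; break; }  // unknown child => unknown total
          cardinality += c;
        }
        if (limit >= 0) {
          cardinality = (cardinality == -1) ? limit : Math.min(limit, cardinality);
        }
        if (cardinality > -1) cardinality = Math.max(0, cardinality - offset);
        return cardinality;
      }

      public static void main(String[] args) {
        // Two senders with 1000 and 500 rows, LIMIT 800 OFFSET 100 -> estimate of 700.
        System.out.println(estimate(new long[] {1000, 500}, 800, 100));
      }
    }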



http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/catalog/Type.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/catalog/Type.java b/fe/src/main/java/com/cloudera/impala/catalog/Type.java
deleted file mode 100644
index 0162ec6..0000000
--- a/fe/src/main/java/com/cloudera/impala/catalog/Type.java
+++ /dev/null
@@ -1,781 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.catalog;
-
-import java.io.StringReader;
-import java.util.ArrayList;
-import java.util.List;
-
-import com.cloudera.impala.analysis.CreateTableStmt;
-import com.cloudera.impala.analysis.SqlParser;
-import com.cloudera.impala.analysis.SqlScanner;
-import com.cloudera.impala.analysis.TypeDef;
-import com.cloudera.impala.common.Pair;
-import com.cloudera.impala.thrift.TColumnType;
-import com.cloudera.impala.thrift.TPrimitiveType;
-import com.cloudera.impala.thrift.TScalarType;
-import com.cloudera.impala.thrift.TStructField;
-import com.cloudera.impala.thrift.TTypeNode;
-import com.cloudera.impala.thrift.TTypeNodeType;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-/**
- * Abstract class describing an Impala data type (scalar/complex type).
- * Mostly contains static type instances and helper methods for convenience, as well
- * as abstract methods that subclasses must implement.
- */
-public abstract class Type {
-  // Maximum nesting depth of a type. This limit was determined experimentally by
-  // generating and scanning deeply nested Parquet and Avro files. In those experiments,
-  // we exceeded the stack space in the scanner (which uses recursion for dealing with
-  // nested types) at a nesting depth between 200 and 300 (200 worked, 300 crashed).
-  public static int MAX_NESTING_DEPTH = 100;
-
-  // Static constant types for scalar types that don't require additional information.
-  public static final ScalarType INVALID = new ScalarType(PrimitiveType.INVALID_TYPE);
-  public static final ScalarType NULL = new ScalarType(PrimitiveType.NULL_TYPE);
-  public static final ScalarType BOOLEAN = new ScalarType(PrimitiveType.BOOLEAN);
-  public static final ScalarType TINYINT = new ScalarType(PrimitiveType.TINYINT);
-  public static final ScalarType SMALLINT = new ScalarType(PrimitiveType.SMALLINT);
-  public static final ScalarType INT = new ScalarType(PrimitiveType.INT);
-  public static final ScalarType BIGINT = new ScalarType(PrimitiveType.BIGINT);
-  public static final ScalarType FLOAT = new ScalarType(PrimitiveType.FLOAT);
-  public static final ScalarType DOUBLE = new ScalarType(PrimitiveType.DOUBLE);
-  public static final ScalarType STRING = new ScalarType(PrimitiveType.STRING);
-  public static final ScalarType BINARY = new ScalarType(PrimitiveType.BINARY);
-  public static final ScalarType TIMESTAMP = new ScalarType(PrimitiveType.TIMESTAMP);
-  public static final ScalarType DATE = new ScalarType(PrimitiveType.DATE);
-  public static final ScalarType DATETIME = new ScalarType(PrimitiveType.DATETIME);
-  public static final ScalarType DEFAULT_DECIMAL = (ScalarType)
-      ScalarType.createDecimalType(ScalarType.DEFAULT_PRECISION,
-          ScalarType.DEFAULT_SCALE);
-  public static final ScalarType DECIMAL =
-      (ScalarType) ScalarType.createDecimalTypeInternal(-1, -1);
-  public static final ScalarType DEFAULT_VARCHAR = ScalarType.createVarcharType(-1);
-  public static final ScalarType VARCHAR = ScalarType.createVarcharType(-1);
-  public static final ScalarType CHAR = (ScalarType) ScalarType.createCharType(-1);
-
-  private static ArrayList<ScalarType> integerTypes;
-  private static ArrayList<ScalarType> numericTypes;
-  private static ArrayList<ScalarType> supportedTypes;
-  private static ArrayList<ScalarType> unsupportedTypes;
-
-  static {
-    integerTypes = Lists.newArrayList();
-    integerTypes.add(TINYINT);
-    integerTypes.add(SMALLINT);
-    integerTypes.add(INT);
-    integerTypes.add(BIGINT);
-
-    numericTypes = Lists.newArrayList();
-    numericTypes.add(TINYINT);
-    numericTypes.add(SMALLINT);
-    numericTypes.add(INT);
-    numericTypes.add(BIGINT);
-    numericTypes.add(FLOAT);
-    numericTypes.add(DOUBLE);
-    numericTypes.add(DECIMAL);
-
-    supportedTypes = Lists.newArrayList();
-    supportedTypes.add(NULL);
-    supportedTypes.add(BOOLEAN);
-    supportedTypes.add(TINYINT);
-    supportedTypes.add(SMALLINT);
-    supportedTypes.add(INT);
-    supportedTypes.add(BIGINT);
-    supportedTypes.add(FLOAT);
-    supportedTypes.add(DOUBLE);
-    supportedTypes.add(STRING);
-    supportedTypes.add(VARCHAR);
-    supportedTypes.add(CHAR);
-    supportedTypes.add(TIMESTAMP);
-    supportedTypes.add(DECIMAL);
-
-    unsupportedTypes = Lists.newArrayList();
-    unsupportedTypes.add(BINARY);
-    unsupportedTypes.add(DATE);
-    unsupportedTypes.add(DATETIME);
-  }
-
-  public static ArrayList<ScalarType> getIntegerTypes() {
-    return integerTypes;
-  }
-  public static ArrayList<ScalarType> getNumericTypes() {
-    return numericTypes;
-  }
-  public static ArrayList<ScalarType> getSupportedTypes() {
-    return supportedTypes;
-  }
-  public static ArrayList<ScalarType> getUnsupportedTypes() {
-    return unsupportedTypes;
-  }
-
-  /**
-   * The output of this is stored directly in the Hive metastore as the column type.
-   * The string must match exactly.
-   */
-  public final String toSql() { return toSql(0); }
-
-  /**
-   * Recursive helper for toSql() to be implemented by subclasses. Keeps track of the
-   * nesting depth and terminates the recursion if MAX_NESTING_DEPTH is reached.
-   */
-  protected abstract String toSql(int depth);
-
-  /**
-   * Same as toSql() but adds newlines and spaces for better readability of nested types.
-   */
-  public String prettyPrint() { return prettyPrint(0); }
-
-  /**
-   * Pretty prints this type with lpad number of leading spaces. Used to implement
-   * prettyPrint() with space-indented nested types.
-   */
-  protected abstract String prettyPrint(int lpad);
-
-  public boolean isInvalid() { return isScalarType(PrimitiveType.INVALID_TYPE); }
-  public boolean isValid() { return !isInvalid(); }
-  public boolean isNull() { return isScalarType(PrimitiveType.NULL_TYPE); }
-  public boolean isBoolean() { return isScalarType(PrimitiveType.BOOLEAN); }
-  public boolean isTimestamp() { return isScalarType(PrimitiveType.TIMESTAMP); }
-  public boolean isDecimal() { return isScalarType(PrimitiveType.DECIMAL); }
-  public boolean isDecimalOrNull() { return isDecimal() || isNull(); }
-  public boolean isFullySpecifiedDecimal() { return false; }
-  public boolean isWildcardDecimal() { return false; }
-  public boolean isWildcardVarchar() { return false; }
-  public boolean isWildcardChar() { return false; }
-
-  public boolean isStringType() {
-    return isScalarType(PrimitiveType.STRING) || isScalarType(PrimitiveType.VARCHAR) ||
-        isScalarType(PrimitiveType.CHAR);
-  }
-
-  public boolean isScalarType() { return this instanceof ScalarType; }
-  public boolean isScalarType(PrimitiveType t) {
-    return isScalarType() && ((ScalarType) this).getPrimitiveType() == t;
-  }
-
-  public boolean isFixedPointType() {
-    return isScalarType(PrimitiveType.TINYINT) || isScalarType(PrimitiveType.SMALLINT) ||
-        isScalarType(PrimitiveType.INT) || isScalarType(PrimitiveType.BIGINT) ||
-        isScalarType(PrimitiveType.DECIMAL);
-  }
-
-  public boolean isFloatingPointType() {
-    return isScalarType(PrimitiveType.FLOAT) || isScalarType(PrimitiveType.DOUBLE);
-  }
-
-  public boolean isIntegerType() {
-    return isScalarType(PrimitiveType.TINYINT) || isScalarType(PrimitiveType.SMALLINT)
-        || isScalarType(PrimitiveType.INT) || isScalarType(PrimitiveType.BIGINT);
-  }
-
-  // TODO: Handle complex types properly. Some instances may be fixed length.
-  public boolean isFixedLengthType() { return false; }
-
-  public boolean isNumericType() {
-    return isFixedPointType() || isFloatingPointType() || isDecimal();
-  }
-
-  public boolean isDateType() {
-    return isScalarType(PrimitiveType.DATE) || isScalarType(PrimitiveType.DATETIME)
-        || isScalarType(PrimitiveType.TIMESTAMP);
-  }
-
-  public boolean isComplexType() { return isStructType() || isCollectionType(); }
-  public boolean isCollectionType() { return isMapType() || isArrayType(); }
-  public boolean isMapType() { return this instanceof MapType; }
-  public boolean isArrayType() { return this instanceof ArrayType; }
-  public boolean isStructType() { return this instanceof StructType; }
-
-  /**
-   * Returns true if Impala supports this type in the metadata. It does not mean we
-   * can manipulate data of this type. For tables that contain columns with these
-   * types, we can safely skip over them.
-   */
-  public boolean isSupported() { return true; }
-
-  /**
-   * Indicates whether we support partitioning tables on columns of this type.
-   */
-  public boolean supportsTablePartitioning() { return false; }
-
-  public PrimitiveType getPrimitiveType() { return PrimitiveType.INVALID_TYPE; }
-
-  /**
-   * Returns the size in bytes of the fixed-length portion that a slot of this type
-   * occupies in a tuple.
-   */
-  public int getSlotSize() {
-    // 8-byte pointer and 4-byte length indicator (12 bytes total).
-    // Per struct alignment rules, there is an extra 4 bytes of padding to align to 8
-    // bytes so 16 bytes total.
-    if (isCollectionType()) return 16;
-    throw new IllegalStateException("getSlotSize() not implemented for type " + toSql());
-  }
-
-  public TColumnType toThrift() {
-    TColumnType container = new TColumnType();
-    container.setTypes(new ArrayList<TTypeNode>());
-    toThrift(container);
-    return container;
-  }
-
-  /**
-   * Subclasses should override this method to add themselves to the thrift container.
-   */
-  public abstract void toThrift(TColumnType container);
-
-  /**
-   * Returns true if this type is equal to t, or if t is a wildcard variant of this
-   * type. Subclasses should override this as appropriate. The default implementation
-   * returns false to avoid special-casing logic in callers for concrete types.
-   */
-  public boolean matchesType(Type t) { return false; }
-
-  /**
-   * Parses the given type string into a Type by using Impala's SqlParser.
-   * Returns null if the type string could not be parsed.
-   * The type can either be:
-   *   - Supported by Impala, in which case the type is returned.
-   *   - A type Impala understands but does not yet implement (e.g. date), in which
-   *     case the type is returned but isSupported() returns false.
-   *   - A type Impala can't understand at all in which case null is returned.
-   */
-  public static Type parseColumnType(String typeStr) {
-    // Wrap the type string in a CREATE TABLE stmt and use Impala's Parser
-    // to get the ColumnType.
-    // Pick a table name that can't be used.
-    String stmt = String.format("CREATE TABLE $DUMMY ($DUMMY %s)", typeStr);
-    SqlScanner input = new SqlScanner(new StringReader(stmt));
-    SqlParser parser = new SqlParser(input);
-    CreateTableStmt createTableStmt;
-    try {
-      Object o = parser.parse().value;
-      if (!(o instanceof CreateTableStmt)) {
-        // Should never get here.
-        throw new IllegalStateException("Couldn't parse create table stmt.");
-      }
-      createTableStmt = (CreateTableStmt) o;
-      if (createTableStmt.getColumnDefs().isEmpty()) {
-        // Should never get here.
-        throw new IllegalStateException("Invalid create table stmt.");
-      }
-    } catch (Exception e) {
-      return null;
-    }
-    TypeDef typeDef = createTableStmt.getColumnDefs().get(0).getTypeDef();
-    return typeDef.getType();
-  }
-
-  /**
-   * Returns true if t1 can be implicitly cast to t2 according to Impala's casting rules.
-   * Implicit casts are always allowed when no loss of precision would result (i.e. every
-   * value of t1 can be represented exactly by a value of t2). Implicit casts are allowed
-   * in certain other cases such as casting numeric types to floating point types and
-   * converting strings to timestamps.
-   * If strict is true, only consider casts that result in no loss of precision.
-   * TODO: Support casting of non-scalar types.
-   */
-  public static boolean isImplicitlyCastable(Type t1, Type t2, boolean strict) {
-    if (t1.isScalarType() && t2.isScalarType()) {
-      return ScalarType.isImplicitlyCastable(
-          (ScalarType) t1, (ScalarType) t2, strict);
-    }
-    return false;
-  }
-
-  /**
-   * Returns a type t such that values from both t1 and t2 can be assigned to t without an
-   * explicit cast. If strict, does not consider conversions that would result in loss
-   * of precision (e.g. converting decimal to float). Returns INVALID_TYPE if there is
-   * no such type or if any of t1 and t2 is INVALID_TYPE.
-   * TODO: Support non-scalar types.
-   */
-  public static Type getAssignmentCompatibleType(Type t1, Type t2, boolean strict) {
-    if (t1.isScalarType() && t2.isScalarType()) {
-      return ScalarType.getAssignmentCompatibleType(
-          (ScalarType) t1, (ScalarType) t2, strict);
-    }
-    return ScalarType.INVALID;
-  }
-
-  /**
-   * Returns true if this type exceeds the MAX_NESTING_DEPTH, false otherwise.
-   */
-  public boolean exceedsMaxNestingDepth() { return exceedsMaxNestingDepth(0); }
-
-  /**
-   * Helper for exceedsMaxNestingDepth(). Recursively computes the max nesting depth,
-   * terminating early if MAX_NESTING_DEPTH is reached. Returns true if this type
-   * exceeds the MAX_NESTING_DEPTH, false otherwise.
-   *
-   * Examples of types and their nesting depth:
-   * INT --> 1
-   * STRUCT<f1:INT> --> 2
-   * STRUCT<f1:STRUCT<f2:INT>> --> 3
-   * ARRAY<INT> --> 2
-   * ARRAY<STRUCT<f1:INT>> --> 3
-   * MAP<STRING,INT> --> 2
-   * MAP<STRING,STRUCT<f1:INT>> --> 3
-   */
-  private boolean exceedsMaxNestingDepth(int d) {
-    if (d >= MAX_NESTING_DEPTH) return true;
-    if (isStructType()) {
-      StructType structType = (StructType) this;
-      for (StructField f: structType.getFields()) {
-        if (f.getType().exceedsMaxNestingDepth(d + 1)) return true;
-      }
-    } else if (isArrayType()) {
-      ArrayType arrayType = (ArrayType) this;
-      if (arrayType.getItemType().exceedsMaxNestingDepth(d + 1)) return true;
-    } else if (isMapType()) {
-      MapType mapType = (MapType) this;
-      if (mapType.getValueType().exceedsMaxNestingDepth(d + 1)) return true;
-    } else {
-      Preconditions.checkState(isScalarType());
-    }
-    return false;
-  }
-
-  public static List<TColumnType> toThrift(Type[] types) {
-    return toThrift(Lists.newArrayList(types));
-  }
-
-  public static List<TColumnType> toThrift(ArrayList<Type> types) {
-    ArrayList<TColumnType> result = Lists.newArrayList();
-    for (Type t: types) {
-      result.add(t.toThrift());
-    }
-    return result;
-  }
-
-  public static Type fromThrift(TColumnType thrift) {
-    Preconditions.checkState(thrift.types.size() > 0);
-    Pair<Type, Integer> t = fromThrift(thrift, 0);
-    Preconditions.checkState(t.second.equals(thrift.getTypesSize()));
-    return t.first;
-  }
-
-  /**
-   * Constructs a ColumnType rooted at the TTypeNode at nodeIdx in TColumnType.
-   * Returned pair: The resulting ColumnType and the next nodeIdx that is not a child
-   * type of the result.
-   */
-  protected static Pair<Type, Integer> fromThrift(TColumnType col, int nodeIdx) {
-    TTypeNode node = col.getTypes().get(nodeIdx);
-    Type type = null;
-    switch (node.getType()) {
-      case SCALAR: {
-        Preconditions.checkState(node.isSetScalar_type());
-        TScalarType scalarType = node.getScalar_type();
-        if (scalarType.getType() == TPrimitiveType.CHAR) {
-          Preconditions.checkState(scalarType.isSetLen());
-          type = ScalarType.createCharType(scalarType.getLen());
-        } else if (scalarType.getType() == TPrimitiveType.VARCHAR) {
-          Preconditions.checkState(scalarType.isSetLen());
-          type = ScalarType.createVarcharType(scalarType.getLen());
-        } else if (scalarType.getType() == TPrimitiveType.DECIMAL) {
-          Preconditions.checkState(scalarType.isSetPrecision()
-              && scalarType.isSetScale());
-          type = ScalarType.createDecimalType(scalarType.getPrecision(),
-              scalarType.getScale());
-        } else {
-          type = ScalarType.createType(
-              PrimitiveType.fromThrift(scalarType.getType()));
-        }
-        ++nodeIdx;
-        break;
-      }
-      case ARRAY: {
-        Preconditions.checkState(nodeIdx + 1 < col.getTypesSize());
-        Pair<Type, Integer> childType = fromThrift(col, nodeIdx + 1);
-        type = new ArrayType(childType.first);
-        nodeIdx = childType.second;
-        break;
-      }
-      case MAP: {
-        Preconditions.checkState(nodeIdx + 2 < col.getTypesSize());
-        Pair<Type, Integer> keyType = fromThrift(col, nodeIdx + 1);
-        Pair<Type, Integer> valueType = fromThrift(col, keyType.second);
-        type = new MapType(keyType.first, valueType.first);
-        nodeIdx = valueType.second;
-        break;
-      }
-      case STRUCT: {
-        Preconditions.checkState(nodeIdx + node.getStruct_fieldsSize() < col.getTypesSize());
-        ArrayList<StructField> structFields = Lists.newArrayList();
-        ++nodeIdx;
-        for (int i = 0; i < node.getStruct_fieldsSize(); ++i) {
-          TStructField thriftField = node.getStruct_fields().get(i);
-          String name = thriftField.getName();
-          String comment = null;
-          if (thriftField.isSetComment()) comment = thriftField.getComment();
-          Pair<Type, Integer> res = fromThrift(col, nodeIdx);
-          nodeIdx = res.second.intValue();
-          structFields.add(new StructField(name, res.first, comment));
-        }
-        type = new StructType(structFields);
-        break;
-      }
-    }
-    return new Pair<Type, Integer>(type, nodeIdx);
-  }
-
-  /**
-   * Utility function to get the primitive type of a thrift type that is known
-   * to be scalar.
-   */
-  public TPrimitiveType getTPrimitiveType(TColumnType ttype) {
-    Preconditions.checkState(ttype.getTypesSize() == 1);
-    Preconditions.checkState(ttype.types.get(0).getType() == TTypeNodeType.SCALAR);
-    return ttype.types.get(0).scalar_type.getType();
-  }
-
-  /**
-   * JDBC data type description
-   * Returns the column size for this type.
-   * For numeric data this is the maximum precision.
-   * For character data this is the length in characters.
-   * For datetime types this is the length in characters of the String representation
-   * (assuming the maximum allowed precision of the fractional seconds component).
-   * For binary data this is the length in bytes.
-   * Null is returned for data types where the column size is not applicable.
-   */
-  public Integer getColumnSize() {
-    if (!isScalarType()) return null;
-    if (isNumericType()) return getPrecision();
-    ScalarType t = (ScalarType) this;
-    switch (t.getPrimitiveType()) {
-      case STRING:
-        return Integer.MAX_VALUE;
-      case TIMESTAMP:
-        return 29;
-      case CHAR:
-      case VARCHAR:
-        return t.getLength();
-      default:
-        return null;
-    }
-  }
-
-  /**
-   * JDBC data type description
-   * For numeric types, returns the maximum precision for this type.
-   * For non-numeric types, returns null.
-   */
-  public Integer getPrecision() {
-    if (!isScalarType()) return null;
-    ScalarType t = (ScalarType) this;
-    switch (t.getPrimitiveType()) {
-      case TINYINT:
-        return 3;
-      case SMALLINT:
-        return 5;
-      case INT:
-        return 10;
-      case BIGINT:
-        return 19;
-      case FLOAT:
-        return 7;
-      case DOUBLE:
-        return 15;
-      case DECIMAL:
-        return t.decimalPrecision();
-      default:
-        return null;
-    }
-  }
-
-  /**
-   * JDBC data type description
-   * Returns the number of fractional digits for this type, or null if not applicable.
-   * For timestamp/time types, returns the number of digits in the fractional seconds
-   * component.
-   */
-  public Integer getDecimalDigits() {
-    if (!isScalarType()) return null;
-    ScalarType t = (ScalarType) this;
-    switch (t.getPrimitiveType()) {
-      case BOOLEAN:
-      case TINYINT:
-      case SMALLINT:
-      case INT:
-      case BIGINT:
-        return 0;
-      case FLOAT:
-        return 7;
-      case DOUBLE:
-        return 15;
-      case TIMESTAMP:
-        return 9;
-      case DECIMAL:
-        return t.decimalScale();
-      default:
-        return null;
-    }
-  }
-
-  /**
-   * JDBC data type description
-   * For numeric data types, either 10 or 2. If it is 10, the values in COLUMN_SIZE
-   * and DECIMAL_DIGITS give the number of decimal digits allowed for the column.
-   * For example, a DECIMAL(12,5) column would return a NUM_PREC_RADIX of 10,
-   * a COLUMN_SIZE of 12, and a DECIMAL_DIGITS of 5; a FLOAT column could return
-   * a NUM_PREC_RADIX of 10, a COLUMN_SIZE of 15, and a DECIMAL_DIGITS of NULL.
-   * If it is 2, the values in COLUMN_SIZE and DECIMAL_DIGITS give the number of bits
-   * allowed in the column. For example, a FLOAT column could return a RADIX of 2,
-   * a COLUMN_SIZE of 53, and a DECIMAL_DIGITS of NULL. NULL is returned for data
-   * types where NUM_PREC_RADIX is not applicable.
-   */
-  public Integer getNumPrecRadix() {
-    if (!isScalarType()) return null;
-    ScalarType t = (ScalarType) this;
-    switch (t.getPrimitiveType()) {
-      case TINYINT:
-      case SMALLINT:
-      case INT:
-      case BIGINT:
-      case FLOAT:
-      case DOUBLE:
-      case DECIMAL:
-        return 10;
-      default:
-        // everything else (including boolean and string) is null
-        return null;
-    }
-  }
-
-  /**
-   * JDBC data type description
-   * Returns the java SQL type enum
-   */
-  public int getJavaSqlType() {
-    if (isStructType()) return java.sql.Types.STRUCT;
-    // Both MAP and ARRAY are reported as ARRAY, since there is no better matching
-    // Java SQL type. This behavior is consistent with Hive.
-    if (isCollectionType()) return java.sql.Types.ARRAY;
-
-    Preconditions.checkState(isScalarType(), "Invalid non-scalar type: " + toSql());
-    ScalarType t = (ScalarType) this;
-    switch (t.getPrimitiveType()) {
-      case NULL_TYPE: return java.sql.Types.NULL;
-      case BOOLEAN: return java.sql.Types.BOOLEAN;
-      case TINYINT: return java.sql.Types.TINYINT;
-      case SMALLINT: return java.sql.Types.SMALLINT;
-      case INT: return java.sql.Types.INTEGER;
-      case BIGINT: return java.sql.Types.BIGINT;
-      case FLOAT: return java.sql.Types.FLOAT;
-      case DOUBLE: return java.sql.Types.DOUBLE;
-      case TIMESTAMP: return java.sql.Types.TIMESTAMP;
-      case STRING: return java.sql.Types.VARCHAR;
-      case CHAR: return java.sql.Types.CHAR;
-      case VARCHAR: return java.sql.Types.VARCHAR;
-      case BINARY: return java.sql.Types.BINARY;
-      case DECIMAL: return java.sql.Types.DECIMAL;
-      default:
-        Preconditions.checkArgument(false, "Invalid primitive type " +
-            t.getPrimitiveType().name());
-        return 0;
-    }
-  }
-
-  /**
-   * Matrix that records "smallest" assignment-compatible type of two types
-   * (INVALID_TYPE if no such type exists, i.e., if the input types are fundamentally
-   * incompatible). A value of any of the two types could be assigned to a slot
-   * of the assignment-compatible type. For strict compatibility, this can be done
-   * without any loss of precision. For non-strict compatibility, there may be loss of
-   * precision, e.g. if converting from BIGINT to FLOAT.
-   *
-   * We chose not to follow MySQL's type casting behavior as described here:
-   * http://dev.mysql.com/doc/refman/5.0/en/type-conversion.html
-   * for the following reasons:
-   * conservative casting in arithmetic exprs: TINYINT + TINYINT -> BIGINT
-   * comparison of many types as double: INT < FLOAT -> comparison as DOUBLE
-   * special cases when dealing with dates and timestamps.
-   */
-  protected static PrimitiveType[][] compatibilityMatrix;
-
-  /**
-   * If we are checking in strict mode, any non-null entry in this matrix overrides
-   * compatibilityMatrix. If the entry is null, the entry in compatibility matrix
-   * is valid.
-   */
-  protected static PrimitiveType[][] strictCompatibilityMatrix;
-
-  static {
-    compatibilityMatrix = new
-        PrimitiveType[PrimitiveType.values().length][PrimitiveType.values().length];
-    strictCompatibilityMatrix = new
-        PrimitiveType[PrimitiveType.values().length][PrimitiveType.values().length];
-
-    for (int i = 0; i < PrimitiveType.values().length; ++i) {
-      // Each type is compatible with itself.
-      compatibilityMatrix[i][i] = PrimitiveType.values()[i];
-      // BINARY is not supported.
-      compatibilityMatrix[BINARY.ordinal()][i] = PrimitiveType.INVALID_TYPE;
-      compatibilityMatrix[i][BINARY.ordinal()] = PrimitiveType.INVALID_TYPE;
-    }
-
-    compatibilityMatrix[BOOLEAN.ordinal()][TINYINT.ordinal()] = PrimitiveType.TINYINT;
-    compatibilityMatrix[BOOLEAN.ordinal()][SMALLINT.ordinal()] = PrimitiveType.SMALLINT;
-    compatibilityMatrix[BOOLEAN.ordinal()][INT.ordinal()] = PrimitiveType.INT;
-    compatibilityMatrix[BOOLEAN.ordinal()][BIGINT.ordinal()] = PrimitiveType.BIGINT;
-    compatibilityMatrix[BOOLEAN.ordinal()][FLOAT.ordinal()] = PrimitiveType.FLOAT;
-    compatibilityMatrix[BOOLEAN.ordinal()][DOUBLE.ordinal()] = PrimitiveType.DOUBLE;
-    compatibilityMatrix[BOOLEAN.ordinal()][DATE.ordinal()] = PrimitiveType.INVALID_TYPE;
-    compatibilityMatrix[BOOLEAN.ordinal()][DATETIME.ordinal()] =
-        PrimitiveType.INVALID_TYPE;
-    compatibilityMatrix[BOOLEAN.ordinal()][TIMESTAMP.ordinal()] =
-        PrimitiveType.INVALID_TYPE;
-    compatibilityMatrix[BOOLEAN.ordinal()][STRING.ordinal()] =
-        PrimitiveType.INVALID_TYPE;
-    compatibilityMatrix[BOOLEAN.ordinal()][VARCHAR.ordinal()] =
-        PrimitiveType.INVALID_TYPE;
-    compatibilityMatrix[BOOLEAN.ordinal()][CHAR.ordinal()] = PrimitiveType.INVALID_TYPE;
-
-    compatibilityMatrix[TINYINT.ordinal()][SMALLINT.ordinal()] = PrimitiveType.SMALLINT;
-    compatibilityMatrix[TINYINT.ordinal()][INT.ordinal()] = PrimitiveType.INT;
-    compatibilityMatrix[TINYINT.ordinal()][BIGINT.ordinal()] = PrimitiveType.BIGINT;
-    // 8 bit integer fits in mantissa of both float and double.
-    compatibilityMatrix[TINYINT.ordinal()][FLOAT.ordinal()] = PrimitiveType.FLOAT;
-    compatibilityMatrix[TINYINT.ordinal()][DOUBLE.ordinal()] = PrimitiveType.DOUBLE;
-    compatibilityMatrix[TINYINT.ordinal()][DATE.ordinal()] = PrimitiveType.INVALID_TYPE;
-    compatibilityMatrix[TINYINT.ordinal()][DATETIME.ordinal()] =
-        PrimitiveType.INVALID_TYPE;
-    compatibilityMatrix[TINYINT.ordinal()][TIMESTAMP.ordinal()] =
-        PrimitiveType.INVALID_TYPE;
-    compatibilityMatrix[TINYINT.ordinal()][STRING.ordinal()] =
-        PrimitiveType.INVALID_TYPE;
-    compatibilityMatrix[TINYINT.ordinal()][VARCHAR.ordinal()] =
-        PrimitiveType.INVALID_TYPE;
-    compatibilityMatrix[TINYINT.ordinal()][CHAR.ordinal()] = PrimitiveType.INVALID_TYPE;
-
-    compatibilityMatrix[SMALLINT.ordinal()][INT.ordinal()] = PrimitiveType.INT;
-    compatibilityMatrix[SMALLINT.ordinal()][BIGINT.ordinal()] = PrimitiveType.BIGINT;
-    // 16 bit integer fits in mantissa of both float and double.
-    compatibilityMatrix[SMALLINT.ordinal()][FLOAT.ordinal()] = PrimitiveType.FLOAT;
-    compatibilityMatrix[SMALLINT.ordinal()][DOUBLE.ordinal()] = PrimitiveType.DOUBLE;
-    compatibilityMatrix[SMALLINT.ordinal()][DATE.ordinal()] = PrimitiveType.INVALID_TYPE;
-    compatibilityMatrix[SMALLINT.ordinal()][DATETIME.ordinal()] =
-        PrimitiveType.INVALID_TYPE;
-    compatibilityMatrix[SMALLINT.ordinal()][TIMESTAMP.ordinal()] =
-        PrimitiveType.INVALID_TYPE;
-    compatibilityMatrix[SMALLINT.ordinal()][STRING.ordinal()] =
-        PrimitiveType.INVALID_TYPE;
-    compatibilityMatrix[SMALLINT.ordinal()][VARCHAR.ordinal()] =
-        PrimitiveType.INVALID_TYPE;
-    compatibilityMatrix[SMALLINT.ordinal()][CHAR.ordinal()] = PrimitiveType.INVALID_TYPE;
-
-    compatibilityMatrix[INT.ordinal()][BIGINT.ordinal()] = PrimitiveType.BIGINT;
-    // 32 bit integer fits only mantissa of double.
-    // TODO: arguably we should promote INT + FLOAT to DOUBLE to avoid loss of precision,
-    // but we depend on it remaining FLOAT for some use cases, e.g.
-    // "insert into tbl (float_col) select int_col + float_col from ..."
-    compatibilityMatrix[INT.ordinal()][FLOAT.ordinal()] = PrimitiveType.FLOAT;
-    strictCompatibilityMatrix[INT.ordinal()][FLOAT.ordinal()] = PrimitiveType.DOUBLE;
-    compatibilityMatrix[INT.ordinal()][DOUBLE.ordinal()] = PrimitiveType.DOUBLE;
-    compatibilityMatrix[INT.ordinal()][DATE.ordinal()] = PrimitiveType.INVALID_TYPE;
-    compatibilityMatrix[INT.ordinal()][DATETIME.ordinal()] = PrimitiveType.INVALID_TYPE;
-    compatibilityMatrix[INT.ordinal()][TIMESTAMP.ordinal()] = PrimitiveType.INVALID_TYPE;
-    compatibilityMatrix[INT.ordinal()][STRING.ordinal()] = PrimitiveType.INVALID_TYPE;
-    compatibilityMatrix[INT.ordinal()][VARCHAR.ordinal()] = PrimitiveType.INVALID_TYPE;
-    compatibilityMatrix[INT.ordinal()][CHAR.ordinal()] = PrimitiveType.INVALID_TYPE;
-
-    // 64 bit integer does not fit in mantissa of double or float.
-    // TODO: arguably we should always promote BIGINT + FLOAT to double here to keep as
-    // much precision as possible, but we depend on this implicit cast for some use
-    // cases, similarly to INT + FLOAT.
-    compatibilityMatrix[BIGINT.ordinal()][FLOAT.ordinal()] = PrimitiveType.FLOAT;
-    strictCompatibilityMatrix[BIGINT.ordinal()][FLOAT.ordinal()] = PrimitiveType.DOUBLE;
-    // TODO: we're breaking the definition of strict compatibility for BIGINT + DOUBLE,
-    // but this forces function overloading to consider the DOUBLE overload first.
-    compatibilityMatrix[BIGINT.ordinal()][DOUBLE.ordinal()] = PrimitiveType.DOUBLE;
-    compatibilityMatrix[BIGINT.ordinal()][DATE.ordinal()] = PrimitiveType.INVALID_TYPE;
-    compatibilityMatrix[BIGINT.ordinal()][DATETIME.ordinal()] =
-        PrimitiveType.INVALID_TYPE;
-    compatibilityMatrix[BIGINT.ordinal()][TIMESTAMP.ordinal()] =
-        PrimitiveType.INVALID_TYPE;
-    compatibilityMatrix[BIGINT.ordinal()][STRING.ordinal()] = PrimitiveType.INVALID_TYPE;
-    compatibilityMatrix[BIGINT.ordinal()][VARCHAR.ordinal()] = PrimitiveType.INVALID_TYPE;
-    compatibilityMatrix[BIGINT.ordinal()][CHAR.ordinal()] = PrimitiveType.INVALID_TYPE;
-
-    compatibilityMatrix[FLOAT.ordinal()][DOUBLE.ordinal()] = PrimitiveType.DOUBLE;
-    compatibilityMatrix[FLOAT.ordinal()][DATE.ordinal()] = PrimitiveType.INVALID_TYPE;
-    compatibilityMatrix[FLOAT.ordinal()][DATETIME.ordinal()] = PrimitiveType.INVALID_TYPE;
-    compatibilityMatrix[FLOAT.ordinal()][TIMESTAMP.ordinal()] =
-        PrimitiveType.INVALID_TYPE;
-    compatibilityMatrix[FLOAT.ordinal()][STRING.ordinal()] = PrimitiveType.INVALID_TYPE;
-    compatibilityMatrix[FLOAT.ordinal()][VARCHAR.ordinal()] = PrimitiveType.INVALID_TYPE;
-    compatibilityMatrix[FLOAT.ordinal()][CHAR.ordinal()] = PrimitiveType.INVALID_TYPE;
-
-    compatibilityMatrix[DOUBLE.ordinal()][DATE.ordinal()] = PrimitiveType.INVALID_TYPE;
-    compatibilityMatrix[DOUBLE.ordinal()][DATETIME.ordinal()] =
-        PrimitiveType.INVALID_TYPE;
-    compatibilityMatrix[DOUBLE.ordinal()][TIMESTAMP.ordinal()] =
-        PrimitiveType.INVALID_TYPE;
-    compatibilityMatrix[DOUBLE.ordinal()][STRING.ordinal()] = PrimitiveType.INVALID_TYPE;
-    compatibilityMatrix[DOUBLE.ordinal()][VARCHAR.ordinal()] = PrimitiveType.INVALID_TYPE;
-    compatibilityMatrix[DOUBLE.ordinal()][CHAR.ordinal()] = PrimitiveType.INVALID_TYPE;
-
-    compatibilityMatrix[DATE.ordinal()][DATETIME.ordinal()] = PrimitiveType.DATETIME;
-    compatibilityMatrix[DATE.ordinal()][TIMESTAMP.ordinal()] = PrimitiveType.TIMESTAMP;
-    compatibilityMatrix[DATE.ordinal()][STRING.ordinal()] = PrimitiveType.INVALID_TYPE;
-    compatibilityMatrix[DATE.ordinal()][VARCHAR.ordinal()] = PrimitiveType.INVALID_TYPE;
-    compatibilityMatrix[DATE.ordinal()][CHAR.ordinal()] = PrimitiveType.INVALID_TYPE;
-
-    compatibilityMatrix[DATETIME.ordinal()][TIMESTAMP.ordinal()] =
-        PrimitiveType.TIMESTAMP;
-    compatibilityMatrix[DATETIME.ordinal()][STRING.ordinal()] =
-        PrimitiveType.INVALID_TYPE;
-    compatibilityMatrix[DATETIME.ordinal()][VARCHAR.ordinal()] =
-        PrimitiveType.INVALID_TYPE;
-    compatibilityMatrix[DATETIME.ordinal()][CHAR.ordinal()] =
-        PrimitiveType.INVALID_TYPE;
-
-    // We can convert some but not all string values to timestamps.
-    compatibilityMatrix[TIMESTAMP.ordinal()][STRING.ordinal()] =
-        PrimitiveType.TIMESTAMP;
-    strictCompatibilityMatrix[TIMESTAMP.ordinal()][STRING.ordinal()] =
-        PrimitiveType.INVALID_TYPE;
-    compatibilityMatrix[TIMESTAMP.ordinal()][VARCHAR.ordinal()] =
-        PrimitiveType.INVALID_TYPE;
-    compatibilityMatrix[TIMESTAMP.ordinal()][CHAR.ordinal()] = PrimitiveType.INVALID_TYPE;
-
-    compatibilityMatrix[STRING.ordinal()][VARCHAR.ordinal()] = PrimitiveType.STRING;
-    compatibilityMatrix[STRING.ordinal()][CHAR.ordinal()] = PrimitiveType.STRING;
-
-    compatibilityMatrix[VARCHAR.ordinal()][CHAR.ordinal()] = PrimitiveType.INVALID_TYPE;
-
-    // Check all of the necessary entries that should be filled.
-    for (int i = 0; i < PrimitiveType.values().length; ++i) {
-      for (int j = i; j < PrimitiveType.values().length; ++j) {
-        PrimitiveType t1 = PrimitiveType.values()[i];
-        PrimitiveType t2 = PrimitiveType.values()[j];
-        // DECIMAL, NULL, and INVALID_TYPE are handled separately.
-        if (t1 == PrimitiveType.INVALID_TYPE ||
-            t2 == PrimitiveType.INVALID_TYPE) continue;
-        if (t1 == PrimitiveType.NULL_TYPE || t2 == PrimitiveType.NULL_TYPE) continue;
-        if (t1 == PrimitiveType.DECIMAL || t2 == PrimitiveType.DECIMAL) continue;
-        Preconditions.checkNotNull(compatibilityMatrix[i][j]);
-      }
-    }
-  }
-}
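
As an aside (not from the original file): a self-contained sketch that reproduces
the nesting-depth examples documented on exceedsMaxNestingDepth() above. The tiny
type model and the class name are hypothetical; the real code short-circuits once
MAX_NESTING_DEPTH is reached and, for maps, descends only into the value type.

    import java.util.Arrays;

    public class NestingDepthSketch {
      interface T { int depth(); }
      static final T SCALAR = () -> 1;                        // e.g. INT, STRING
      static T array(T item) { return () -> 1 + item.depth(); }
      static T map(T value) { return () -> 1 + value.depth(); }
      static T struct(T... fields) {
        return () -> 1 + Arrays.stream(fields).mapToInt(T::depth).max().orElse(0);
      }

      public static void main(String[] args) {
        System.out.println(SCALAR.depth());                   // INT                        -> 1
        System.out.println(struct(SCALAR).depth());           // STRUCT<f1:INT>             -> 2
        System.out.println(array(struct(SCALAR)).depth());    // ARRAY<STRUCT<f1:INT>>      -> 3
        System.out.println(map(struct(SCALAR)).depth());      // MAP<STRING,STRUCT<f1:INT>> -> 3
      }
    }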

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/catalog/View.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/catalog/View.java b/fe/src/main/java/com/cloudera/impala/catalog/View.java
deleted file mode 100644
index cc82f95..0000000
--- a/fe/src/main/java/com/cloudera/impala/catalog/View.java
+++ /dev/null
@@ -1,207 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.catalog;
-
-import java.io.StringReader;
-import java.util.List;
-import java.util.Set;
-
-import org.apache.hadoop.hive.metastore.IMetaStoreClient;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-
-import com.cloudera.impala.analysis.ParseNode;
-import com.cloudera.impala.analysis.QueryStmt;
-import com.cloudera.impala.analysis.SqlParser;
-import com.cloudera.impala.analysis.SqlScanner;
-import com.cloudera.impala.thrift.TCatalogObjectType;
-import com.cloudera.impala.thrift.TTable;
-import com.cloudera.impala.thrift.TTableDescriptor;
-import com.cloudera.impala.thrift.TTableType;
-import com.google.common.collect.Lists;
-
-/**
- * Table metadata representing a catalog view or a local view from a WITH clause.
- * Most methods inherited from Table are not supposed to be called on this class because
- * views are substituted with their underlying definition during analysis of a statement.
- *
- * Refreshing or invalidating a view will reload the view's definition but will not
- * affect the metadata of the underlying tables (if any).
- */
-public class View extends Table {
-
-  // The original SQL-string given as view definition. Set during analysis.
-  // Corresponds to Hive's viewOriginalText.
-  private String originalViewDef_;
-
-  // Query statement (as SQL string) that defines the View for view substitution.
-  // It is a transformation of the original view definition, e.g., to enforce the
-  // explicit column definitions even if the original view definition has explicit
-  // column aliases.
-  // If column definitions were given, then this "expanded" view definition
-  // wraps the original view definition in a select stmt as follows.
-  //
-  // SELECT viewName.origCol1 AS colDesc1, viewName.origCol2 AS colDesc2, ...
-  // FROM (originalViewDef) AS viewName
-  //
-  // Corresponds to Hive's viewExpandedText, but is not identical to the SQL
-  // Hive would produce in view creation.
-  private String inlineViewDef_;
-
-  // View definition created by parsing inlineViewDef_ into a QueryStmt.
-  private QueryStmt queryStmt_;
-
-  // Set if this View is from a WITH clause and not persisted in the catalog.
-  private final boolean isLocalView_;
-
-  // Set if this View is from a WITH clause with column labels.
-  private List<String> colLabels_;
-
-  public View(TableId id, org.apache.hadoop.hive.metastore.api.Table msTable,
-      Db db, String name, String owner) {
-    super(id, msTable, db, name, owner);
-    isLocalView_ = false;
-  }
-
-  /**
-   * C'tor for WITH-clause views that already have a parsed QueryStmt and an optional
-   * list of column labels.
-   */
-  public View(String alias, QueryStmt queryStmt, List<String> colLabels) {
-    super(null, null, null, alias, null);
-    isLocalView_ = true;
-    queryStmt_ = queryStmt;
-    colLabels_ = colLabels;
-  }
-
-  /**
-   * Creates a view for testing purposes.
-   */
-  private View(Db db, String name, QueryStmt queryStmt) {
-    super(null, null, db, name, null);
-    isLocalView_ = false;
-    queryStmt_ = queryStmt;
-    colLabels_ = null;
-  }
-
-  @Override
-  public void load(boolean reuseMetadata, IMetaStoreClient client,
-      org.apache.hadoop.hive.metastore.api.Table msTbl) throws TableLoadingException {
-    try {
-      clearColumns();
-      msTable_ = msTbl;
-      // Load columns.
-      List<FieldSchema> fieldSchemas = client.getFields(db_.getName(), name_);
-      for (int i = 0; i < fieldSchemas.size(); ++i) {
-        FieldSchema s = fieldSchemas.get(i);
-        Type type = parseColumnType(s);
-        Column col = new Column(s.getName(), type, s.getComment(), i);
-        addColumn(col);
-      }
-      // These fields are irrelevant for views.
-      numClusteringCols_ = 0;
-      numRows_ = -1;
-      init();
-    } catch (TableLoadingException e) {
-      throw e;
-    } catch (Exception e) {
-      throw new TableLoadingException("Failed to load metadata for view: " + name_, e);
-    }
-  }
-
-  @Override
-  protected void loadFromThrift(TTable t) throws TableLoadingException {
-    super.loadFromThrift(t);
-    init();
-  }
-
-  /**
-   * Initializes the originalViewDef_, inlineViewDef_, and queryStmt_ members
-   * by parsing the expanded view definition SQL-string.
-   * Throws a TableLoadingException if there was any error parsing the SQL or if
-   * the view definition did not parse into a QueryStmt.
-   */
-  private void init() throws TableLoadingException {
-    // Set view-definition SQL strings.
-    originalViewDef_ = getMetaStoreTable().getViewOriginalText();
-    inlineViewDef_ = getMetaStoreTable().getViewExpandedText();
-    // Parse the expanded view definition SQL-string into a QueryStmt and
-    // populate a view definition.
-    SqlScanner input = new SqlScanner(new StringReader(inlineViewDef_));
-    SqlParser parser = new SqlParser(input);
-    ParseNode node = null;
-    try {
-      node = (ParseNode) parser.parse().value;
-    } catch (Exception e) {
-      // Do not pass e as the exception cause because it might reveal the existence
-      // of tables that the user triggering this load may not have privileges on.
-      throw new TableLoadingException(
-          String.format("Failed to parse view-definition statement of view: " +
-              "%s.%s", db_.getName(), name_));
-    }
-    // Make sure the view definition parses to a query statement.
-    if (!(node instanceof QueryStmt)) {
-      throw new TableLoadingException(String.format("View definition of %s.%s " +
-          "is not a query statement", db_.getName(), name_));
-    }
-    queryStmt_ = (QueryStmt) node;
-  }
-
-  @Override
-  public TCatalogObjectType getCatalogObjectType() { return TCatalogObjectType.VIEW; }
-  public QueryStmt getQueryStmt() { return queryStmt_; }
-  public String getOriginalViewDef() { return originalViewDef_; }
-  public String getInlineViewDef() { return inlineViewDef_; }
-  public boolean isLocalView() { return isLocalView_; }
-
-  /**
-   * Returns the column labels the user specified in the WITH-clause.
-   */
-  public List<String> getOriginalColLabels() { return colLabels_; }
-
-  /**
-   * Returns the explicit column labels for this view, or null if they need to be derived
-   * entirely from the underlying query statement. The returned list has at least as many
-   * elements as the number of column labels in the query stmt.
-   */
-  public List<String> getColLabels() {
-    if (colLabels_ == null) return null;
-    if (colLabels_.size() >= queryStmt_.getColLabels().size()) return colLabels_;
-    List<String> explicitColLabels = Lists.newArrayList(colLabels_);
-    explicitColLabels.addAll(queryStmt_.getColLabels().subList(
-        colLabels_.size(), queryStmt_.getColLabels().size()));
-    return explicitColLabels;
-  }
-
-  public boolean hasColLabels() { return colLabels_ != null; }
-
-  @Override
-  public TTableDescriptor toThriftDescriptor(Set<Long> referencedPartitions) {
-    throw new IllegalStateException("Cannot call toThriftDescriptor() on a view.");
-  }
-
-  @Override
-  public TTable toThrift() {
-    TTable view = super.toThrift();
-    view.setTable_type(TTableType.VIEW);
-    return view;
-  }
-
-  public static View createTestView(Db db, String name, QueryStmt viewDefStmt) {
-    return new View(db, name, viewDefStmt);
-  }
-}
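
As an aside (not from the original file): a self-contained sketch of the
label-padding rule in View.getColLabels() above. Explicit WITH-clause labels win,
and any remaining columns keep the labels produced by the underlying query
statement. Class and method names are hypothetical.

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    public class ViewColLabelsSketch {
      static List<String> colLabels(List<String> explicitLabels, List<String> stmtLabels) {
        if (explicitLabels == null) return null;  // derive everything from the statement
        if (explicitLabels.size() >= stmtLabels.size()) return explicitLabels;
        List<String> result = new ArrayList<>(explicitLabels);
        result.addAll(stmtLabels.subList(explicitLabels.size(), stmtLabels.size()));
        return result;
      }

      public static void main(String[] args) {
        // WITH v(a, b) AS (SELECT c1, c2, c3 ...) -> labels [a, b, c3].
        System.out.println(colLabels(Arrays.asList("a", "b"), Arrays.asList("c1", "c2", "c3")));
      }
    }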

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/catalog/delegates/DdlDelegate.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/catalog/delegates/DdlDelegate.java b/fe/src/main/java/com/cloudera/impala/catalog/delegates/DdlDelegate.java
deleted file mode 100644
index a21bd90..0000000
--- a/fe/src/main/java/com/cloudera/impala/catalog/delegates/DdlDelegate.java
+++ /dev/null
@@ -1,75 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.catalog.delegates;
-
-import java.util.List;
-
-import com.cloudera.impala.thrift.TDistributeParam;
-import org.apache.hadoop.hive.metastore.api.Table;
-
-import com.cloudera.impala.common.ImpalaRuntimeException;
-import com.cloudera.impala.thrift.TAlterTableParams;
-
-/**
- * Abstract class to implement the storage specific portion of DDL requests.
- *
- * During catalog DDL operations the CatalogOpExecutor will instantiate the correct
- * subclass of this class to handle the DDL operation to the storage backend. See,
- * CatalogOpExecutor::createDDLDelegate() for details.
- *
- */
-public abstract class DdlDelegate {
-
-  protected Table msTbl_;
-  protected TAlterTableParams tAlterTableParams_;
-  protected List<TDistributeParam> distributeParams_;
-
-  /**
-   * Creates a new delegate to modify Table 'msTbl'.
-   */
-  public DdlDelegate setMsTbl(Table msTbl) {
-    msTbl_ = msTbl;
-    return this;
-  }
-
-  public DdlDelegate setAlterTableParams(TAlterTableParams p) {
-    tAlterTableParams_ = p;
-    return this;
-  }
-
-  public DdlDelegate setDistributeParams(List<TDistributeParam> p) {
-    distributeParams_ = p;
-    return this;
-  }
-
-  /**
-   * Creates the table.
-   */
-  public abstract void createTable() throws ImpalaRuntimeException;
-
-  /**
-   * Drops the table.
-   */
-  public abstract void dropTable() throws ImpalaRuntimeException;
-
-  /**
-   * Performs an alter table with the parameters set with setAlterTableParams().
-   */
-  public abstract boolean alterTable() throws ImpalaRuntimeException;
-
-}
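
As an aside (not from the original file): a minimal sketch of how a caller such as
CatalogOpExecutor might drive a delegate through the fluent setters above. The
variables msTbl and distributeParams are hypothetical inputs; KuduDdlDelegate is
the concrete delegate in the next file of this patch.

    // Hypothetical call site; only the DdlDelegate/KuduDdlDelegate API comes from the patch.
    DdlDelegate delegate = new KuduDdlDelegate(msTbl)
        .setDistributeParams(distributeParams);
    delegate.createTable();  // propagates the CREATE TABLE to the storage backend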

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/catalog/delegates/KuduDdlDelegate.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/catalog/delegates/KuduDdlDelegate.java b/fe/src/main/java/com/cloudera/impala/catalog/delegates/KuduDdlDelegate.java
deleted file mode 100644
index ecfeb1a..0000000
--- a/fe/src/main/java/com/cloudera/impala/catalog/delegates/KuduDdlDelegate.java
+++ /dev/null
@@ -1,190 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.catalog.delegates;
-
-import static com.cloudera.impala.util.KuduUtil.compareSchema;
-import static com.cloudera.impala.util.KuduUtil.fromImpalaType;
-import static com.cloudera.impala.util.KuduUtil.parseKeyColumns;
-
-import java.util.ArrayList;
-import java.util.HashSet;
-import java.util.List;
-
-import org.apache.hadoop.hive.metastore.TableType;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.kudu.ColumnSchema;
-import org.apache.kudu.ColumnSchema.ColumnSchemaBuilder;
-import org.apache.kudu.Schema;
-import org.apache.kudu.Type;
-import org.apache.kudu.client.CreateTableOptions;
-import org.apache.kudu.client.KuduClient;
-import org.apache.kudu.client.PartialRow;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.catalog.KuduTable;
-import com.cloudera.impala.common.ImpalaRuntimeException;
-import com.cloudera.impala.thrift.TDistributeParam;
-import com.cloudera.impala.util.KuduUtil;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Strings;
-import com.google.common.collect.Lists;
-
-
-/**
- * Implementation of the Kudu DDL Delegate. Propagates create and drop table statements to
- * Kudu.
- */
-public class KuduDdlDelegate extends DdlDelegate {
-
-  private static final Logger LOG = LoggerFactory.getLogger(KuduDdlDelegate.class);
-
-  public KuduDdlDelegate(Table msTbl) {
-    setMsTbl(msTbl);
-  }
-
-  /**
-   * Creates the Kudu table if it does not exist. If the table already exists and is
-   * not a managed table, the existing schema is verified and the call returns;
-   * otherwise an exception is thrown.
-   */
-  @Override
-  public void createTable()
-      throws ImpalaRuntimeException {
-
-    String kuduTableName = msTbl_.getParameters().get(KuduTable.KEY_TABLE_NAME);
-    String kuduMasters = msTbl_.getParameters().get(KuduTable.KEY_MASTER_ADDRESSES);
-
-    // Can be optional for un-managed tables
-    String kuduKeyCols = msTbl_.getParameters().get(KuduTable.KEY_KEY_COLUMNS);
-
-    String replication = msTbl_.getParameters().get(KuduTable.KEY_TABLET_REPLICAS);
-
-    try (KuduClient client = new KuduClient.KuduClientBuilder(kuduMasters).build()) {
-      // TODO should we throw if the table does not exist when it's an external table?
-      if (client.tableExists(kuduTableName)) {
-        if (msTbl_.getTableType().equals(TableType.MANAGED_TABLE.toString())) {
-          throw new ImpalaRuntimeException(String.format(
-              "Table %s already exists in Kudu master %s.", kuduTableName, kuduMasters));
-        }
-
-        // Check if the external table matches the schema
-        org.apache.kudu.client.KuduTable kuduTable = client.openTable(kuduTableName);
-        if (!compareSchema(msTbl_, kuduTable)) {
-          throw new ImpalaRuntimeException(String.format(
-              "Table %s (%s) has a different schema in Kudu than in Hive.",
-              msTbl_.getTableName(), kuduTableName));
-        }
-        return;
-      }
-
-      HashSet<String> keyColNames = parseKeyColumns(kuduKeyCols);
-      List<ColumnSchema> keyColSchemas = new ArrayList<>();
-
-      // Create a new Schema and map the types accordingly
-      ArrayList<ColumnSchema> columns = Lists.newArrayList();
-      for (FieldSchema fieldSchema: msTbl_.getSd().getCols()) {
-        com.cloudera.impala.catalog.Type catalogType = com.cloudera.impala.catalog.Type
-            .parseColumnType(fieldSchema.getType());
-        if (catalogType == null) {
-          throw new ImpalaRuntimeException(String.format(
-              "Could not parse column type %s.", fieldSchema.getType()));
-        }
-        Type t = fromImpalaType(catalogType);
-        // Create the actual column and check if the column is a key column
-        ColumnSchemaBuilder csb = new ColumnSchemaBuilder(
-            fieldSchema.getName(), t);
-        boolean isKeyColumn = keyColNames.contains(fieldSchema.getName());
-        csb.key(isKeyColumn);
-        csb.nullable(!isKeyColumn);
-        ColumnSchema cs = csb.build();
-        columns.add(cs);
-        if (isKeyColumn) keyColSchemas.add(cs);
-      }
-
-      Schema schema = new Schema(columns);
-      CreateTableOptions cto = new CreateTableOptions();
-
-      // Handle auto-partitioning of the Kudu table
-      if (distributeParams_ != null) {
-        for (TDistributeParam param : distributeParams_) {
-          if (param.isSetBy_hash_param()) {
-            Preconditions.checkState(!param.isSetBy_range_param());
-            cto.addHashPartitions(param.getBy_hash_param().getColumns(),
-                param.getBy_hash_param().getNum_buckets());
-          } else {
-            Preconditions.checkState(param.isSetBy_range_param());
-            cto.setRangePartitionColumns(param.getBy_range_param().getColumns());
-            for (PartialRow p : KuduUtil.parseSplits(schema, param.getBy_range_param())) {
-              cto.addSplitRow(p);
-            }
-          }
-        }
-      }
-
-      if (!Strings.isNullOrEmpty(replication)) {
-        int r = Integer.parseInt(replication);
-        if (r <= 0) {
-          throw new ImpalaRuntimeException(
-              "Number of tablet replicas must be greater than zero. " +
-              "Given number of replicas is: " + Integer.toString(r));
-        }
-        cto.setNumReplicas(r);
-      }
-
-      client.createTable(kuduTableName, schema, cto);
-    } catch (ImpalaRuntimeException e) {
-      throw e;
-    } catch (Exception e) {
-      throw new ImpalaRuntimeException("Error creating Kudu table", e);
-    }
-  }
-
-  @Override
-  public void dropTable() throws ImpalaRuntimeException {
-    // If table is an external table, do not delete the data
-    if (msTbl_.getTableType().equals(TableType.EXTERNAL_TABLE.toString())) return;
-
-    String kuduTableName = msTbl_.getParameters().get(KuduTable.KEY_TABLE_NAME);
-    String kuduMasters = msTbl_.getParameters().get(KuduTable.KEY_MASTER_ADDRESSES);
-
-    try (KuduClient client = new KuduClient.KuduClientBuilder(kuduMasters).build()) {
-      if (!client.tableExists(kuduTableName)) {
-        LOG.warn("Table: %s is in inconsistent state. It does not exist in Kudu master(s)"
-            + " %s, but it exists in Hive metastore. Deleting from metastore only.",
-            kuduTableName, kuduMasters);
-        return;
-      }
-      client.deleteTable(kuduTableName);
-      return;
-    } catch (Exception e) {
-      throw new ImpalaRuntimeException("Error dropping Kudu table", e);
-    }
-  }
-
-  public static boolean canHandle(org.apache.hadoop.hive.metastore.api.Table msTbl) {
-    return KuduTable.isKuduTable(msTbl);
-  }
-
-  @Override
-  public boolean alterTable() throws ImpalaRuntimeException {
-    throw new ImpalaRuntimeException(
-        "Alter table operations are not supported for Kudu tables.");
-  }
-}
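
A small hypothetical sketch of the Kudu client calls the delegate above relies on (KuduClientBuilder, tableExists, deleteTable); the master address and table name are placeholders, not values from this commit.

import org.apache.kudu.client.KuduClient;

public class KuduDropSketch {
  public static void main(String[] args) throws Exception {
    String masters = "kudu-master-host:7051";   // placeholder master address
    String tableName = "example_table";         // placeholder table name
    try (KuduClient client = new KuduClient.KuduClientBuilder(masters).build()) {
      // Mirrors KuduDdlDelegate.dropTable(): only delete if the table actually exists.
      if (client.tableExists(tableName)) client.deleteTable(tableName);
    }
  }
}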

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/catalog/delegates/UnsupportedOpDelegate.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/catalog/delegates/UnsupportedOpDelegate.java b/fe/src/main/java/com/cloudera/impala/catalog/delegates/UnsupportedOpDelegate.java
deleted file mode 100644
index 52267be..0000000
--- a/fe/src/main/java/com/cloudera/impala/catalog/delegates/UnsupportedOpDelegate.java
+++ /dev/null
@@ -1,35 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.catalog.delegates;
-
-import com.cloudera.impala.common.ImpalaRuntimeException;
-
-/**
- * Empty implementation for the DdlDelegate interface that does nothing.
- */
-public class UnsupportedOpDelegate extends DdlDelegate {
-
-  @Override
-  public void createTable() throws ImpalaRuntimeException { }
-
-  @Override
-  public void dropTable() throws ImpalaRuntimeException { }
-
-  @Override
-  public boolean alterTable() throws ImpalaRuntimeException { return true; }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/common/AliasGenerator.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/common/AliasGenerator.java b/fe/src/main/java/com/cloudera/impala/common/AliasGenerator.java
deleted file mode 100644
index 2d75d18..0000000
--- a/fe/src/main/java/com/cloudera/impala/common/AliasGenerator.java
+++ /dev/null
@@ -1,48 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.common;
-
-import java.util.Set;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Sets;
-
-/**
- * Abstract class representing an alias generator. It uses a prefix and a
- * monotonically increasing counter to generate new aliases. Classes extending
- * this class are responsible for initializing the prefix.
- */
-public abstract class AliasGenerator {
-  private int numGeneratedAliases_ = 1;
-  protected String aliasPrefix_ = null;
-  protected Set<String> usedAliases_ = Sets.newHashSet();
-
-  /**
-   * Return the next available alias.
-   */
-  public String getNextAlias() {
-    Preconditions.checkNotNull(aliasPrefix_);
-    while (true) {
-      String candidateAlias = aliasPrefix_ + Integer.toString(numGeneratedAliases_++);
-      if (usedAliases_.add(candidateAlias)) return candidateAlias;
-      if (numGeneratedAliases_ < 0) {
-        throw new IllegalStateException("Overflow occured during alias generation.");
-      }
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/common/AnalysisException.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/common/AnalysisException.java b/fe/src/main/java/com/cloudera/impala/common/AnalysisException.java
deleted file mode 100644
index 464cfa0..0000000
--- a/fe/src/main/java/com/cloudera/impala/common/AnalysisException.java
+++ /dev/null
@@ -1,32 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.common;
-
-/**
- * Thrown for errors encountered during analysis of a SQL statement.
- *
- */
-public class AnalysisException extends ImpalaException {
-  public AnalysisException(String msg, Throwable cause) {
-    super(msg, cause);
-  }
-
-  public AnalysisException(String msg) {
-    super(msg);
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/common/ByteUnits.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/common/ByteUnits.java b/fe/src/main/java/com/cloudera/impala/common/ByteUnits.java
deleted file mode 100644
index f8b43ab..0000000
--- a/fe/src/main/java/com/cloudera/impala/common/ByteUnits.java
+++ /dev/null
@@ -1,49 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.common;
-
-/**
- * Byte unit constants.
- */
-public class ByteUnits {
-
-  /**
-   * One kilobyte in bytes.
-   */
-  public final static long KILOBYTE = 1024;
-
-  /**
-   * One megabyte in bytes.
-   */
-  public final static long MEGABYTE = KILOBYTE * 1024;
-
-  /**
-   * One gigabyte in bytes.
-   */
-  public final static long GIGABYTE = MEGABYTE * 1024;
-
-  /**
-   * One terabyte in bytes.
-   */
-  public final static long TERABYTE = GIGABYTE * 1024;
-
-  /**
-   * One petabyte in bytes.
-   */
-  public final static long PETABYTE = TERABYTE * 1024;
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/common/ColumnAliasGenerator.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/common/ColumnAliasGenerator.java b/fe/src/main/java/com/cloudera/impala/common/ColumnAliasGenerator.java
deleted file mode 100644
index 51c8ece..0000000
--- a/fe/src/main/java/com/cloudera/impala/common/ColumnAliasGenerator.java
+++ /dev/null
@@ -1,32 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.common;
-
-import java.util.List;
-
-import com.google.common.base.Preconditions;
-
-public class ColumnAliasGenerator extends AliasGenerator {
-  private static final String DEFAULT_COL_ALIAS_PREFIX = "$c$";
-
-  public ColumnAliasGenerator(List<String> existingLabels, String prefix) {
-    Preconditions.checkNotNull(existingLabels);
-    aliasPrefix_ = prefix != null ? prefix : DEFAULT_COL_ALIAS_PREFIX;
-    usedAliases_.addAll(existingLabels);
-  }
-}
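
A hypothetical usage sketch, not part of this commit: each alias is the prefix plus a monotonically increasing counter, and any label passed to the constructor is never re-issued. The labels below are illustrative only.

import java.util.List;

import com.cloudera.impala.common.ColumnAliasGenerator;
import com.google.common.collect.Lists;

public class ColumnAliasGeneratorSketch {
  public static void main(String[] args) {
    List<String> existingLabels = Lists.newArrayList("$c$2");  // already-used label
    ColumnAliasGenerator gen = new ColumnAliasGenerator(existingLabels, null);
    System.out.println(gen.getNextAlias());  // "$c$1"
    System.out.println(gen.getNextAlias());  // "$c$3" -- "$c$2" is already taken
  }
}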

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/common/FileSystemUtil.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/common/FileSystemUtil.java b/fe/src/main/java/com/cloudera/impala/common/FileSystemUtil.java
deleted file mode 100644
index 2239853..0000000
--- a/fe/src/main/java/com/cloudera/impala/common/FileSystemUtil.java
+++ /dev/null
@@ -1,409 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.common;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.InputStream;
-import java.net.URI;
-import java.util.UUID;
-
-import org.apache.commons.io.IOUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.fs.LocalFileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.s3.S3FileSystem;
-import org.apache.hadoop.fs.s3a.S3AFileSystem;
-import org.apache.hadoop.fs.s3native.NativeS3FileSystem;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.client.HdfsAdmin;
-import org.apache.hadoop.hdfs.protocol.EncryptionZone;
-import org.apache.log4j.Logger;
-
-import com.google.common.base.Preconditions;
-
-/**
- * Common utility functions for operating on FileSystem objects.
- */
-public class FileSystemUtil {
-  private static final Configuration CONF = new Configuration();
-  private static final Logger LOG = Logger.getLogger(FileSystemUtil.class);
-
-  /**
-   * Performs a non-recursive delete of all visible (non-hidden) files in a given
-   * directory. Returns the number of files deleted as part of this operation.
-   */
-  public static int deleteAllVisibleFiles(Path directory)
-      throws IOException {
-    FileSystem fs = directory.getFileSystem(CONF);
-    Preconditions.checkState(fs.getFileStatus(directory).isDirectory());
-    int numFilesDeleted = 0;
-    for (FileStatus fStatus: fs.listStatus(directory)) {
-      // Only delete files that are not hidden.
-      if (fStatus.isFile() && !isHiddenFile(fStatus.getPath().getName())) {
-        LOG.debug("Removing: " + fStatus.getPath());
-        fs.delete(fStatus.getPath(), false);
-        ++numFilesDeleted;
-      }
-    }
-    return numFilesDeleted;
-  }
-
-  /**
-   * Returns the total number of visible (non-hidden) files in a directory.
-   */
-  public static int getTotalNumVisibleFiles(Path directory) throws IOException {
-    FileSystem fs = directory.getFileSystem(CONF);
-    Preconditions.checkState(fs.getFileStatus(directory).isDirectory());
-    int numFiles = 0;
-    for (FileStatus fStatus: fs.listStatus(directory)) {
-      // Only count files that are not hidden.
-      if (fStatus.isFile() && !isHiddenFile(fStatus.getPath().getName())) {
-        ++numFiles;
-      }
-    }
-    return numFiles;
-  }
-
-  /**
-   * Returns true if path p1 and path p2 are in the same encryption zone in HDFS.
-   * Returns false if they are in different encryption zones or if either of the paths
-   * are not on HDFS.
-   */
-  private static boolean arePathsInSameHdfsEncryptionZone(FileSystem fs, Path p1,
-      Path p2) throws IOException {
-    // Only distributed file systems have encryption zones.
-    if (!isDistributedFileSystem(p1) || !isDistributedFileSystem(p2)) return false;
-    HdfsAdmin hdfsAdmin = new HdfsAdmin(fs.getUri(), CONF);
-    EncryptionZone z1 = hdfsAdmin.getEncryptionZoneForPath(p1);
-    EncryptionZone z2 = hdfsAdmin.getEncryptionZoneForPath(p2);
-    if (z1 == null && z2 == null) return true;
-    if (z1 == null || z2 == null) return false;
-    return z1.equals(z2);
-  }
-
-  /**
-   * Relocates all visible (non-hidden) files from a source directory to a destination
-   * directory. Files are moved (renamed) to the new location unless the source and
-   * destination directories are in different encryption zones, in which case the files
-   * are copied so that they are decrypted and/or encrypted. Naming conflicts are
-   * resolved by appending a UUID to the base file name. Any sub-directories within the
-   * source directory are skipped. Returns the number of files relocated as part of this
-   * operation.
-   */
-  public static int relocateAllVisibleFiles(Path sourceDir, Path destDir)
-      throws IOException {
-    FileSystem destFs = destDir.getFileSystem(CONF);
-    FileSystem sourceFs = sourceDir.getFileSystem(CONF);
-    Preconditions.checkState(destFs.isDirectory(destDir));
-    Preconditions.checkState(sourceFs.isDirectory(sourceDir));
-
-    // Use the same UUID to resolve all file name conflicts. This helps mitigate problems
-    // that might happen if there is a conflict moving a set of files that have
-    // dependent file names. For example, foo.lzo and foo.lzo_index.
-    UUID uuid = UUID.randomUUID();
-
-    // Enumerate all the files in the source
-    int numFilesMoved = 0;
-    for (FileStatus fStatus: sourceFs.listStatus(sourceDir)) {
-      if (fStatus.isDirectory()) {
-        LOG.debug("Skipping copy of directory: " + fStatus.getPath());
-        continue;
-      } else if (isHiddenFile(fStatus.getPath().getName())) {
-        continue;
-      }
-
-      Path destFile = new Path(destDir, fStatus.getPath().getName());
-      if (destFs.exists(destFile)) {
-        destFile = new Path(destDir,
-            appendToBaseFileName(destFile.getName(), uuid.toString()));
-      }
-      FileSystemUtil.relocateFile(fStatus.getPath(), destFile, false);
-      ++numFilesMoved;
-    }
-    return numFilesMoved;
-  }
-
-  /**
-   * Relocates the given file to a new location (either another directory or a
-   * file in the same or different filesystem). The file is generally moved (renamed) to
-   * the new location. However, the file is copied if the source and destination are in
-   * different encryption zones so that the file can be decrypted and/or encrypted, or if
-   * the source and destination are in different filesystems. If renameIfAlreadyExists is
-   * true, no error will be thrown if a file with the same name already exists in the
-   * destination location. Instead, a UUID will be appended to the base file name,
-   * preserving the existing file extension. If renameIfAlreadyExists is false, an
-   * IOException will be thrown if there is a file name conflict.
-   */
-  public static void relocateFile(Path sourceFile, Path dest,
-      boolean renameIfAlreadyExists) throws IOException {
-    FileSystem destFs = dest.getFileSystem(CONF);
-    FileSystem sourceFs = sourceFile.getFileSystem(CONF);
-
-    Path destFile =
-        destFs.isDirectory(dest) ? new Path(dest, sourceFile.getName()) : dest;
-    // If a file with the same name does not already exist in the destination location
-    // then use the same file name. Otherwise, generate a unique file name.
-    if (renameIfAlreadyExists && destFs.exists(destFile)) {
-      Path destDir = destFs.isDirectory(dest) ? dest : dest.getParent();
-      destFile = new Path(destDir,
-          appendToBaseFileName(destFile.getName(), UUID.randomUUID().toString()));
-    }
-    boolean sameFileSystem = isPathOnFileSystem(sourceFile, destFs);
-    boolean destIsDfs = isDistributedFileSystem(destFs);
-
-    // If the source and the destination are on different file systems, or in different
-    // encryption zones, files can't be moved from one location to the other and must be
-    // copied instead.
-    boolean sameEncryptionZone =
-        arePathsInSameHdfsEncryptionZone(destFs, sourceFile, destFile);
-    // We can do a rename if the src and dst are in the same encryption zone in the same
-    // distributed filesystem.
-    boolean doRename = destIsDfs && sameFileSystem && sameEncryptionZone;
-    // Alternatively, we can do a rename if the src and dst are on the same
-    // non-distributed filesystem.
-    if (!doRename) doRename = !destIsDfs && sameFileSystem;
-    if (doRename) {
-      LOG.debug(String.format(
-          "Moving '%s' to '%s'", sourceFile.toString(), destFile.toString()));
-      // Move (rename) the file.
-      destFs.rename(sourceFile, destFile);
-      return;
-    }
-    if (destIsDfs && sameFileSystem) {
-      Preconditions.checkState(!doRename);
-      // We must copy rather than move if the source and dest are in different
-      // encryption zones. A move would return an error from the NN because a move is a
-      // metadata-only operation and the files would not be encrypted/decrypted properly
-      // on the DNs.
-      LOG.info(String.format(
-          "Copying source '%s' to '%s' because HDFS encryption zones are different.",
-          sourceFile, destFile));
-    } else {
-      Preconditions.checkState(!sameFileSystem);
-      LOG.info(String.format("Copying '%s' to '%s' between filesystems.",
-          sourceFile, destFile));
-    }
-    FileUtil.copy(sourceFs, sourceFile, destFs, destFile, true, true, CONF);
-  }
-
-  /**
-   * Reads the file at path and returns the contents.
-   */
-  public static String readFile(Path file) throws IOException {
-    FileSystem fs = file.getFileSystem(CONF);
-    InputStream fileStream = fs.open(file);
-    try {
-      return IOUtils.toString(fileStream);
-    } finally {
-      IOUtils.closeQuietly(fileStream);
-    }
-  }
-
-  /**
-   * Builds a new file name based on a base file name. This is done by inserting
-   * the given appendStr into the base file name, preserving the file extension (if
-   * one exists).
-   * For example, this could be passed a UUID string to uniquify files:
-   * file1.snap -> file1_<uuid>.snap
-   * file1 -> file1_<uuid>
-   */
-  private static String appendToBaseFileName(String baseFileName, String appendStr) {
-    StringBuilder sb = new StringBuilder(baseFileName);
-    // Insert the string to append, preserving the file extension.
-    int extensionIdx = baseFileName.lastIndexOf('.');
-    if (extensionIdx != -1) {
-      sb.replace(extensionIdx, extensionIdx + 1, "_" + appendStr + ".");
-    } else {
-      sb.append("_" + appendStr);
-    }
-    return sb.toString();
-  }
-
-  /**
-   * Returns true if the given Path contains any visible subdirectories, otherwise false.
-   */
-  public static boolean containsVisibleSubdirectory(Path directory)
-      throws FileNotFoundException, IOException {
-    FileSystem fs = directory.getFileSystem(CONF);
-    // Enumerate all the files in the source
-    for (FileStatus fStatus: fs.listStatus(directory)) {
-      String pathName = fStatus.getPath().getName();
-      if (fStatus.isDirectory() && !isHiddenFile(pathName)) {
-        return true;
-      }
-    }
-    return false;
-  }
-
-  /**
-   * Makes a temporary unique directory within the given directory.
-   */
-  public static Path makeTmpSubdirectory(Path directory) throws IOException {
-    FileSystem fs = directory.getFileSystem(CONF);
-    Path tmpDir = new Path(directory, ".tmp_" + UUID.randomUUID().toString());
-    fs.mkdirs(tmpDir);
-    return tmpDir;
-  }
-
-  public static boolean isHiddenFile(String fileName) {
-    // Hidden files start with '.' or '_'. The '.copying' suffix is used by some
-    // filesystem utilities (e.g. hdfs put) as a temporary destination when copying
-    // files. The '.tmp' suffix is Flume's default for temporary files.
-    String lcFileName = fileName.toLowerCase();
-    return lcFileName.startsWith(".") || lcFileName.startsWith("_") ||
-        lcFileName.endsWith(".copying") || lcFileName.endsWith(".tmp");
-  }
-
-  /**
-   * Returns true if the filesystem might override getFileBlockLocations().
-   */
-  public static boolean hasGetFileBlockLocations(FileSystem fs) {
-    // Common case.
-    if (isDistributedFileSystem(fs)) return true;
-    // Blacklist FileSystems that are known to not implement getFileBlockLocations().
-    return !(fs instanceof S3AFileSystem || fs instanceof NativeS3FileSystem ||
-        fs instanceof S3FileSystem || fs instanceof LocalFileSystem);
-  }
-
-  /**
-   * Returns true iff the filesystem is a S3AFileSystem.
-   */
-  public static boolean isS3AFileSystem(FileSystem fs) {
-    return fs instanceof S3AFileSystem;
-  }
-
-  /**
-   * Returns true iff the path is on a S3AFileSystem.
-   */
-  public static boolean isS3AFileSystem(Path path) throws IOException {
-    return isS3AFileSystem(path.getFileSystem(CONF));
-  }
-
-  /**
-   * Returns true iff the filesystem is an instance of LocalFileSystem.
-   */
-  public static boolean isLocalFileSystem(FileSystem fs) {
-    return fs instanceof LocalFileSystem;
-  }
-
-  /**
-   * Return true iff path is on a local filesystem.
-   */
-  public static boolean isLocalFileSystem(Path path) throws IOException {
-    return isLocalFileSystem(path.getFileSystem(CONF));
-  }
-
-  /**
-   * Returns true iff the filesystem is a DistributedFileSystem.
-   */
-  public static boolean isDistributedFileSystem(FileSystem fs) {
-    return fs instanceof DistributedFileSystem;
-  }
-
-  /**
-   * Return true iff path is on a DFS filesystem.
-   */
-  public static boolean isDistributedFileSystem(Path path) throws IOException {
-    return isDistributedFileSystem(path.getFileSystem(CONF));
-  }
-
-  public static FileSystem getDefaultFileSystem() throws IOException {
-    Path path = new Path(FileSystem.getDefaultUri(CONF));
-    FileSystem fs = path.getFileSystem(CONF);
-    return fs;
-  }
-
-  public static DistributedFileSystem getDistributedFileSystem() throws IOException {
-    FileSystem fs = getDefaultFileSystem();
-    Preconditions.checkState(fs instanceof DistributedFileSystem);
-    return (DistributedFileSystem) fs;
-  }
-
-  /**
-   * Fully-qualifies the given path based on the FileSystem configuration.
-   */
-  public static Path createFullyQualifiedPath(Path location) {
-    URI defaultUri = FileSystem.getDefaultUri(CONF);
-    URI locationUri = location.toUri();
-    // Use the default URI only if location has no scheme or it has the same scheme as
-    // the default URI.  Otherwise, Path.makeQualified() will incorrectly use the
-    // authority from the default URI even though the schemes don't match.  See HDFS-7031.
-    if (locationUri.getScheme() == null ||
-        locationUri.getScheme().equalsIgnoreCase(defaultUri.getScheme())) {
-      return location.makeQualified(defaultUri, location);
-    }
-    // Already qualified (has scheme).
-    return location;
-  }
-
-  /**
-   * Return true iff the path is on the given filesystem.
-   */
-  public static boolean isPathOnFileSystem(Path path, FileSystem fs) {
-    try {
-      // Call makeQualified() for the side-effect of FileSystem.checkPath() which will
-      // throw an exception if path is not on fs.
-      fs.makeQualified(path);
-      return true;
-    } catch (IllegalArgumentException e) {
-      // Path is not on fs.
-      return false;
-    }
-  }
-
-  /**
-   * Copies the source file to a destination path on the local filesystem.
-   * Throws IOException on failure.
-   */
-  public static void copyToLocal(Path source, Path dest) throws IOException {
-    FileSystem fs = source.getFileSystem(CONF);
-    fs.copyToLocalFile(source, dest);
-  }
-
-  /**
-   * Returns true if the given path is a location which supports caching (e.g. HDFS).
-   */
-  public static boolean isPathCacheable(Path path) {
-    try {
-      return isDistributedFileSystem(path);
-    } catch (IOException e) {
-      return false;
-    }
-  }
-
-  /**
-   * Returns the configuration.
-   */
-  public static Configuration getConfiguration() {
-    return CONF;
-  }
-
-  /**
-   * Returns true iff the given location is on a filesystem that Impala can write to.
-   */
-  public static boolean isImpalaWritableFilesystem(String location)
-      throws IOException {
-    Path path = new Path(location);
-    return (FileSystemUtil.isDistributedFileSystem(path) ||
-        FileSystemUtil.isLocalFileSystem(path) || FileSystemUtil.isS3AFileSystem(path));
-  }
-}
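
A small hypothetical sketch exercising two of the helpers above; the paths and file names are placeholders. isHiddenFile() is pure string logic, so it runs without a cluster, and createFullyQualifiedPath() qualifies a scheme-less path against the default FileSystem URI.

import org.apache.hadoop.fs.Path;

import com.cloudera.impala.common.FileSystemUtil;

public class FileSystemUtilSketch {
  public static void main(String[] args) throws Exception {
    // Hidden-file conventions used when listing or relocating directory contents.
    System.out.println(FileSystemUtil.isHiddenFile("_SUCCESS"));        // true
    System.out.println(FileSystemUtil.isHiddenFile(".part.copying"));   // true
    System.out.println(FileSystemUtil.isHiddenFile("data_0.parq"));     // false

    // Qualifies a scheme-less location against the default FileSystem URI.
    Path qualified = FileSystemUtil.createFullyQualifiedPath(new Path("/warehouse/t1"));
    System.out.println(qualified);
  }
}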

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/common/Id.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/common/Id.java b/fe/src/main/java/com/cloudera/impala/common/Id.java
deleted file mode 100644
index 88f626f..0000000
--- a/fe/src/main/java/com/cloudera/impala/common/Id.java
+++ /dev/null
@@ -1,76 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.common;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import com.google.common.base.Joiner;
-import com.google.common.collect.Lists;
-
-/**
- * Integer ids that cannot accidentally be compared with ints.
- */
-public class Id<IdType extends Id<IdType>> implements Comparable<Id<IdType>> {
-  static protected int INVALID_ID = -1;
-  protected final int id_;
-
-  public Id(int id) {
-    this.id_ = id;
-  }
-
-  public boolean isValid() { return id_ != INVALID_ID; }
-  public int asInt() { return id_; }
-
-  @Override
-  public int hashCode() {
-    return Integer.valueOf(id_).hashCode();
-  }
-
-  @Override
-  public String toString() {
-    return Integer.toString(id_);
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (obj == null) return false;
-    // only ids of the same subclass are comparable
-    if (obj.getClass() != this.getClass()) return false;
-    return ((Id)obj).id_ == id_;
-  }
-
-  @Override
-  public int compareTo(Id<IdType> cmp) {
-    return id_ - cmp.id_;
-  }
-
-  public ArrayList<IdType> asList() {
-    ArrayList<IdType> list = new ArrayList<IdType>();
-    list.add((IdType) this);
-    return list;
-  }
-
-  public static <C extends Id> String printIds(List<C> ids) {
-    ArrayList<String> l = Lists.newArrayList();
-    for (C id: ids) {
-      l.add(id.toString());
-    }
-    return "(" + Joiner.on(" ").join(l) + ")";
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/common/IdGenerator.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/common/IdGenerator.java b/fe/src/main/java/com/cloudera/impala/common/IdGenerator.java
deleted file mode 100644
index cdece7d..0000000
--- a/fe/src/main/java/com/cloudera/impala/common/IdGenerator.java
+++ /dev/null
@@ -1,28 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.common;
-
-/**
- * Generator of consecutively numbered integers to be used as ids by subclasses of Id.
- * Subclasses of Id should be able to create a generator for their Id type.
- */
-public abstract class IdGenerator<IdType extends Id<IdType>> {
-  protected int nextId_ = 0;
-  public abstract IdType getNextId();
-  public abstract IdType getMaxId();
-}
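
A hypothetical sketch, not part of this commit, of the Id/IdGenerator pattern above: a dedicated Id subtype per id space plus a generator handing out consecutive ids. The name ExampleId is invented for illustration.

import com.cloudera.impala.common.Id;
import com.cloudera.impala.common.IdGenerator;

public class ExampleId extends Id<ExampleId> {
  public ExampleId(int id) { super(id); }

  // Generator that issues ExampleIds numbered 0, 1, 2, ...
  public static IdGenerator<ExampleId> createGenerator() {
    return new IdGenerator<ExampleId>() {
      @Override
      public ExampleId getNextId() { return new ExampleId(nextId_++); }
      @Override
      public ExampleId getMaxId() { return new ExampleId(nextId_ - 1); }
    };
  }
}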

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/common/ImpalaException.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/common/ImpalaException.java b/fe/src/main/java/com/cloudera/impala/common/ImpalaException.java
deleted file mode 100644
index 6a50bf8..0000000
--- a/fe/src/main/java/com/cloudera/impala/common/ImpalaException.java
+++ /dev/null
@@ -1,33 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.common;
-
-
-/**
- * The parent class of all custom Impala exceptions.
- *
- */
-abstract public class ImpalaException extends java.lang.Exception {
-  public ImpalaException(String msg, Throwable cause) {
-    super(msg, cause);
-  }
-
-  protected ImpalaException(String msg) {
-    super(msg);
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/common/ImpalaRuntimeException.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/common/ImpalaRuntimeException.java b/fe/src/main/java/com/cloudera/impala/common/ImpalaRuntimeException.java
deleted file mode 100644
index 27a8925..0000000
--- a/fe/src/main/java/com/cloudera/impala/common/ImpalaRuntimeException.java
+++ /dev/null
@@ -1,32 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.common;
-
-/**
- * Thrown for errors encountered during the execution of a SQL statement.
- *
- */
-public class ImpalaRuntimeException extends ImpalaException {
-  public ImpalaRuntimeException(String msg, Throwable cause) {
-    super(msg, cause);
-  }
-
-  public ImpalaRuntimeException(String msg) {
-    super(msg);
-  }
-}



[50/61] [partial] incubator-impala git commit: IMPALA-3786: Replace "cloudera" with "apache" (part 1)

Posted by kw...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/ext-data-source/test/src/main/java/org/apache/impala/extdatasource/AllTypesDataSource.java
----------------------------------------------------------------------
diff --git a/ext-data-source/test/src/main/java/org/apache/impala/extdatasource/AllTypesDataSource.java b/ext-data-source/test/src/main/java/org/apache/impala/extdatasource/AllTypesDataSource.java
new file mode 100644
index 0000000..a6a731b
--- /dev/null
+++ b/ext-data-source/test/src/main/java/org/apache/impala/extdatasource/AllTypesDataSource.java
@@ -0,0 +1,323 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.extdatasource;
+
+import java.math.BigDecimal;
+import java.math.BigInteger;
+import java.sql.Timestamp;
+import java.util.List;
+import java.util.UUID;
+
+import com.cloudera.impala.extdatasource.thrift.TBinaryPredicate;
+import com.cloudera.impala.extdatasource.thrift.TCloseParams;
+import com.cloudera.impala.extdatasource.thrift.TCloseResult;
+import com.cloudera.impala.extdatasource.thrift.TColumnDesc;
+import com.cloudera.impala.extdatasource.thrift.TGetNextParams;
+import com.cloudera.impala.extdatasource.thrift.TGetNextResult;
+import com.cloudera.impala.extdatasource.thrift.TOpenParams;
+import com.cloudera.impala.extdatasource.thrift.TOpenResult;
+import com.cloudera.impala.extdatasource.thrift.TPrepareParams;
+import com.cloudera.impala.extdatasource.thrift.TPrepareResult;
+import com.cloudera.impala.extdatasource.thrift.TRowBatch;
+import com.cloudera.impala.extdatasource.thrift.TTableSchema;
+import com.cloudera.impala.extdatasource.util.SerializationUtils;
+import com.cloudera.impala.extdatasource.v1.ExternalDataSource;
+import com.cloudera.impala.thrift.TColumnData;
+import com.cloudera.impala.thrift.TColumnType;
+import com.cloudera.impala.thrift.TPrimitiveType;
+import com.cloudera.impala.thrift.TScalarType;
+import com.cloudera.impala.thrift.TStatus;
+import com.cloudera.impala.thrift.TErrorCode;
+import com.cloudera.impala.thrift.TTypeNodeType;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
+
+/**
+ * Data source implementation for tests that can:
+ * (a) Accept every other offered conjunct for testing planning (though predicates are
+ *     not actually evaluated) and return trivial data of all supported types for
+ *     query tests.
+ * (b) Validate the predicates offered by Impala.
+ */
+public class AllTypesDataSource implements ExternalDataSource {
+  // Total number of rows to return
+  private static final int NUM_ROWS_RETURNED = 5000;
+
+  // Change the size of the batches that are returned
+  private static final int INITIAL_BATCH_SIZE = 500;
+  private static final int BATCH_SIZE_INCREMENT = 100;
+
+  private static final TStatus STATUS_OK =
+      new TStatus(TErrorCode.OK, Lists.<String>newArrayList());
+
+  private int currRow_;
+  private boolean eos_;
+  private int batchSize_;
+  private TTableSchema schema_;
+  private DataSourceState state_;
+  private String scanHandle_;
+  private String validatePredicatesResult_;
+
+  // Enumerates the states of the data source.
+  private enum DataSourceState {
+    CREATED,
+    OPENED,
+    CLOSED
+  }
+
+  public AllTypesDataSource() {
+    eos_ = false;
+    currRow_ = 0;
+    state_ = DataSourceState.CREATED;
+  }
+
+  /**
+   * Accepts every other offered conjunct and returns an estimate of the number of rows
+   * this data source will produce.
+   */
+  @Override
+  public TPrepareResult prepare(TPrepareParams params) {
+    Preconditions.checkState(state_ == DataSourceState.CREATED);
+    List<Integer> accepted = Lists.newArrayList();
+    int numRowsReturned = 0;
+    if (validatePredicates(params.getPredicates())) {
+      // Indicate all predicates are applied because we return a dummy row with the
+      // result later to validate the result in tests. Impala shouldn't try to apply
+      // predicates to that dummy row.
+      for (int i = 0; i < params.getPredicatesSize(); ++i) accepted.add(i);
+      numRowsReturned = 1;
+    } else {
+      // Default behavior is to accept every other predicate. They are not actually
+      // applied, but we want to validate that Impala applies the correct predicates.
+      for (int i = 0; i < params.getPredicatesSize(); ++i) {
+        if (i % 2 == 0) accepted.add(i);
+      }
+      numRowsReturned = NUM_ROWS_RETURNED;
+    }
+    return new TPrepareResult(STATUS_OK)
+      .setAccepted_conjuncts(accepted)
+      .setNum_rows_estimate(numRowsReturned);
+  }
+
+  /**
+   * If the predicate value (assuming STRING) starts with 'VALIDATE_PREDICATES##',
+   * we validate the TPrepareParams.predicates against predicates specified after the
+   * 'VALIDATE_PREDICATES##' and return true. The result of the validation is stored
+   * in validatePredicatesResult_.
+   *
+   * The expected predicates are specified in the form "{slot} {TComparisonOp} {val}",
+   * and conjunctive predicates are separated by '&&'.
+   *
+   * For example, the predicates_spec validates the predicates in the following query:
+   *    select * from table_name
+   *    where predicates_spec = 'x LT 1 && y GT 2' and
+   *          x < 1 and
+   *          2 > y;
+   *
+   * Current limitations:
+   *  - Disjunctive predicates are not supported (e.g. "expr1 or expr2")
+   *  - Only INT is supported
+   */
+  private boolean validatePredicates(List<List<TBinaryPredicate>> predicates) {
+    if (predicates == null || predicates.isEmpty()) return false;
+    TBinaryPredicate firstPredicate = predicates.get(0).get(0);
+    if (!firstPredicate.getValue().isSetString_val()) return false;
+    String colVal = firstPredicate.getValue().getString_val();
+    if (!colVal.toUpperCase().startsWith("VALIDATE_PREDICATES##")) return false;
+
+    String[] colValParts = colVal.split("##");
+    Preconditions.checkArgument(colValParts.length == 2);
+    String[] expectedPredicates = colValParts[1].split("&&");
+    Preconditions.checkArgument(expectedPredicates.length == predicates.size() - 1);
+
+    String result = "SUCCESS";
+    for (int i = 1; i < predicates.size(); ++i) {
+      String[] predicateParts = expectedPredicates[i - 1].trim().split(" ");
+      Preconditions.checkArgument(predicateParts.length == 3);
+      TBinaryPredicate predicate =
+          Iterables.getOnlyElement(predicates.get(i));
+      Preconditions.checkArgument(predicate.getValue().isSetInt_val());
+
+      String slotName = predicate.getCol().getName().toUpperCase();
+      int intVal = predicate.getValue().getInt_val();
+      if (!predicateParts[0].toUpperCase().equals(slotName) ||
+          !predicateParts[1].toUpperCase().equals(predicate.getOp().name()) ||
+          !predicateParts[2].equals(Integer.toString(intVal))) {
+        result = "Failed predicate, expected=" + expectedPredicates[i - 1].trim() +
+            " actual=" + predicate.toString();
+      }
+    }
+    validatePredicatesResult_ = result;
+    return true;
+  }
+
+  /**
+   * Initializes the batch size and stores the table schema.
+   */
+  @Override
+  public TOpenResult open(TOpenParams params) {
+    Preconditions.checkState(state_ == DataSourceState.CREATED);
+    state_ = DataSourceState.OPENED;
+    batchSize_ = INITIAL_BATCH_SIZE;
+    schema_ = params.getRow_schema();
+    // Need to check validatePredicates again because the call in Prepare() was from
+    // the frontend and used a different instance of this data source class.
+    if (validatePredicates(params.getPredicates())) {
+      // If validating predicates, only one STRING column should be selected.
+      Preconditions.checkArgument(schema_.getColsSize() == 1);
+      TColumnDesc firstCol = schema_.getCols().get(0);
+      TColumnType firstType = firstCol.getType();
+      Preconditions.checkState(firstType.getTypesSize() == 1);
+      Preconditions.checkState(firstType.types.get(0).getType() == TTypeNodeType.SCALAR);
+      Preconditions.checkArgument(
+          firstType.types.get(0).scalar_type.getType() == TPrimitiveType.STRING);
+    }
+    scanHandle_ = UUID.randomUUID().toString();
+    return new TOpenResult(STATUS_OK).setScan_handle(scanHandle_);
+  }
+
+  /**
+   * If validating predicates, returns a single row with the result of the validation.
+   * Otherwise returns row batches with generated rows based on the row index. Called
+   * multiple times, so the current row is stored between calls. Each row batch is a
+   * different size (not necessarily the size specified by TOpenParams.batch_size) to
+   * ensure that Impala can handle unexpected batch sizes.
+   */
+  @Override
+  public TGetNextResult getNext(TGetNextParams params) {
+    Preconditions.checkState(state_ == DataSourceState.OPENED);
+    Preconditions.checkArgument(params.getScan_handle().equals(scanHandle_));
+    if (eos_) return new TGetNextResult(STATUS_OK).setEos(eos_);
+
+    if (validatePredicatesResult_ != null) {
+      TColumnData colData = new TColumnData();
+      colData.setIs_null(Lists.newArrayList(false));
+      colData.setString_vals(Lists.newArrayList(validatePredicatesResult_));
+      eos_ = true;
+      return new TGetNextResult(STATUS_OK).setEos(eos_)
+          .setRows(new TRowBatch().setCols(Lists.newArrayList(colData)).setNum_rows(1));
+    }
+
+    List<TColumnData> cols = Lists.newArrayList();
+    for (int i = 0; i < schema_.getColsSize(); ++i) {
+      cols.add(new TColumnData().setIs_null(Lists.<Boolean>newArrayList()));
+    }
+
+    int numAdded = 0;
+    while (currRow_ < NUM_ROWS_RETURNED && numAdded < batchSize_) {
+      addRow(cols);
+      ++numAdded;
+      ++currRow_;
+    }
+
+    batchSize_ += BATCH_SIZE_INCREMENT;
+    if (currRow_ == NUM_ROWS_RETURNED) eos_ = true;
+    return new TGetNextResult(STATUS_OK).setEos(eos_)
+        .setRows(new TRowBatch().setCols(cols).setNum_rows(numAdded));
+  }
+
+  /**
+   * Adds a row to the set of columns. For all numeric types the value is set to the
+   * row index (mod the size for integer types). For strings the value is simply a
+   * string containing the row index, and every 5th string value is null.
+   */
+  private void addRow(List<TColumnData> cols) {
+    for (int i = 0; i < cols.size(); ++i) {
+      TColumnDesc colDesc = schema_.getCols().get(i);
+      TColumnData colData = cols.get(i);
+      TColumnType type = colDesc.getType();
+      if (type.types.get(0).getType() != TTypeNodeType.SCALAR) {
+        // Unsupported non-scalar type.
+        throw new UnsupportedOperationException("Unsupported column type: " +
+            type.types.get(0).getType());
+      }
+      Preconditions.checkState(type.getTypesSize() == 1);
+      TScalarType scalarType = type.types.get(0).scalar_type;
+      switch (scalarType.type) {
+        case TINYINT:
+          colData.addToIs_null(false);
+          colData.addToByte_vals((byte) (currRow_ % 10));
+          break;
+        case SMALLINT:
+          colData.addToIs_null(false);
+          colData.addToShort_vals((short) (currRow_ % 100));
+          break;
+        case INT:
+          colData.addToIs_null(false);
+          colData.addToInt_vals(currRow_);
+          break;
+        case BIGINT:
+          colData.addToIs_null(false);
+          colData.addToLong_vals((long) currRow_ * 10);
+          break;
+        case DOUBLE:
+          colData.addToIs_null(false);
+          colData.addToDouble_vals(currRow_);
+          break;
+        case FLOAT:
+          colData.addToIs_null(false);
+          colData.addToDouble_vals((float) (1.1 * currRow_));
+          break;
+        case STRING:
+          if (currRow_ % 5 == 0) {
+            colData.addToIs_null(true);
+          } else {
+            colData.addToIs_null(false);
+            colData.addToString_vals(String.valueOf(currRow_));
+          }
+          break;
+        case BOOLEAN:
+          colData.addToIs_null(false);
+          colData.addToBool_vals(currRow_ % 2 == 0);
+          break;
+        case TIMESTAMP:
+          colData.addToIs_null(false);
+          colData.addToBinary_vals(
+            SerializationUtils.encodeTimestamp(new Timestamp(currRow_)));
+          break;
+        case DECIMAL:
+          colData.addToIs_null(false);
+          BigInteger maxUnscaled = BigInteger.TEN.pow(scalarType.getPrecision());
+          BigInteger val = maxUnscaled.subtract(BigInteger.valueOf(currRow_ + 1));
+          val = val.mod(maxUnscaled);
+          if (currRow_ % 2 == 0) val = val.negate();
+          colData.addToBinary_vals(SerializationUtils.encodeDecimal(new BigDecimal(val)));
+          break;
+        case BINARY:
+        case CHAR:
+        case DATE:
+        case DATETIME:
+        case INVALID_TYPE:
+        case NULL_TYPE:
+        default:
+          // Unsupported.
+          throw new UnsupportedOperationException("Unsupported column type: " +
+              scalarType.getType());
+      }
+    }
+  }
+
+  @Override
+  public TCloseResult close(TCloseParams params) {
+    Preconditions.checkState(state_ == DataSourceState.OPENED);
+    Preconditions.checkArgument(params.getScan_handle().equals(scanHandle_));
+    state_ = DataSourceState.CLOSED;
+    return new TCloseResult(STATUS_OK);
+  }
+}
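
A hypothetical test-harness sketch driving the ExternalDataSource lifecycle implemented above (prepare, open, getNext, close). It assumes the standard Thrift-generated constructors and chained accessors for the TPrepareParams/TOpenParams/TGetNextParams/TCloseParams structs; the empty schema and the printed expectations are illustrative only.

import com.cloudera.impala.extdatasource.AllTypesDataSource;
import com.cloudera.impala.extdatasource.thrift.TCloseParams;
import com.cloudera.impala.extdatasource.thrift.TGetNextParams;
import com.cloudera.impala.extdatasource.thrift.TGetNextResult;
import com.cloudera.impala.extdatasource.thrift.TOpenParams;
import com.cloudera.impala.extdatasource.thrift.TOpenResult;
import com.cloudera.impala.extdatasource.thrift.TPrepareParams;
import com.cloudera.impala.extdatasource.thrift.TPrepareResult;
import com.cloudera.impala.extdatasource.thrift.TTableSchema;
import com.cloudera.impala.extdatasource.v1.ExternalDataSource;

public class AllTypesDataSourceDriverSketch {
  public static void main(String[] args) {
    // Planning-side call: with no predicates offered, none are accepted and the
    // estimate is the fixed number of rows the source always returns.
    ExternalDataSource planner = new AllTypesDataSource();
    TPrepareResult prep = planner.prepare(new TPrepareParams());
    System.out.println("estimated rows: " + prep.getNum_rows_estimate());

    // Execution-side calls use a fresh instance (mirroring the comment in open()).
    ExternalDataSource exec = new AllTypesDataSource();
    TOpenParams openParams = new TOpenParams();
    openParams.setRow_schema(new TTableSchema());   // empty schema: no columns selected
    TOpenResult open = exec.open(openParams);

    long totalRows = 0;
    TGetNextResult batch;
    do {
      batch = exec.getNext(new TGetNextParams().setScan_handle(open.getScan_handle()));
      totalRows += batch.getRows().getNum_rows();
    } while (!batch.isEos());
    System.out.println("rows fetched: " + totalRows);   // expected: 5000

    exec.close(new TCloseParams().setScan_handle(open.getScan_handle()));
  }
}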

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/AggregateInfo.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/AggregateInfo.java b/fe/src/main/java/com/cloudera/impala/analysis/AggregateInfo.java
deleted file mode 100644
index e8e9445..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/AggregateInfo.java
+++ /dev/null
@@ -1,742 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.planner.DataPartition;
-import com.cloudera.impala.thrift.TPartitionType;
-import com.google.common.base.Objects;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-/**
- * Encapsulates all the information needed to compute the aggregate functions of a single
- * Select block, including a possible 2nd phase aggregation step for DISTINCT aggregate
- * functions and merge aggregation steps needed for distributed execution.
- *
- * The latter requires a tree structure of AggregateInfo objects which express the
- * original aggregate computations as well as the necessary merging aggregate
- * computations.
- * TODO: get rid of this by transforming
- *   SELECT COUNT(DISTINCT a, b, ..) GROUP BY x, y, ...
- * into an equivalent query with an inline view:
- *   SELECT COUNT(*) FROM (SELECT DISTINCT a, b, ..., x, y, ...) GROUP BY x, y, ...
- *
- * The tree structure looks as follows:
- * - for non-distinct aggregation:
- *   - aggInfo: contains the original aggregation functions and grouping exprs
- *   - aggInfo.mergeAggInfo: contains the merging aggregation functions (grouping
- *     exprs are identical)
- * - for distinct aggregation (for an explanation of the phases, see
- *   SelectStmt.createDistinctAggInfo()):
- *   - aggInfo: contains the phase 1 aggregate functions and grouping exprs
- *   - aggInfo.2ndPhaseDistinctAggInfo: contains the phase 2 aggregate functions and
- *     grouping exprs
- *   - aggInfo.mergeAggInfo: contains the merging aggregate functions for the phase 1
- *     computation (grouping exprs are identical)
- *   - aggInfo.2ndPhaseDistinctAggInfo.mergeAggInfo: contains the merging aggregate
- *     functions for the phase 2 computation (grouping exprs are identical)
- *
- * In general, merging aggregate computations are idempotent; in other words,
- * aggInfo.mergeAggInfo == aggInfo.mergeAggInfo.mergeAggInfo.
- *
- * TODO: move the merge construction logic from SelectStmt into AggregateInfo
- * TODO: Add query tests for aggregation with intermediate tuples with num_nodes=1.
- */
-public class AggregateInfo extends AggregateInfoBase {
-  private final static Logger LOG = LoggerFactory.getLogger(AggregateInfo.class);
-
-  public enum AggPhase {
-    FIRST,
-    FIRST_MERGE,
-    SECOND,
-    SECOND_MERGE;
-
-    public boolean isMerge() { return this == FIRST_MERGE || this == SECOND_MERGE; }
-  };
-
-  // created by createMergeAggInfo()
-  private AggregateInfo mergeAggInfo_;
-
-  // created by createDistinctAggInfo()
-  private AggregateInfo secondPhaseDistinctAggInfo_;
-
-  private final AggPhase aggPhase_;
-
-  // Map from all grouping and aggregate exprs to a SlotRef referencing the corresp. slot
-  // in the intermediate tuple. Identical to outputTupleSmap_ if no aggregateExpr has an
-  // output type that is different from its intermediate type.
-  protected ExprSubstitutionMap intermediateTupleSmap_ = new ExprSubstitutionMap();
-
-  // Map from all grouping and aggregate exprs to a SlotRef referencing the corresp. slot
-  // in the output tuple.
-  protected ExprSubstitutionMap outputTupleSmap_ = new ExprSubstitutionMap();
-
-  // Map from slots of outputTupleSmap_ to the corresponding slot in
-  // intermediateTupleSmap_.
-  protected ExprSubstitutionMap outputToIntermediateTupleSmap_ =
-      new ExprSubstitutionMap();
-
-  // if set, a subset of groupingExprs_; set and used during planning
-  private List<Expr> partitionExprs_;
-
-  // C'tor creates copies of groupingExprs and aggExprs.
-  private AggregateInfo(ArrayList<Expr> groupingExprs,
-      ArrayList<FunctionCallExpr> aggExprs, AggPhase aggPhase)  {
-    super(groupingExprs, aggExprs);
-    aggPhase_ = aggPhase;
-  }
-
-  /**
-   * C'tor for cloning.
-   */
-  private AggregateInfo(AggregateInfo other) {
-    super(other);
-    if (other.mergeAggInfo_ != null) {
-      mergeAggInfo_ = other.mergeAggInfo_.clone();
-    }
-    if (other.secondPhaseDistinctAggInfo_ != null) {
-      secondPhaseDistinctAggInfo_ = other.secondPhaseDistinctAggInfo_.clone();
-    }
-    aggPhase_ = other.aggPhase_;
-    outputTupleSmap_ = other.outputTupleSmap_.clone();
-    if (other.requiresIntermediateTuple()) {
-      intermediateTupleSmap_ = other.intermediateTupleSmap_.clone();
-    } else {
-      Preconditions.checkState(other.intermediateTupleDesc_ == other.outputTupleDesc_);
-      intermediateTupleSmap_ = outputTupleSmap_;
-    }
-    partitionExprs_ =
-        (other.partitionExprs_ != null) ? Expr.cloneList(other.partitionExprs_) : null;
-  }
-
-  public List<Expr> getPartitionExprs() { return partitionExprs_; }
-  public void setPartitionExprs(List<Expr> exprs) { partitionExprs_ = exprs; }
-
-  /**
-   * Creates complete AggregateInfo for groupingExprs and aggExprs, including
-   * aggTupleDesc and aggTupleSMap. If parameter tupleDesc != null, sets aggTupleDesc to
-   * that instead of creating a new descriptor (after verifying that the passed-in
-   * descriptor is correct for the given aggregation).
-   * Also creates mergeAggInfo and secondPhaseDistinctAggInfo, if needed.
-   * If an aggTupleDesc is created, also registers eq predicates between the
-   * grouping exprs and their respective slots with 'analyzer'.
-   */
-  static public AggregateInfo create(
-      ArrayList<Expr> groupingExprs, ArrayList<FunctionCallExpr> aggExprs,
-      TupleDescriptor tupleDesc, Analyzer analyzer)
-          throws AnalysisException {
-    Preconditions.checkState(
-        (groupingExprs != null && !groupingExprs.isEmpty())
-        || (aggExprs != null && !aggExprs.isEmpty()));
-    Expr.removeDuplicates(groupingExprs);
-    Expr.removeDuplicates(aggExprs);
-    AggregateInfo result = new AggregateInfo(groupingExprs, aggExprs, AggPhase.FIRST);
-
-    // collect agg exprs with DISTINCT clause
-    ArrayList<FunctionCallExpr> distinctAggExprs = Lists.newArrayList();
-    if (aggExprs != null) {
-      for (FunctionCallExpr aggExpr: aggExprs) {
-        if (aggExpr.isDistinct()) distinctAggExprs.add(aggExpr);
-      }
-    }
-
-    if (distinctAggExprs.isEmpty()) {
-      if (tupleDesc == null) {
-        result.createTupleDescs(analyzer);
-        result.createSmaps(analyzer);
-      } else {
-        // A tupleDesc should only be given for UNION DISTINCT.
-        Preconditions.checkState(aggExprs == null);
-        result.outputTupleDesc_ = tupleDesc;
-        result.intermediateTupleDesc_ = tupleDesc;
-      }
-      result.createMergeAggInfo(analyzer);
-    } else {
-      // we don't allow you to pass in a descriptor for distinct aggregation
-      // (we need two descriptors)
-      Preconditions.checkState(tupleDesc == null);
-      result.createDistinctAggInfo(groupingExprs, distinctAggExprs, analyzer);
-    }
-    LOG.debug("agg info:\n" + result.debugString());
-    return result;
-  }
-
-  /**
-   * Create aggregate info for select block containing aggregate exprs with
-   * DISTINCT clause.
-   * This creates:
-   * - aggTupleDesc
-   * - a complete secondPhaseDistinctAggInfo
-   * - mergeAggInfo
-   *
-   * At the moment, we require that all distinct aggregate
-   * functions be applied to the same set of exprs (ie, we can't do something
-   * like SELECT COUNT(DISTINCT id), COUNT(DISTINCT address)).
-   * Aggregation happens in two successive phases:
-   * - the first phase aggregates by all grouping exprs plus all parameter exprs
-   *   of DISTINCT aggregate functions
-   *
-   * Example:
-   *   SELECT a, COUNT(DISTINCT b, c), MIN(d), COUNT(*) FROM T GROUP BY a
-   * - 1st phase grouping exprs: a, b, c
-   * - 1st phase agg exprs: MIN(d), COUNT(*)
-   * - 2nd phase grouping exprs: a
-   * - 2nd phase agg exprs: COUNT(*), MIN(<MIN(d) from 1st phase>),
-   *     SUM(<COUNT(*) from 1st phase>)
-   *
-   * TODO: expand implementation to cover the general case; this will require
-   * a different execution strategy
-   */
-  private void createDistinctAggInfo(
-      ArrayList<Expr> origGroupingExprs,
-      ArrayList<FunctionCallExpr> distinctAggExprs, Analyzer analyzer)
-          throws AnalysisException {
-    Preconditions.checkState(!distinctAggExprs.isEmpty());
-    // make sure that all DISTINCT params are the same;
-    // ignore top-level implicit casts in the comparison, we might have inserted
-    // those during analysis
-    ArrayList<Expr> expr0Children = Lists.newArrayList();
-
-    if (distinctAggExprs.get(0).getFnName().getFunction().equalsIgnoreCase(
-        "group_concat")) {
-      // Ignore separator parameter, otherwise the same would have to be present for all
-      // other distinct aggregates as well.
-      // TODO: Deal with constant exprs more generally, instead of special-casing
-      // group_concat().
-      expr0Children.add(distinctAggExprs.get(0).getChild(0).ignoreImplicitCast());
-    } else {
-      for (Expr expr : distinctAggExprs.get(0).getChildren()) {
-        expr0Children.add(expr.ignoreImplicitCast());
-      }
-    }
-    for (int i = 1; i < distinctAggExprs.size(); ++i) {
-      ArrayList<Expr> exprIChildren = Lists.newArrayList();
-      if (distinctAggExprs.get(i).getFnName().getFunction().equalsIgnoreCase(
-          "group_concat")) {
-        exprIChildren.add(distinctAggExprs.get(i).getChild(0).ignoreImplicitCast());
-      } else {
-        for (Expr expr : distinctAggExprs.get(i).getChildren()) {
-          exprIChildren.add(expr.ignoreImplicitCast());
-        }
-      }
-      if (!Expr.equalLists(expr0Children, exprIChildren)) {
-        throw new AnalysisException(
-            "all DISTINCT aggregate functions need to have the same set of "
-            + "parameters as " + distinctAggExprs.get(0).toSql()
-            + "; deviating function: " + distinctAggExprs.get(i).toSql());
-      }
-    }
-
-    // add DISTINCT parameters to grouping exprs
-    groupingExprs_.addAll(expr0Children);
-
-    // remove DISTINCT aggregate functions from aggExprs
-    aggregateExprs_.removeAll(distinctAggExprs);
-
-    createTupleDescs(analyzer);
-    createSmaps(analyzer);
-    createMergeAggInfo(analyzer);
-    createSecondPhaseAggInfo(origGroupingExprs, distinctAggExprs, analyzer);
-  }
-
-  public AggregateInfo getMergeAggInfo() { return mergeAggInfo_; }
-  public AggregateInfo getSecondPhaseDistinctAggInfo() {
-    return secondPhaseDistinctAggInfo_;
-  }
-  public AggPhase getAggPhase() { return aggPhase_; }
-  public boolean isMerge() { return aggPhase_.isMerge(); }
-  public boolean isDistinctAgg() { return secondPhaseDistinctAggInfo_ != null; }
-  public ExprSubstitutionMap getIntermediateSmap() { return intermediateTupleSmap_; }
-  public ExprSubstitutionMap getOutputSmap() { return outputTupleSmap_; }
-  public ExprSubstitutionMap getOutputToIntermediateSmap() {
-    return outputToIntermediateTupleSmap_;
-  }
-
-  public boolean hasAggregateExprs() {
-    return !aggregateExprs_.isEmpty() ||
-        (secondPhaseDistinctAggInfo_ != null &&
-         !secondPhaseDistinctAggInfo_.getAggregateExprs().isEmpty());
-  }
-
-  /**
-   * Return the tuple id produced in the final aggregation step.
-   */
-  public TupleId getResultTupleId() {
-    if (isDistinctAgg()) return secondPhaseDistinctAggInfo_.getOutputTupleId();
-    return getOutputTupleId();
-  }
-
-  public ArrayList<FunctionCallExpr> getMaterializedAggregateExprs() {
-    ArrayList<FunctionCallExpr> result = Lists.newArrayList();
-    for (Integer i: materializedSlots_) {
-      result.add(aggregateExprs_.get(i));
-    }
-    return result;
-  }
-
-  /**
-   * Append ids of all slots that are being referenced in the process
-   * of performing the aggregate computation described by this AggregateInfo.
-   */
-  public void getRefdSlots(List<SlotId> ids) {
-    Preconditions.checkState(outputTupleDesc_ != null);
-    if (groupingExprs_ != null) {
-      Expr.getIds(groupingExprs_, null, ids);
-    }
-    Expr.getIds(aggregateExprs_, null, ids);
-    // The backend assumes that the entire aggTupleDesc is materialized
-    for (int i = 0; i < outputTupleDesc_.getSlots().size(); ++i) {
-      ids.add(outputTupleDesc_.getSlots().get(i).getId());
-    }
-  }
-
-  /**
-   * Substitute all the expressions (grouping expr, aggregate expr) and update our
-   * substitution map according to the given substitution map:
-   * - smap typically maps from tuple t1 to tuple t2 (example: the smap of an
-   *   inline view maps the virtual table ref t1 into a base table ref t2)
-   * - our grouping and aggregate exprs need to be substituted with the given
-   *   smap so that they also reference t2
-   * - aggTupleSMap needs to be recomputed to map exprs based on t2
-   *   onto our aggTupleDesc (ie, the left-hand side needs to be substituted with
-   *   smap)
-   * - mergeAggInfo: this is not affected, because
-   *   * its grouping and aggregate exprs only reference aggTupleDesc_
-   *   * its smap is identical to aggTupleSMap_
-   * - 2ndPhaseDistinctAggInfo:
-   *   * its grouping and aggregate exprs also only reference aggTupleDesc_
-   *     and are therefore not affected
-   *   * its smap needs to be recomputed to map exprs based on t2 to its own
-   *     aggTupleDesc
-   */
-  public void substitute(ExprSubstitutionMap smap, Analyzer analyzer)
-      throws InternalException {
-    groupingExprs_ = Expr.substituteList(groupingExprs_, smap, analyzer, false);
-    LOG.trace("AggInfo: grouping_exprs=" + Expr.debugString(groupingExprs_));
-
-    // The smap in this case should not substitute the aggs themselves, only
-    // their subexpressions.
-    List<Expr> substitutedAggs =
-        Expr.substituteList(aggregateExprs_, smap, analyzer, false);
-    aggregateExprs_.clear();
-    for (Expr substitutedAgg: substitutedAggs) {
-      aggregateExprs_.add((FunctionCallExpr) substitutedAgg);
-    }
-
-    LOG.trace("AggInfo: agg_exprs=" + Expr.debugString(aggregateExprs_));
-    outputTupleSmap_.substituteLhs(smap, analyzer);
-    intermediateTupleSmap_.substituteLhs(smap, analyzer);
-    if (secondPhaseDistinctAggInfo_ != null) {
-      secondPhaseDistinctAggInfo_.substitute(smap, analyzer);
-    }
-  }
-
-  /**
-   * Create the info for an aggregation node that merges its pre-aggregated inputs:
-   * - pre-aggregation is computed by 'this'
-   * - tuple desc and smap are the same as that of the input (we're materializing
-   *   the same logical tuple)
-   * - grouping exprs: slotrefs to the input's grouping slots
-   * - aggregate exprs: aggregation of the input's aggregateExprs slots
-   *
-   * The returned AggregateInfo shares its descriptor and smap with the input info;
-   * createAggTupleDesc() must not be called on it.
-   */
-  private void createMergeAggInfo(Analyzer analyzer) {
-    Preconditions.checkState(mergeAggInfo_ == null);
-    TupleDescriptor inputDesc = intermediateTupleDesc_;
-    // construct grouping exprs
-    ArrayList<Expr> groupingExprs = Lists.newArrayList();
-    for (int i = 0; i < getGroupingExprs().size(); ++i) {
-      SlotRef slotRef = new SlotRef(inputDesc.getSlots().get(i));
-      groupingExprs.add(slotRef);
-    }
-
-    // construct agg exprs
-    ArrayList<FunctionCallExpr> aggExprs = Lists.newArrayList();
-    for (int i = 0; i < getAggregateExprs().size(); ++i) {
-      FunctionCallExpr inputExpr = getAggregateExprs().get(i);
-      Preconditions.checkState(inputExpr.isAggregateFunction());
-      Expr aggExprParam =
-          new SlotRef(inputDesc.getSlots().get(i + getGroupingExprs().size()));
-      FunctionCallExpr aggExpr = FunctionCallExpr.createMergeAggCall(
-          inputExpr, Lists.newArrayList(aggExprParam));
-      aggExpr.analyzeNoThrow(analyzer);
-      aggExprs.add(aggExpr);
-    }
-
-    AggPhase aggPhase =
-        (aggPhase_ == AggPhase.FIRST) ? AggPhase.FIRST_MERGE : AggPhase.SECOND_MERGE;
-    mergeAggInfo_ = new AggregateInfo(groupingExprs, aggExprs, aggPhase);
-    mergeAggInfo_.intermediateTupleDesc_ = intermediateTupleDesc_;
-    mergeAggInfo_.outputTupleDesc_ = outputTupleDesc_;
-    mergeAggInfo_.intermediateTupleSmap_ = intermediateTupleSmap_;
-    mergeAggInfo_.outputTupleSmap_ = outputTupleSmap_;
-    mergeAggInfo_.materializedSlots_ = materializedSlots_;
-  }
-
-  /**
-   * Creates an IF function call that returns NULL if any of the slots
-   * at indexes [firstIdx, lastIdx] return NULL.
- * For example, the resulting IF function would look like this for 3 slots:
-   * IF(IsNull(slot1), NULL, IF(IsNull(slot2), NULL, slot3))
-   * Returns null if firstIdx is greater than lastIdx.
-   * Returns a SlotRef to the last slot if there is only one slot in range.
-   */
-  private Expr createCountDistinctAggExprParam(int firstIdx, int lastIdx,
-      ArrayList<SlotDescriptor> slots) {
-    if (firstIdx > lastIdx) return null;
-
-    Expr elseExpr = new SlotRef(slots.get(lastIdx));
-    if (firstIdx == lastIdx) return elseExpr;
-
-    for (int i = lastIdx - 1; i >= firstIdx; --i) {
-      ArrayList<Expr> ifArgs = Lists.newArrayList();
-      SlotRef slotRef = new SlotRef(slots.get(i));
-      // Build expr: IF(IsNull(slotRef), NULL, elseExpr)
-      Expr isNullPred = new IsNullPredicate(slotRef, false);
-      ifArgs.add(isNullPred);
-      ifArgs.add(new NullLiteral());
-      ifArgs.add(elseExpr);
-      elseExpr = new FunctionCallExpr("if", ifArgs);
-    }
-    return elseExpr;
-  }
-
-  /**
-   * Create the info for an aggregation node that computes the second phase of
-   * DISTINCT aggregate functions.
-   * (Refer to createDistinctAggInfo() for an explanation of the phases.)
-   * - 'this' is the phase 1 aggregation
-   * - grouping exprs are those of the original query (param origGroupingExprs)
-   * - aggregate exprs for the DISTINCT agg fns: these are aggregating the grouping
-   *   slots that were added to the original grouping slots in phase 1;
-   *   count is mapped to count(*) and sum is mapped to sum
-   * - other aggregate exprs: same as the non-DISTINCT merge case
-   *   (count is mapped to sum, everything else stays the same)
-   *
-   * This call also creates the tuple descriptor and smap for the returned AggregateInfo.
-   */
-  private void createSecondPhaseAggInfo(
-      ArrayList<Expr> origGroupingExprs,
-      ArrayList<FunctionCallExpr> distinctAggExprs, Analyzer analyzer)
-      throws AnalysisException {
-    Preconditions.checkState(secondPhaseDistinctAggInfo_ == null);
-    Preconditions.checkState(!distinctAggExprs.isEmpty());
-    // The output of the 1st phase agg is the 1st phase intermediate.
-    TupleDescriptor inputDesc = intermediateTupleDesc_;
-
-    // construct agg exprs for original DISTINCT aggregate functions
-    // (these aren't part of aggExprs_)
-    ArrayList<FunctionCallExpr> secondPhaseAggExprs = Lists.newArrayList();
-    for (FunctionCallExpr inputExpr: distinctAggExprs) {
-      Preconditions.checkState(inputExpr.isAggregateFunction());
-      FunctionCallExpr aggExpr = null;
-      if (inputExpr.getFnName().getFunction().equals("count")) {
-        // COUNT(DISTINCT ...) ->
-        // COUNT(IF(IsNull(<agg slot 1>), NULL, IF(IsNull(<agg slot 2>), NULL, ...)))
-        // We need the nested IF to make sure that we do not count
-        // column-value combinations if any of the distinct columns are NULL.
-        // This behavior is consistent with MySQL.
-        Expr ifExpr = createCountDistinctAggExprParam(origGroupingExprs.size(),
-            origGroupingExprs.size() + inputExpr.getChildren().size() - 1,
-            inputDesc.getSlots());
-        Preconditions.checkNotNull(ifExpr);
-        ifExpr.analyzeNoThrow(analyzer);
-        aggExpr = new FunctionCallExpr("count", Lists.newArrayList(ifExpr));
-      } else if (inputExpr.getFnName().getFunction().equals("group_concat")) {
-        // Syntax: GROUP_CONCAT([DISTINCT] expression [, separator])
-        ArrayList<Expr> exprList = Lists.newArrayList();
-        // Add "expression" parameter. Need to get it from the inputDesc's slots so the
-        // tuple reference is correct.
-        exprList.add(new SlotRef(inputDesc.getSlots().get(origGroupingExprs.size())));
-        // Check if user provided a custom separator
-        if (inputExpr.getChildren().size() == 2) exprList.add(inputExpr.getChild(1));
-        aggExpr = new FunctionCallExpr(inputExpr.getFnName(), exprList);
-      } else {
-        // SUM(DISTINCT <expr>) -> SUM(<last grouping slot>);
-        // (MIN(DISTINCT ...) and MAX(DISTINCT ...) have their DISTINCT turned
-        // off during analysis, and AVG() is changed to SUM()/COUNT())
-        Expr aggExprParam =
-            new SlotRef(inputDesc.getSlots().get(origGroupingExprs.size()));
-        aggExpr = new FunctionCallExpr(inputExpr.getFnName(),
-            Lists.newArrayList(aggExprParam));
-      }
-      secondPhaseAggExprs.add(aggExpr);
-    }
-
-    // map all the remaining agg fns
-    for (int i = 0; i < aggregateExprs_.size(); ++i) {
-      FunctionCallExpr inputExpr = aggregateExprs_.get(i);
-      Preconditions.checkState(inputExpr.isAggregateFunction());
-      // we're aggregating an intermediate slot of the 1st agg phase
-      Expr aggExprParam =
-          new SlotRef(inputDesc.getSlots().get(i + getGroupingExprs().size()));
-      FunctionCallExpr aggExpr = FunctionCallExpr.createMergeAggCall(
-          inputExpr, Lists.newArrayList(aggExprParam));
-      secondPhaseAggExprs.add(aggExpr);
-    }
-    Preconditions.checkState(
-        secondPhaseAggExprs.size() == aggregateExprs_.size() + distinctAggExprs.size());
-
-    for (FunctionCallExpr aggExpr: secondPhaseAggExprs) {
-      aggExpr.analyzeNoThrow(analyzer);
-      Preconditions.checkState(aggExpr.isAggregateFunction());
-    }
-
-    ArrayList<Expr> substGroupingExprs =
-        Expr.substituteList(origGroupingExprs, intermediateTupleSmap_, analyzer, false);
-    secondPhaseDistinctAggInfo_ =
-        new AggregateInfo(substGroupingExprs, secondPhaseAggExprs, AggPhase.SECOND);
-    secondPhaseDistinctAggInfo_.createTupleDescs(analyzer);
-    secondPhaseDistinctAggInfo_.createSecondPhaseAggSMap(this, distinctAggExprs);
-    secondPhaseDistinctAggInfo_.createMergeAggInfo(analyzer);
-  }
-
-  /**
-   * Create smap to map original grouping and aggregate exprs onto output
-   * of secondPhaseDistinctAggInfo.
-   */
-  private void createSecondPhaseAggSMap(
-      AggregateInfo inputAggInfo, ArrayList<FunctionCallExpr> distinctAggExprs) {
-    outputTupleSmap_.clear();
-    int slotIdx = 0;
-    ArrayList<SlotDescriptor> slotDescs = outputTupleDesc_.getSlots();
-
-    int numDistinctParams = distinctAggExprs.get(0).getChildren().size();
-    // If we are counting distinct params of group_concat, we cannot include the custom
-    // separator since it is not a distinct param.
-    if (distinctAggExprs.get(0).getFnName().getFunction().equalsIgnoreCase(
-        "group_concat")
-        && numDistinctParams == 2) {
-      --numDistinctParams;
-    }
-    int numOrigGroupingExprs =
-        inputAggInfo.getGroupingExprs().size() - numDistinctParams;
-    Preconditions.checkState(slotDescs.size() ==
-        numOrigGroupingExprs + distinctAggExprs.size() +
-        inputAggInfo.getAggregateExprs().size());
-
-    // original grouping exprs -> first m slots
-    for (int i = 0; i < numOrigGroupingExprs; ++i, ++slotIdx) {
-      Expr groupingExpr = inputAggInfo.getGroupingExprs().get(i);
-      outputTupleSmap_.put(
-          groupingExpr.clone(), new SlotRef(slotDescs.get(slotIdx)));
-    }
-
-    // distinct agg exprs -> next n slots
-    for (int i = 0; i < distinctAggExprs.size(); ++i, ++slotIdx) {
-      Expr aggExpr = distinctAggExprs.get(i);
-      outputTupleSmap_.put(
-          aggExpr.clone(), (new SlotRef(slotDescs.get(slotIdx))));
-    }
-
-    // remaining agg exprs -> remaining slots
-    for (int i = 0; i < inputAggInfo.getAggregateExprs().size(); ++i, ++slotIdx) {
-      Expr aggExpr = inputAggInfo.getAggregateExprs().get(i);
-      outputTupleSmap_.put(aggExpr.clone(), new SlotRef(slotDescs.get(slotIdx)));
-    }
-  }
-
-  /**
-   * Populates the output and intermediate smaps based on the output and intermediate
-   * tuples that are assumed to be set. If an intermediate tuple is required, also
-   * populates the output-to-intermediate smap and registers auxiliary equivalence
-   * predicates between the grouping slots of the two tuples.
-   */
-  public void createSmaps(Analyzer analyzer) {
-    Preconditions.checkNotNull(outputTupleDesc_);
-    Preconditions.checkNotNull(intermediateTupleDesc_);
-
-    List<Expr> exprs = Lists.newArrayListWithCapacity(
-        groupingExprs_.size() + aggregateExprs_.size());
-    exprs.addAll(groupingExprs_);
-    exprs.addAll(aggregateExprs_);
-    for (int i = 0; i < exprs.size(); ++i) {
-      outputTupleSmap_.put(exprs.get(i).clone(),
-          new SlotRef(outputTupleDesc_.getSlots().get(i)));
-      if (!requiresIntermediateTuple()) continue;
-      intermediateTupleSmap_.put(exprs.get(i).clone(),
-          new SlotRef(intermediateTupleDesc_.getSlots().get(i)));
-      outputToIntermediateTupleSmap_.put(
-          new SlotRef(outputTupleDesc_.getSlots().get(i)),
-          new SlotRef(intermediateTupleDesc_.getSlots().get(i)));
-      if (i < groupingExprs_.size()) {
-        analyzer.createAuxEquivPredicate(
-            new SlotRef(outputTupleDesc_.getSlots().get(i)),
-            new SlotRef(intermediateTupleDesc_.getSlots().get(i)));
-      }
-    }
-    if (!requiresIntermediateTuple()) intermediateTupleSmap_ = outputTupleSmap_;
-
-    LOG.trace("output smap=" + outputTupleSmap_.debugString());
-    LOG.trace("intermediate smap=" + intermediateTupleSmap_.debugString());
-  }
-
-  /**
-   * Mark slots required for this aggregation as materialized:
-   * - all grouping output slots as well as grouping exprs
-   * - for non-distinct aggregation: the aggregate exprs of materialized aggregate slots;
-   *   this assumes that the output slots corresponding to aggregate exprs have already
-   *   been marked by the consumer of this select block
-   * - for distinct aggregation, we mark all aggregate output slots in order to keep
-   *   things simple
-   * Also computes materializedAggregateExprs.
- * This call must be idempotent because it may be called more than once for a Union stmt.
-   */
-  @Override
-  public void materializeRequiredSlots(Analyzer analyzer, ExprSubstitutionMap smap) {
-    for (int i = 0; i < groupingExprs_.size(); ++i) {
-      outputTupleDesc_.getSlots().get(i).setIsMaterialized(true);
-      intermediateTupleDesc_.getSlots().get(i).setIsMaterialized(true);
-    }
-
-    // collect input exprs: grouping exprs plus aggregate exprs that need to be
-    // materialized
-    materializedSlots_.clear();
-    List<Expr> exprs = Lists.newArrayList();
-    exprs.addAll(groupingExprs_);
-    for (int i = 0; i < aggregateExprs_.size(); ++i) {
-      SlotDescriptor slotDesc =
-          outputTupleDesc_.getSlots().get(groupingExprs_.size() + i);
-      SlotDescriptor intermediateSlotDesc =
-          intermediateTupleDesc_.getSlots().get(groupingExprs_.size() + i);
-      if (isDistinctAgg()) {
-        slotDesc.setIsMaterialized(true);
-        intermediateSlotDesc.setIsMaterialized(true);
-      }
-      if (!slotDesc.isMaterialized()) continue;
-      intermediateSlotDesc.setIsMaterialized(true);
-      exprs.add(aggregateExprs_.get(i));
-      materializedSlots_.add(i);
-    }
-    List<Expr> resolvedExprs = Expr.substituteList(exprs, smap, analyzer, false);
-    analyzer.materializeSlots(resolvedExprs);
-
-    if (isDistinctAgg()) {
-      secondPhaseDistinctAggInfo_.materializeRequiredSlots(analyzer, null);
-    }
-  }
-
-  /**
-   * Checks if all materialized aggregate expressions have distinct semantics.
-   * It returns true if either of the following is true:
-   * (1) all materialized aggregate expressions have distinct semantics
-   *     (e.g. MIN, MAX, NDV). In other words, this optimization will work
-   *     for COUNT(DISTINCT c) but not COUNT(c).
-   * (2) there are no aggregate expressions but only grouping expressions.
-   */
-  public boolean hasAllDistinctAgg() {
-    if (hasAggregateExprs()) {
-      for (FunctionCallExpr aggExpr : getMaterializedAggregateExprs()) {
-        if (!aggExpr.isDistinct() && !aggExpr.ignoresDistinct()) return false;
-      }
-    } else {
-      Preconditions.checkState(!groupingExprs_.isEmpty());
-    }
-    return true;
-  }
-
-  /**
-   * Validates the internal state of this agg info: Checks that the number of
-   * materialized slots of the output tuple corresponds to the number of materialized
-   * aggregate functions plus the number of grouping exprs. Also checks that the return
-   * types of the aggregate and grouping exprs correspond to the slots in the output
-   * tuple.
-   */
-  public void checkConsistency() {
-    ArrayList<SlotDescriptor> slots = outputTupleDesc_.getSlots();
-
-    // Check materialized slots.
-    int numMaterializedSlots = 0;
-    for (SlotDescriptor slotDesc: slots) {
-      if (slotDesc.isMaterialized()) ++numMaterializedSlots;
-    }
-    Preconditions.checkState(numMaterializedSlots ==
-        materializedSlots_.size() + groupingExprs_.size());
-
-    // Check that grouping expr return types match the slot descriptors.
-    int slotIdx = 0;
-    for (int i = 0; i < groupingExprs_.size(); ++i) {
-      Expr groupingExpr = groupingExprs_.get(i);
-      Type slotType = slots.get(slotIdx).getType();
-      Preconditions.checkState(groupingExpr.getType().equals(slotType),
-          String.format("Grouping expr %s returns type %s but its output tuple " +
-              "slot has type %s", groupingExpr.toSql(),
-              groupingExpr.getType().toString(), slotType.toString()));
-      ++slotIdx;
-    }
-    // Check that aggregate expr return types match the slot descriptors.
-    for (int i = 0; i < aggregateExprs_.size(); ++i) {
-      Expr aggExpr = aggregateExprs_.get(i);
-      Type slotType = slots.get(slotIdx).getType();
-      Preconditions.checkState(aggExpr.getType().equals(slotType),
-          String.format("Agg expr %s returns type %s but its output tuple " +
-              "slot has type %s", aggExpr.toSql(), aggExpr.getType().toString(),
-              slotType.toString()));
-      ++slotIdx;
-    }
-  }
-
-  /**
-   * Returns DataPartition derived from grouping exprs.
-   * Returns unpartitioned spec if no grouping.
-   * TODO: this won't work when we start supporting range partitions,
-   * because we could derive both hash and order-based partitions
-   */
-  public DataPartition getPartition() {
-    if (groupingExprs_.isEmpty()) {
-      return DataPartition.UNPARTITIONED;
-    } else {
-      return DataPartition.hashPartitioned(groupingExprs_);
-    }
-  }
-
-  @Override
-  public String debugString() {
-    StringBuilder out = new StringBuilder(super.debugString());
-    out.append(Objects.toStringHelper(this)
-        .add("phase", aggPhase_)
-        .add("intermediate_smap", intermediateTupleSmap_.debugString())
-        .add("output_smap", outputTupleSmap_.debugString())
-        .toString());
-    if (mergeAggInfo_ != this && mergeAggInfo_ != null) {
-      out.append("\nmergeAggInfo:\n" + mergeAggInfo_.debugString());
-    }
-    if (secondPhaseDistinctAggInfo_ != null) {
-      out.append("\nsecondPhaseDistinctAggInfo:\n"
-          + secondPhaseDistinctAggInfo_.debugString());
-    }
-    return out.toString();
-  }
-
-  @Override
-  protected String tupleDebugName() { return "agg-tuple"; }
-
-  @Override
-  public AggregateInfo clone() { return new AggregateInfo(this); }
-}
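
The two-phase DISTINCT strategy documented in createDistinctAggInfo() above can be
hard to picture from the plan-construction code alone. The following standalone
sketch (plain Java collections rather than Impala's Expr/TupleDescriptor machinery;
the class and field names are made up purely for illustration) walks the Javadoc
example SELECT a, COUNT(DISTINCT b, c), MIN(d), COUNT(*) FROM T GROUP BY a through
both phases, including the NULL handling that the nested IF built by
createCountDistinctAggExprParam() implements:

import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class TwoPhaseDistinctAggSketch {
  // A row of the hypothetical table T(a, b, c, d); b and c may be NULL.
  static final class Row {
    final String a, b, c; final int d;
    Row(String a, String b, String c, int d) { this.a = a; this.b = b; this.c = c; this.d = d; }
  }

  public static void main(String[] args) {
    List<Row> rows = Arrays.asList(
        new Row("x", "b1", "c1", 5),
        new Row("x", "b1", "c1", 3),   // duplicate (b, c) pair within group "x"
        new Row("x", "b2", null, 7),   // NULL c: excluded from COUNT(DISTINCT b, c)
        new Row("y", "b1", "c1", 9));

    // Phase 1: group by (a, b, c); compute MIN(d) and COUNT(*) per group.
    Map<List<String>, int[]> phase1 = new LinkedHashMap<>();
    for (Row r : rows) {
      int[] agg = phase1.get(Arrays.asList(r.a, r.b, r.c));
      if (agg == null) {
        agg = new int[] { Integer.MAX_VALUE, 0 };
        phase1.put(Arrays.asList(r.a, r.b, r.c), agg);
      }
      agg[0] = Math.min(agg[0], r.d);  // MIN(d)
      agg[1] += 1;                     // COUNT(*)
    }

    // Phase 2: group by a. COUNT(DISTINCT b, c) counts the phase-1 groups whose
    // (b, c) contains no NULLs (mirroring the nested IF); MIN merges via MIN, and
    // COUNT(*) merges via SUM of the phase-1 counts.
    Map<String, long[]> phase2 = new LinkedHashMap<>();
    for (Map.Entry<List<String>, int[]> e : phase1.entrySet()) {
      String a = e.getKey().get(0);
      String b = e.getKey().get(1);
      String c = e.getKey().get(2);
      long[] agg = phase2.get(a);
      if (agg == null) {
        agg = new long[] { 0, Integer.MAX_VALUE, 0 };
        phase2.put(a, agg);
      }
      if (b != null && c != null) agg[0] += 1;     // COUNT(DISTINCT b, c)
      agg[1] = Math.min(agg[1], e.getValue()[0]);  // MIN of phase-1 MIN(d)
      agg[2] += e.getValue()[1];                   // SUM of phase-1 COUNT(*)
    }

    for (Map.Entry<String, long[]> e : phase2.entrySet()) {
      long[] agg = e.getValue();
      System.out.println(e.getKey() + ": count_distinct=" + agg[0]
          + " min_d=" + agg[1] + " count_star=" + agg[2]);
    }
    // Prints: x: count_distinct=1 min_d=3 count_star=3
    //         y: count_distinct=1 min_d=9 count_star=1
  }
}

The mapping shown in phase 2 (COUNT(DISTINCT ...) becoming a count over phase-1
groups, COUNT(*) becoming a SUM of phase-1 counts, MIN staying MIN) is the same
rewrite createSecondPhaseAggInfo() applies to the real aggregate exprs.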

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/AggregateInfoBase.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/AggregateInfoBase.java b/fe/src/main/java/com/cloudera/impala/analysis/AggregateInfoBase.java
deleted file mode 100644
index f3ad3f8..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/AggregateInfoBase.java
+++ /dev/null
@@ -1,221 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.catalog.AggregateFunction;
-import com.cloudera.impala.catalog.ColumnStats;
-import com.cloudera.impala.catalog.Type;
-import com.google.common.base.Objects;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-/**
- * Base class for AggregateInfo and AnalyticInfo containing the intermediate and output
- * tuple descriptors as well as their smaps for evaluating aggregate functions.
- */
-public abstract class AggregateInfoBase {
-  private final static Logger LOG =
-      LoggerFactory.getLogger(AggregateInfoBase.class);
-
-  // For aggregations: All unique grouping expressions from a select block.
-  // For analytics: Empty.
-  protected ArrayList<Expr> groupingExprs_;
-
-  // For aggregations: All unique aggregate expressions from a select block.
-  // For analytics: The results of AnalyticExpr.getFnCall() for the unique
-  // AnalyticExprs of a select block.
-  protected ArrayList<FunctionCallExpr> aggregateExprs_;
-
-  // The tuple into which the intermediate output of an aggregation is materialized.
-  // Contains groupingExprs.size() + aggregateExprs.size() slots, the first of which
-  // contain the values of the grouping exprs, followed by slots into which the
-  // aggregateExprs' update()/merge() symbols materialize their output, i.e., slots
-  // of the aggregate functions' intermediate types.
-  // Identical to outputTupleDesc_ if no aggregateExpr has an output type that is
-  // different from its intermediate type.
-  protected TupleDescriptor intermediateTupleDesc_;
-
-  // The tuple into which the final output of the aggregation is materialized.
-  // Contains groupingExprs.size() + aggregateExprs.size() slots, the first of which
-  // contain the values of the grouping exprs, followed by slots into which the
-  // aggregateExprs' finalize() symbol write its result, i.e., slots of the aggregate
-  // functions' output types.
-  protected TupleDescriptor outputTupleDesc_;
-
- // For aggregation: indices into aggregate exprs that need to be materialized.
-  // For analytics: indices into the analytic exprs and their corresponding aggregate
-  // exprs that need to be materialized.
-  // Populated in materializeRequiredSlots() which must be implemented by subclasses.
-  protected ArrayList<Integer> materializedSlots_ = Lists.newArrayList();
-
-  protected AggregateInfoBase(ArrayList<Expr> groupingExprs,
-      ArrayList<FunctionCallExpr> aggExprs)  {
-    Preconditions.checkState(groupingExprs != null || aggExprs != null);
-    groupingExprs_ =
-        groupingExprs != null ? Expr.cloneList(groupingExprs) : new ArrayList<Expr>();
-    Preconditions.checkState(aggExprs != null || !(this instanceof AnalyticInfo));
-    aggregateExprs_ =
-        aggExprs != null ? Expr.cloneList(aggExprs) : new ArrayList<FunctionCallExpr>();
-  }
-
-  /**
-   * C'tor for cloning.
-   */
-  protected AggregateInfoBase(AggregateInfoBase other) {
-    groupingExprs_ =
-        (other.groupingExprs_ != null) ? Expr.cloneList(other.groupingExprs_) : null;
-    aggregateExprs_ =
-        (other.aggregateExprs_ != null) ? Expr.cloneList(other.aggregateExprs_) : null;
-    intermediateTupleDesc_ = other.intermediateTupleDesc_;
-    outputTupleDesc_ = other.outputTupleDesc_;
-    materializedSlots_ = Lists.newArrayList(other.materializedSlots_);
-  }
-
-  /**
-   * Creates the intermediate and output tuple descriptors. If no agg expr has an
-   * intermediate type different from its output type, then only the output tuple
-   * descriptor is created and the intermediate tuple is set to the output tuple.
-   */
-  protected void createTupleDescs(Analyzer analyzer) {
-    // Create the intermediate tuple desc first, so that the tuple ids are increasing
-    // from bottom to top in the plan tree.
-    intermediateTupleDesc_ = createTupleDesc(analyzer, false);
-    if (requiresIntermediateTuple(aggregateExprs_)) {
-      outputTupleDesc_ = createTupleDesc(analyzer, true);
-    } else {
-      outputTupleDesc_ = intermediateTupleDesc_;
-    }
-  }
-
-  /**
-   * Returns a tuple descriptor for the aggregation/analytic's intermediate or final
-   * result, depending on whether isOutputTuple is true or false.
-   * Also updates the appropriate substitution map, and creates and registers auxiliary
-   * equality predicates between the grouping slots and the grouping exprs.
-   */
-  private TupleDescriptor createTupleDesc(Analyzer analyzer, boolean isOutputTuple) {
-    TupleDescriptor result =
-        analyzer.getDescTbl().createTupleDescriptor(
-          tupleDebugName() + (isOutputTuple ? "-out" : "-intermed"));
-    List<Expr> exprs = Lists.newArrayListWithCapacity(
-        groupingExprs_.size() + aggregateExprs_.size());
-    exprs.addAll(groupingExprs_);
-    exprs.addAll(aggregateExprs_);
-
-    int aggregateExprStartIndex = groupingExprs_.size();
-    for (int i = 0; i < exprs.size(); ++i) {
-      Expr expr = exprs.get(i);
-      SlotDescriptor slotDesc = analyzer.addSlotDescriptor(result);
-      slotDesc.initFromExpr(expr);
-      if (i < aggregateExprStartIndex) {
-        // register equivalence between grouping slot and grouping expr;
-        // do this only when the grouping expr isn't a constant, otherwise
-        // it'll simply show up as a gratuitous HAVING predicate
-        // (which would actually be incorrect if the constant happens to be NULL)
-        if (!expr.isConstant()) {
-          analyzer.createAuxEquivPredicate(new SlotRef(slotDesc), expr.clone());
-        }
-      } else {
-        Preconditions.checkArgument(expr instanceof FunctionCallExpr);
-        FunctionCallExpr aggExpr = (FunctionCallExpr)expr;
-        if (aggExpr.isMergeAggFn()) {
-          slotDesc.setLabel(aggExpr.getChild(0).toSql());
-          slotDesc.setSourceExpr(aggExpr.getChild(0));
-        } else {
-          slotDesc.setLabel(aggExpr.toSql());
-          slotDesc.setSourceExpr(aggExpr);
-        }
-
-        // count(*) is non-nullable.
-        if (aggExpr.getFnName().getFunction().equals("count")) {
-          // TODO: Consider making nullability a property of types or of builtin agg fns.
-          // row_number, rank, and dense_rank are non-nullable as well.
-          slotDesc.setIsNullable(false);
-        }
-        if (!isOutputTuple) {
-          Type intermediateType = ((AggregateFunction)aggExpr.fn_).getIntermediateType();
-          if (intermediateType != null) {
-            // Use the output type as intermediate if the function has a wildcard decimal.
-            if (!intermediateType.isWildcardDecimal()) {
-              slotDesc.setType(intermediateType);
-            } else {
-              Preconditions.checkState(expr.getType().isDecimal());
-            }
-          }
-        }
-      }
-    }
-    String prefix = (isOutputTuple ? "result " : "intermediate ");
-    LOG.trace(prefix + " tuple=" + result.debugString());
-    return result;
-  }
-
-  /**
-   * Marks the slots required for evaluating an Analytic/AggregateInfo by
-   * resolving the materialized aggregate/analytic exprs against smap,
-   * and then marking their slots.
-   */
-  public abstract void materializeRequiredSlots(Analyzer analyzer,
-      ExprSubstitutionMap smap);
-
-  public ArrayList<Expr> getGroupingExprs() { return groupingExprs_; }
-  public ArrayList<FunctionCallExpr> getAggregateExprs() { return aggregateExprs_; }
-  public TupleDescriptor getOutputTupleDesc() { return outputTupleDesc_; }
-  public TupleDescriptor getIntermediateTupleDesc() { return intermediateTupleDesc_; }
-  public TupleId getIntermediateTupleId() { return intermediateTupleDesc_.getId(); }
-  public TupleId getOutputTupleId() { return outputTupleDesc_.getId(); }
-  public boolean requiresIntermediateTuple() {
-    Preconditions.checkNotNull(intermediateTupleDesc_);
-    Preconditions.checkNotNull(outputTupleDesc_);
-    return intermediateTupleDesc_ != outputTupleDesc_;
-  }
-
-  /**
-   * Returns true if evaluating the given aggregate exprs requires an intermediate tuple,
-   * i.e., whether one of the aggregate functions has an intermediate type different from
-   * its output type.
-   */
-  public static <T extends Expr> boolean requiresIntermediateTuple(List<T> aggExprs) {
-    for (Expr aggExpr: aggExprs) {
-      Type intermediateType = ((AggregateFunction) aggExpr.fn_).getIntermediateType();
-      if (intermediateType != null) return true;
-    }
-    return false;
-  }
-
-  public String debugString() {
-    StringBuilder out = new StringBuilder();
-    out.append(Objects.toStringHelper(this)
-        .add("grouping_exprs", Expr.debugString(groupingExprs_))
-        .add("aggregate_exprs", Expr.debugString(aggregateExprs_))
-        .add("intermediate_tuple", (intermediateTupleDesc_ == null)
-            ? "null" : intermediateTupleDesc_.debugString())
-        .add("output_tuple", (outputTupleDesc_ == null)
-            ? "null" : outputTupleDesc_.debugString())
-        .toString());
-    return out.toString();
-  }
-
-  protected abstract String tupleDebugName();
-}
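
One way to see why AggregateInfoBase keeps a separate intermediateTupleDesc_ and
outputTupleDesc_: an aggregate such as AVG accumulates a (sum, count) pair while it
is being updated and merged, but its finalized output is a single DOUBLE, so the
intermediate slot type differs from the output slot type. A minimal self-contained
sketch of that update/merge/finalize split (plain Java, not Impala's
AggregateFunction API; all names here are illustrative) might look like:

import java.util.Arrays;
import java.util.List;

public class AvgIntermediateSketch {
  // Intermediate state: a (sum, count) pair; the output type is just a Double.
  static final class AvgState {
    double sum; long count;
  }

  // update(): fold one input value into the intermediate state.
  static void update(AvgState s, double v) { s.sum += v; s.count += 1; }

  // merge(): combine two pre-aggregated intermediate states.
  static void merge(AvgState dst, AvgState src) { dst.sum += src.sum; dst.count += src.count; }

  // Plays the role of finalize(): convert the intermediate state into the output type.
  static Double finish(AvgState s) { return s.count == 0 ? null : s.sum / s.count; }

  public static void main(String[] args) {
    // Two "nodes" each pre-aggregate a slice of the input; a merge step combines them.
    List<Double> node1 = Arrays.asList(1.0, 2.0);
    List<Double> node2 = Arrays.asList(3.0);
    AvgState s1 = new AvgState(), s2 = new AvgState();
    for (double v : node1) update(s1, v);
    for (double v : node2) update(s2, v);
    merge(s1, s2);
    System.out.println(finish(s1));  // 2.0
  }
}

When the intermediate and output representations coincide (e.g. SUM over BIGINT),
a single tuple suffices, which is what requiresIntermediateTuple() checks.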

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/AlterTableAddPartitionStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/AlterTableAddPartitionStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/AlterTableAddPartitionStmt.java
deleted file mode 100644
index a5cb2ca..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/AlterTableAddPartitionStmt.java
+++ /dev/null
@@ -1,117 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.FileSystemUtil;
-import com.cloudera.impala.thrift.TAlterTableAddPartitionParams;
-import com.cloudera.impala.thrift.TAlterTableParams;
-import com.cloudera.impala.thrift.TAlterTableType;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.fs.permission.FsAction;
-
-/**
- * Represents an ALTER TABLE ADD PARTITION statement.
- */
-public class AlterTableAddPartitionStmt extends AlterTableStmt {
-  private final HdfsUri location_;
-  private final boolean ifNotExists_;
-  private final PartitionSpec partitionSpec_;
-  private final HdfsCachingOp cacheOp_;
-
-  public AlterTableAddPartitionStmt(TableName tableName,
-      PartitionSpec partitionSpec, HdfsUri location, boolean ifNotExists,
-      HdfsCachingOp cacheOp) {
-    super(tableName);
-    Preconditions.checkState(partitionSpec != null);
-    location_ = location;
-    ifNotExists_ = ifNotExists;
-    partitionSpec_ = partitionSpec;
-    partitionSpec_.setTableName(tableName);
-    cacheOp_ = cacheOp;
-  }
-
-  public boolean getIfNotExists() { return ifNotExists_; }
-  public HdfsUri getLocation() { return location_; }
-
-  @Override
-  public String toSql() {
-    StringBuilder sb = new StringBuilder("ALTER TABLE " + getTbl());
-    sb.append(" ADD ");
-    if (ifNotExists_) {
-      sb.append("IF NOT EXISTS ");
-    }
-    sb.append(" " + partitionSpec_.toSql());
-    if (location_ != null) {
-      sb.append(String.format(" LOCATION '%s'", location_));
-    }
-    if (cacheOp_ != null) sb.append(cacheOp_.toSql());
-    return sb.toString();
-  }
-
-  @Override
-  public TAlterTableParams toThrift() {
-    TAlterTableParams params = super.toThrift();
-    params.setAlter_type(TAlterTableType.ADD_PARTITION);
-    TAlterTableAddPartitionParams addPartParams = new TAlterTableAddPartitionParams();
-    addPartParams.setPartition_spec(partitionSpec_.toThrift());
-    addPartParams.setLocation(location_ == null ? null : location_.toString());
-    addPartParams.setIf_not_exists(ifNotExists_);
-    if (cacheOp_ != null) addPartParams.setCache_op(cacheOp_.toThrift());
-    params.setAdd_partition_params(addPartParams);
-    return params;
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    super.analyze(analyzer);
-    if (!ifNotExists_) partitionSpec_.setPartitionShouldNotExist();
-    partitionSpec_.setPrivilegeRequirement(Privilege.ALTER);
-    partitionSpec_.analyze(analyzer);
-
-    if (location_ != null) {
-      location_.analyze(analyzer, Privilege.ALL, FsAction.READ_WRITE);
-    }
-
-    boolean shouldCache = false;
-    Table table = getTargetTable();
-    if (cacheOp_ != null) {
-      cacheOp_.analyze(analyzer);
-      shouldCache = cacheOp_.shouldCache();
-    } else if (table instanceof HdfsTable) {
-      shouldCache = ((HdfsTable)table).isMarkedCached();
-    }
-    if (shouldCache) {
-      if (!(table instanceof HdfsTable)) {
-        throw new AnalysisException("Caching must target a HDFS table: " +
-            table.getFullName());
-      }
-      HdfsTable hdfsTable = (HdfsTable)table;
-      if ((location_ != null && !FileSystemUtil.isPathCacheable(location_.getPath())) ||
-          (location_ == null && !hdfsTable.isLocationCacheable())) {
-        throw new AnalysisException(String.format("Location '%s' cannot be cached. " +
-            "Please retry without caching: ALTER TABLE %s ADD PARTITION ... UNCACHED",
-            (location_ != null) ? location_.toString() : hdfsTable.getLocation(),
-            table.getFullName()));
-      }
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/AlterTableAddReplaceColsStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/AlterTableAddReplaceColsStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/AlterTableAddReplaceColsStmt.java
deleted file mode 100644
index aaa223a..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/AlterTableAddReplaceColsStmt.java
+++ /dev/null
@@ -1,108 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.List;
-import java.util.Set;
-
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-
-import com.cloudera.impala.catalog.Column;
-import com.cloudera.impala.catalog.HBaseTable;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TAlterTableAddReplaceColsParams;
-import com.cloudera.impala.thrift.TAlterTableParams;
-import com.cloudera.impala.thrift.TAlterTableType;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Sets;
-
-/**
- * Represents an ALTER TABLE ADD|REPLACE COLUMNS (colDef1, colDef2, ...) statement.
- */
-public class AlterTableAddReplaceColsStmt extends AlterTableStmt {
-  private final List<ColumnDef> columnDefs_;
-  private final boolean replaceExistingCols_;
-
-  public AlterTableAddReplaceColsStmt(TableName tableName, List<ColumnDef> columnDefs,
-      boolean replaceExistingCols) {
-    super(tableName);
-    Preconditions.checkState(columnDefs != null && columnDefs.size() > 0);
-    columnDefs_ = Lists.newArrayList(columnDefs);
-    replaceExistingCols_ = replaceExistingCols;
-  }
-
-  public List<ColumnDef> getColumnDescs() { return columnDefs_; }
-
-  // Replace columns instead of appending new columns.
-  public boolean getReplaceExistingCols() {
-    return replaceExistingCols_;
-  }
-
-  @Override
-  public TAlterTableParams toThrift() {
-    TAlterTableParams params = super.toThrift();
-    params.setAlter_type(TAlterTableType.ADD_REPLACE_COLUMNS);
-    TAlterTableAddReplaceColsParams colParams = new TAlterTableAddReplaceColsParams();
-    for (ColumnDef col: getColumnDescs()) {
-      colParams.addToColumns(col.toThrift());
-    }
-    colParams.setReplace_existing_cols(replaceExistingCols_);
-    params.setAdd_replace_cols_params(colParams);
-    return params;
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    super.analyze(analyzer);
-    Table t = getTargetTable();
-    // TODO: Support column-level DDL on HBase tables. Requires updating the column
-    // mappings along with the table columns.
-    if (t instanceof HBaseTable) {
-      throw new AnalysisException("ALTER TABLE ADD|REPLACE COLUMNS not currently " +
-          "supported on HBase tables.");
-    }
-
-    // Build a set of the partition keys for the table.
-    Set<String> existingPartitionKeys = Sets.newHashSet();
-    for (FieldSchema fs: t.getMetaStoreTable().getPartitionKeys()) {
-      existingPartitionKeys.add(fs.getName().toLowerCase());
-    }
-
-    // Make sure the new columns don't already exist in the table, that the names
-    // are all valid and unique, and that none of the columns conflict with
-    // partition columns.
-    Set<String> colNames = Sets.newHashSet();
-    for (ColumnDef c: columnDefs_) {
-      c.analyze();
-      String colName = c.getColName().toLowerCase();
-      if (existingPartitionKeys.contains(colName)) {
-        throw new AnalysisException(
-            "Column name conflicts with existing partition column: " + colName);
-      }
-
-      Column col = t.getColumn(colName);
-      if (col != null && !replaceExistingCols_) {
-        throw new AnalysisException("Column already exists: " + colName);
-      } else if (!colNames.add(colName)) {
-        throw new AnalysisException("Duplicate column name: " + colName);
-      }
-    }
-  }
-}
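
The analyze() method above boils down to three set-membership checks over
lowercased column names. A simplified standalone sketch of those checks (plain
Java; it throws IllegalArgumentException instead of AnalysisException and takes
plain strings instead of catalog objects, so the names and types here are
illustrative only):

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class AddReplaceColsCheckSketch {
  // Rejects names that collide with partition columns, names that already exist
  // (unless replacing), and duplicates within the new column list.
  static void check(Set<String> existingCols, Set<String> partitionKeys,
      List<String> newCols, boolean replaceExistingCols) {
    Set<String> seen = new HashSet<>();
    for (String col : newCols) {
      String name = col.toLowerCase();
      if (partitionKeys.contains(name)) {
        throw new IllegalArgumentException(
            "Column name conflicts with existing partition column: " + name);
      }
      if (existingCols.contains(name) && !replaceExistingCols) {
        throw new IllegalArgumentException("Column already exists: " + name);
      }
      if (!seen.add(name)) {
        throw new IllegalArgumentException("Duplicate column name: " + name);
      }
    }
  }

  public static void main(String[] args) {
    Set<String> existing = new HashSet<>(Arrays.asList("id", "name"));
    Set<String> partKeys = new HashSet<>(Arrays.asList("year"));
    check(existing, partKeys, Arrays.asList("address", "phone"), false);  // passes
    try {
      check(existing, partKeys, Arrays.asList("year"), false);           // rejected
    } catch (IllegalArgumentException e) {
      System.out.println(e.getMessage());
    }
  }
}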

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/AlterTableChangeColStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/AlterTableChangeColStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/AlterTableChangeColStmt.java
deleted file mode 100644
index c733ca0..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/AlterTableChangeColStmt.java
+++ /dev/null
@@ -1,101 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-
-import com.cloudera.impala.catalog.HBaseTable;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TAlterTableChangeColParams;
-import com.cloudera.impala.thrift.TAlterTableParams;
-import com.cloudera.impala.thrift.TAlterTableType;
-import com.google.common.base.Preconditions;
-
-/**
- * Represents an ALTER TABLE CHANGE COLUMN colName newColDef statement.
- * Note: It would be fairly simple to reuse this class to support ALTER TABLE MODIFY
- * newColDef statements in the future by making colName optional.
- */
-public class AlterTableChangeColStmt extends AlterTableStmt {
-  private final String colName_;
-  private final ColumnDef newColDef_;
-
-  public AlterTableChangeColStmt(TableName tableName, String colName,
-      ColumnDef newColDef) {
-    super(tableName);
-    Preconditions.checkNotNull(newColDef);
-    Preconditions.checkState(colName != null && !colName.isEmpty());
-    colName_ = colName;
-    newColDef_ = newColDef;
-  }
-
-  public String getColName() { return colName_; }
-  public ColumnDef getNewColDef() { return newColDef_; }
-
-  @Override
-  public TAlterTableParams toThrift() {
-    TAlterTableParams params = super.toThrift();
-    params.setAlter_type(TAlterTableType.CHANGE_COLUMN);
-    TAlterTableChangeColParams colParams = new TAlterTableChangeColParams();
-    colParams.setCol_name(colName_);
-    colParams.setNew_col_def(newColDef_.toThrift());
-    params.setChange_col_params(colParams);
-    return params;
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    super.analyze(analyzer);
-    Table t = getTargetTable();
-    // TODO: Support column-level DDL on HBase tables. Requires updating the column
-    // mappings along with the table columns.
-    if (t instanceof HBaseTable) {
-      throw new AnalysisException("ALTER TABLE CHANGE COLUMN not currently supported " +
-          "on HBase tables.");
-    }
-    String tableName = getDb() + "." + getTbl();
-
-    // Verify there are no conflicts with partition columns.
-    for (FieldSchema fs: t.getMetaStoreTable().getPartitionKeys()) {
-      if (fs.getName().toLowerCase().equals(colName_.toLowerCase())) {
-        throw new AnalysisException("Cannot modify partition column: " + colName_);
-      }
-      if (fs.getName().toLowerCase().equals(newColDef_.getColName().toLowerCase())) {
-        throw new AnalysisException(
-            "Column name conflicts with existing partition column: " +
-            newColDef_.getColName());
-      }
-    }
-
-    // Verify the column being modified exists in the table
-    if (t.getColumn(colName_) == null) {
-      throw new AnalysisException(String.format(
-          "Column '%s' does not exist in table: %s", colName_, tableName));
-    }
-
-    // Check that the new column def's name is valid.
-    newColDef_.analyze();
-    // Verify that if the column name is being changed, the new name doesn't conflict
-    // with an existing column.
-    if (!colName_.toLowerCase().equals(newColDef_.getColName().toLowerCase()) &&
-        t.getColumn(newColDef_.getColName()) != null) {
-      throw new AnalysisException("Column already exists: " + newColDef_.getColName());
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/AlterTableDropColStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/AlterTableDropColStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/AlterTableDropColStmt.java
deleted file mode 100644
index d7f5ab5..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/AlterTableDropColStmt.java
+++ /dev/null
@@ -1,84 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-
-import com.cloudera.impala.catalog.HBaseTable;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TAlterTableDropColParams;
-import com.cloudera.impala.thrift.TAlterTableParams;
-import com.cloudera.impala.thrift.TAlterTableType;
-import com.google.common.base.Preconditions;
-
-/**
- * Represents an ALTER TABLE DROP COLUMN statement.
- * Note: Hive does not support this syntax for dropping columns, but it is supported
- * by MySQL.
- */
-public class AlterTableDropColStmt extends AlterTableStmt {
-  private final String colName_;
-
-  public AlterTableDropColStmt(TableName tableName, String colName) {
-    super(tableName);
-    Preconditions.checkState(colName != null && !colName.isEmpty());
-    colName_ = colName;
-  }
-
-  public String getColName() { return colName_; }
-
-  @Override
-  public TAlterTableParams toThrift() {
-    TAlterTableParams params = super.toThrift();
-    params.setAlter_type(TAlterTableType.DROP_COLUMN);
-    TAlterTableDropColParams dropColParams = new TAlterTableDropColParams(colName_);
-    params.setDrop_col_params(dropColParams);
-    return params;
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    super.analyze(analyzer);
-    Table t = getTargetTable();
-    // TODO: Support column-level DDL on HBase tables. Requires updating the column
-    // mappings along with the table columns.
-    if (t instanceof HBaseTable) {
-      throw new AnalysisException("ALTER TABLE DROP COLUMN not currently supported " +
-          "on HBase tables.");
-    }
-    String tableName = getDb() + "." + getTbl();
-
-    for (FieldSchema fs: t.getMetaStoreTable().getPartitionKeys()) {
-      if (fs.getName().toLowerCase().equals(colName_.toLowerCase())) {
-        throw new AnalysisException("Cannot drop partition column: " + fs.getName());
-      }
-    }
-
-    if (t.getColumns().size() - t.getMetaStoreTable().getPartitionKeysSize() <= 1) {
-      throw new AnalysisException(String.format(
-          "Cannot drop column '%s' from %s. Tables must contain at least 1 column.",
-          colName_, tableName));
-    }
-
-    if (t.getColumn(colName_) == null) {
-      throw new AnalysisException(String.format(
-          "Column '%s' does not exist in table: %s", colName_, tableName));
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/AlterTableDropPartitionStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/AlterTableDropPartitionStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/AlterTableDropPartitionStmt.java
deleted file mode 100644
index f8bc09c..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/AlterTableDropPartitionStmt.java
+++ /dev/null
@@ -1,79 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TAlterTableDropPartitionParams;
-import com.cloudera.impala.thrift.TAlterTableParams;
-import com.cloudera.impala.thrift.TAlterTableType;
-import com.google.common.base.Preconditions;
-
-/**
- * Represents an ALTER TABLE DROP PARTITION statement.
- */
-public class AlterTableDropPartitionStmt extends AlterTableStmt {
-  private final boolean ifExists_;
-  private final PartitionSpec partitionSpec_;
-
-  // Setting this value causes dropped partition(s) to be permanently
-  // deleted. For example, for HDFS tables it skips the trash mechanism
-  private final boolean purgePartition_;
-
-  public AlterTableDropPartitionStmt(TableName tableName,
-      PartitionSpec partitionSpec, boolean ifExists, boolean purgePartition) {
-    super(tableName);
-    Preconditions.checkNotNull(partitionSpec);
-    partitionSpec_ = partitionSpec;
-    partitionSpec_.setTableName(tableName);
-    ifExists_ = ifExists;
-    purgePartition_ = purgePartition;
-  }
-
-  public boolean getIfNotExists() { return ifExists_; }
-
-  @Override
-  public String toSql() {
-    StringBuilder sb = new StringBuilder("ALTER TABLE " + getTbl());
-    sb.append(" DROP ");
-    if (ifExists_) sb.append("IF EXISTS ");
-    sb.append(" DROP " + partitionSpec_.toSql());
-    if (purgePartition_) sb.append(" PURGE");
-    return sb.toString();
-  }
-
-  @Override
-  public TAlterTableParams toThrift() {
-    TAlterTableParams params = super.toThrift();
-    params.setAlter_type(TAlterTableType.DROP_PARTITION);
-    TAlterTableDropPartitionParams addPartParams = new TAlterTableDropPartitionParams();
-    addPartParams.setPartition_spec(partitionSpec_.toThrift());
-    addPartParams.setIf_exists(ifExists_);
-    addPartParams.setPurge(purgePartition_);
-    params.setDrop_partition_params(addPartParams);
-    return params;
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    super.analyze(analyzer);
-    if (!ifExists_) partitionSpec_.setPartitionShouldExist();
-    partitionSpec_.setPrivilegeRequirement(Privilege.ALTER);
-    partitionSpec_.analyze(analyzer);
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/AlterTableOrViewRenameStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/AlterTableOrViewRenameStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/AlterTableOrViewRenameStmt.java
deleted file mode 100644
index 009535c..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/AlterTableOrViewRenameStmt.java
+++ /dev/null
@@ -1,90 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.catalog.View;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TAccessEvent;
-import com.cloudera.impala.thrift.TAlterTableOrViewRenameParams;
-import com.cloudera.impala.thrift.TAlterTableParams;
-import com.cloudera.impala.thrift.TAlterTableType;
-import com.cloudera.impala.thrift.TTableName;
-import com.google.common.base.Preconditions;
-
-/**
- * Represents an ALTER TABLE/VIEW RENAME statement.
- */
-public class AlterTableOrViewRenameStmt extends AlterTableStmt {
-  protected final TableName newTableName_;
-
-  // Set during analysis
-  protected String newDbName_;
-
-  //  True if we are renaming a table. False if we are renaming a view.
-  protected final boolean renameTable_;
-
-  public AlterTableOrViewRenameStmt(TableName oldTableName, TableName newTableName,
-      boolean renameTable) {
-    super(oldTableName);
-    Preconditions.checkState(newTableName != null && !newTableName.isEmpty());
-    newTableName_ = newTableName;
-    renameTable_ = renameTable;
-  }
-
-  public String getNewTbl() {
-    return newTableName_.getTbl();
-  }
-
-  public String getNewDb() {
-    Preconditions.checkNotNull(newDbName_);
-    return newDbName_;
-  }
-
-  @Override
-  public TAlterTableParams toThrift() {
-    TAlterTableParams params = super.toThrift();
-    params.setAlter_type(
-        (renameTable_) ? TAlterTableType.RENAME_TABLE : TAlterTableType.RENAME_VIEW);
-    TAlterTableOrViewRenameParams renameParams =
-        new TAlterTableOrViewRenameParams(new TTableName(getNewDb(), getNewTbl()));
-    params.setRename_params(renameParams);
-    return params;
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    newTableName_.analyze();
-    table_ = analyzer.getTable(tableName_, Privilege.ALTER);
-    if (table_ instanceof View && renameTable_) {
-      throw new AnalysisException(String.format(
-          "ALTER TABLE not allowed on a view: %s", table_.getFullName()));
-    }
-    if (!(table_ instanceof View) && !renameTable_) {
-      throw new AnalysisException(String.format(
-          "ALTER VIEW not allowed on a table: %s", table_.getFullName()));
-    }
-    newDbName_ = analyzer.getTargetDbName(newTableName_);
-    if (analyzer.dbContainsTable(newDbName_, newTableName_.getTbl(), Privilege.CREATE)) {
-      throw new AnalysisException(Analyzer.TBL_ALREADY_EXISTS_ERROR_MSG +
-          String.format("%s.%s", newDbName_, getNewTbl()));
-    }
-    analyzer.addAccessEvent(new TAccessEvent(newDbName_ + "." + newTableName_.getTbl(),
-        table_.getCatalogObjectType(), Privilege.CREATE.toString()));
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/AlterTableRecoverPartitionsStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/AlterTableRecoverPartitionsStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/AlterTableRecoverPartitionsStmt.java
deleted file mode 100644
index c7e796c..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/AlterTableRecoverPartitionsStmt.java
+++ /dev/null
@@ -1,56 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TAlterTableParams;
-import com.cloudera.impala.thrift.TAlterTableType;
-
-/**
- * Represents an ALTER TABLE RECOVER PARTITIONS statement.
- */
-public class AlterTableRecoverPartitionsStmt extends AlterTableStmt {
-
-  public AlterTableRecoverPartitionsStmt(TableName tableName) {
-    super(tableName);
-  }
-
-  @Override
-  public TAlterTableParams toThrift() {
-    TAlterTableParams params = super.toThrift();
-    params.setAlter_type(TAlterTableType.RECOVER_PARTITIONS);
-    return params;
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    super.analyze(analyzer);
-
-    // Make sure the target table is HdfsTable.
-    if (!(table_ instanceof HdfsTable)) {
-      throw new AnalysisException("ALTER TABLE RECOVER PARTITIONS " +
-          "must target an HDFS table: " + tableName_);
-    }
-
-    // Make sure the target table is partitioned.
-    if (table_.getMetaStoreTable().getPartitionKeysSize() == 0) {
-      throw new AnalysisException("Table is not partitioned: " + tableName_);
-    }
-  }
-}


[02/61] [partial] incubator-impala git commit: IMPALA-3786: Replace "cloudera" with "apache" (part 1)

Posted by kw...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/Expr.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/Expr.java b/fe/src/main/java/org/apache/impala/analysis/Expr.java
new file mode 100644
index 0000000..fdc5bf1
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/Expr.java
@@ -0,0 +1,1258 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import java.lang.reflect.Method;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.ListIterator;
+import java.util.Set;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.cloudera.impala.catalog.Catalog;
+import com.cloudera.impala.catalog.Function;
+import com.cloudera.impala.catalog.Function.CompareMode;
+import com.cloudera.impala.catalog.PrimitiveType;
+import com.cloudera.impala.catalog.ScalarType;
+import com.cloudera.impala.catalog.Type;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.common.TreeNode;
+import com.cloudera.impala.thrift.TExpr;
+import com.cloudera.impala.thrift.TExprNode;
+import com.google.common.base.Joiner;
+import com.google.common.base.Objects;
+import com.google.common.base.Preconditions;
+import com.google.common.base.Predicates;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
+
+/**
+ * Root of the expr node hierarchy.
+ *
+ */
+abstract public class Expr extends TreeNode<Expr> implements ParseNode, Cloneable {
+  private final static Logger LOG = LoggerFactory.getLogger(Expr.class);
+
+  // Limits on the number of expr children and the depth of an expr tree. These maximum
+  // values guard against crashes due to stack overflows (IMPALA-432) and were
+  // experimentally determined to be safe.
+  public final static int EXPR_CHILDREN_LIMIT = 10000;
+  // The expr depth limit is mostly due to our recursive implementation of clone().
+  public final static int EXPR_DEPTH_LIMIT = 1000;
+
+  // Name of the function that needs to be implemented by every Expr that
+  // supports negation.
+  private final static String NEGATE_FN = "negate";
+
+  // To be used where we cannot come up with a better estimate (selectivity_ is -1).
+  public static double DEFAULT_SELECTIVITY = 0.1;
+
+  // The relative costs of different Exprs. These numbers are not intended as a precise
+  // reflection of running times, but as simple heuristics for ordering Exprs from cheap
+  // to expensive.
+  // TODO(tmwarshall): Get these costs in a more principled way, e.g. with a benchmark.
+  public final static float ARITHMETIC_OP_COST = 1;
+  public final static float BINARY_PREDICATE_COST = 1;
+  public final static float VAR_LEN_BINARY_PREDICATE_COST = 5;
+  public final static float CAST_COST = 1;
+  public final static float COMPOUND_PREDICATE_COST = 1;
+  public final static float FUNCTION_CALL_COST = 10;
+  public final static float IS_NOT_EMPTY_COST = 1;
+  public final static float IS_NULL_COST = 1;
+  public final static float LIKE_COST = 10;
+  public final static float LITERAL_COST = 1;
+  public final static float SLOT_REF_COST = 1;
+  public final static float TIMESTAMP_ARITHMETIC_COST = 5;
+
+  // To be used when estimating the cost of Exprs of type string where we don't otherwise
+  // have an estimate of how long the strings produced by that Expr are.
+  public final static int DEFAULT_AVG_STRING_LENGTH = 5;
+
+  // returns true if an Expr is a non-analytic aggregate.
+  private final static com.google.common.base.Predicate<Expr> isAggregatePredicate_ =
+      new com.google.common.base.Predicate<Expr>() {
+        public boolean apply(Expr arg) {
+          return arg instanceof FunctionCallExpr &&
+              ((FunctionCallExpr)arg).isAggregateFunction();
+        }
+      };
+
+  // Returns true if an Expr is a NOT CompoundPredicate.
+  public final static com.google.common.base.Predicate<Expr> IS_NOT_PREDICATE =
+      new com.google.common.base.Predicate<Expr>() {
+        @Override
+        public boolean apply(Expr arg) {
+          return arg instanceof CompoundPredicate &&
+              ((CompoundPredicate)arg).getOp() == CompoundPredicate.Operator.NOT;
+        }
+      };
+
+  // Returns true if an Expr is an OR CompoundPredicate.
+  public final static com.google.common.base.Predicate<Expr> IS_OR_PREDICATE =
+      new com.google.common.base.Predicate<Expr>() {
+        @Override
+        public boolean apply(Expr arg) {
+          return arg instanceof CompoundPredicate &&
+              ((CompoundPredicate)arg).getOp() == CompoundPredicate.Operator.OR;
+        }
+      };
+
+  // Returns true if an Expr is a scalar subquery
+  public final static com.google.common.base.Predicate<Expr> IS_SCALAR_SUBQUERY =
+      new com.google.common.base.Predicate<Expr>() {
+        @Override
+        public boolean apply(Expr arg) {
+          return arg.isScalarSubquery();
+        }
+      };
+
+  // Returns true if an Expr is an aggregate function that returns non-null on
+  // an empty set (e.g. count).
+  public final static com.google.common.base.Predicate<Expr>
+      NON_NULL_EMPTY_AGG = new com.google.common.base.Predicate<Expr>() {
+        @Override
+        public boolean apply(Expr arg) {
+          return arg instanceof FunctionCallExpr &&
+              ((FunctionCallExpr)arg).returnsNonNullOnEmpty();
+        }
+      };
+
+  // Returns true if an Expr is a builtin aggregate function.
+  public final static com.google.common.base.Predicate<Expr> IS_BUILTIN_AGG_FN =
+      new com.google.common.base.Predicate<Expr>() {
+        @Override
+        public boolean apply(Expr arg) {
+          return arg instanceof FunctionCallExpr &&
+              ((FunctionCallExpr)arg).getFnName().isBuiltin();
+        }
+      };
+
+  public final static com.google.common.base.Predicate<Expr> IS_TRUE_LITERAL =
+      new com.google.common.base.Predicate<Expr>() {
+        @Override
+        public boolean apply(Expr arg) {
+          return arg instanceof BoolLiteral && ((BoolLiteral)arg).getValue();
+        }
+      };
+
+  public final static com.google.common.base.Predicate<Expr> IS_EQ_BINARY_PREDICATE =
+      new com.google.common.base.Predicate<Expr>() {
+        @Override
+        public boolean apply(Expr arg) { return BinaryPredicate.getEqSlots(arg) != null; }
+      };
+
+  public final static com.google.common.base.Predicate<Expr> IS_BINARY_PREDICATE =
+      new com.google.common.base.Predicate<Expr>() {
+        @Override
+        public boolean apply(Expr arg) { return arg instanceof BinaryPredicate; }
+      };
+
+  // id that's unique across the entire query statement and is assigned by
+  // Analyzer.registerConjuncts(); only assigned for the top-level terms of a
+  // conjunction, and therefore null for most Exprs
+  protected ExprId id_;
+
+  // true if Expr is an auxiliary predicate that was generated by the plan generation
+  // process to facilitate predicate propagation;
+  // false if Expr originated with a query stmt directly
+  private boolean isAuxExpr_ = false;
+
+  protected Type type_;  // result of analysis
+  protected boolean isAnalyzed_;  // true after analyze() has been called
+  protected boolean isOnClauseConjunct_; // set by analyzer
+
+  // Flag to indicate whether to wrap this expr's toSql() in parentheses. Set by parser.
+  // Needed for properly capturing expr precedences in the SQL string.
+  protected boolean printSqlInParens_ = false;
+
+  // Estimated probability of a predicate evaluating to true. Set during analysis.
+  // Between 0 and 1, or set to -1 if the selectivity could not be estimated.
+  protected double selectivity_;
+
+  // Estimated relative cost of evaluating this expression, including the costs of
+  // its children. Set during analysis and used to sort conjuncts within a PlanNode.
+  // Has a default value of -1 indicating unknown cost if the cost of this expression
+  // or any of its children was not set, but it is required to be set for any
+  // expression which may be part of a conjunct.
+  protected float evalCost_;
+
+  // estimated number of distinct values produced by Expr; invalid: -1
+  // set during analysis
+  protected long numDistinctValues_;
+
+  // The function to call. This can either be a scalar or aggregate function.
+  // Set in analyze().
+  protected Function fn_;
+
+  protected Expr() {
+    super();
+    type_ = Type.INVALID;
+    selectivity_ = -1.0;
+    evalCost_ = -1.0f;
+    numDistinctValues_ = -1;
+  }
+
+  /**
+   * Copy c'tor used in clone().
+   */
+  protected Expr(Expr other) {
+    id_ = other.id_;
+    isAuxExpr_ = other.isAuxExpr_;
+    type_ = other.type_;
+    isAnalyzed_ = other.isAnalyzed_;
+    isOnClauseConjunct_ = other.isOnClauseConjunct_;
+    printSqlInParens_ = other.printSqlInParens_;
+    selectivity_ = other.selectivity_;
+    evalCost_ = other.evalCost_;
+    numDistinctValues_ = other.numDistinctValues_;
+    fn_ = other.fn_;
+    children_ = Expr.cloneList(other.children_);
+  }
+
+  public ExprId getId() { return id_; }
+  protected void setId(ExprId id) { id_ = id; }
+  public Type getType() { return type_; }
+  public double getSelectivity() { return selectivity_; }
+  public boolean hasSelectivity() { return selectivity_ >= 0; }
+  public float getCost() {
+    Preconditions.checkState(isAnalyzed_);
+    return evalCost_;
+  }
+  public boolean hasCost() { return evalCost_ >= 0; }
+  public long getNumDistinctValues() { return numDistinctValues_; }
+  public void setPrintSqlInParens(boolean b) { printSqlInParens_ = b; }
+  public boolean isOnClauseConjunct() { return isOnClauseConjunct_; }
+  public void setIsOnClauseConjunct(boolean b) { isOnClauseConjunct_ = b; }
+  public boolean isAuxExpr() { return isAuxExpr_; }
+  public boolean isRegisteredPredicate() { return id_ != null; }
+  public void setIsAuxExpr() { isAuxExpr_ = true; }
+  public Function getFn() { return fn_; }
+
+  /**
+   * Perform semantic analysis of node and all of its children.
+   * Throws exception if any errors found.
+   * @see com.cloudera.impala.parser.ParseNode#analyze(com.cloudera.impala.parser.Analyzer)
+   */
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    // Check the expr child limit.
+    if (children_.size() > EXPR_CHILDREN_LIMIT) {
+      String sql = toSql();
+      String sqlSubstr = sql.substring(0, Math.min(80, sql.length()));
+      throw new AnalysisException(String.format("Exceeded the maximum number of child " +
+          "expressions (%s).\nExpression has %s children:\n%s...",
+          EXPR_CHILDREN_LIMIT, children_.size(), sqlSubstr));
+    }
+
+    // analyzer may be null for certain literal constructions (e.g. IntLiteral).
+    if (analyzer != null) {
+      analyzer.incrementCallDepth();
+      // Check the expr depth limit. Do not print the toSql() to not overflow the stack.
+      if (analyzer.getCallDepth() > EXPR_DEPTH_LIMIT) {
+        throw new AnalysisException(String.format("Exceeded the maximum depth of an " +
+            "expression tree (%s).", EXPR_DEPTH_LIMIT));
+      }
+    }
+    for (Expr child: children_) {
+      child.analyze(analyzer);
+    }
+    isAnalyzed_ = true;
+    computeNumDistinctValues();
+
+    if (analyzer != null) analyzer.decrementCallDepth();
+  }
+
+  /**
+   * Helper function to analyze this expr and assert that the analysis was successful.
+   * TODO: This function could be used in many more places to clean up. Consider
+   * adding an IAnalyzable interface or similar and moving this helper into Analyzer
+   * such that non-Expr things can use the helper also.
+   */
+  public void analyzeNoThrow(Analyzer analyzer) {
+    try {
+      analyze(analyzer);
+    } catch (AnalysisException e) {
+      throw new IllegalStateException(e);
+    }
+  }
+
+  protected void computeNumDistinctValues() {
+    if (isConstant()) {
+      numDistinctValues_ = 1;
+    } else {
+      // if this Expr contains slotrefs, we estimate the # of distinct values
+      // to be the maximum such number for any of the slotrefs;
+      // the subclass analyze() function may well want to override this, if it
+      // knows better
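+      // (e.g., for 'a + b' with NDV(a) = 10 and NDV(b) = 100, the estimate is 100)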
+      List<SlotRef> slotRefs = Lists.newArrayList();
+      this.collect(Predicates.instanceOf(SlotRef.class), slotRefs);
+      numDistinctValues_ = -1;
+      for (SlotRef slotRef: slotRefs) {
+        numDistinctValues_ = Math.max(numDistinctValues_, slotRef.numDistinctValues_);
+      }
+    }
+  }
+
+  /**
+   * Collects the return types of the child nodes in an array.
+   */
+  protected Type[] collectChildReturnTypes() {
+    Type[] childTypes = new Type[children_.size()];
+    for (int i = 0; i < children_.size(); ++i) {
+      childTypes[i] = children_.get(i).type_;
+    }
+    return childTypes;
+  }
+
+  /**
+   * Looks up in the catalog the builtin for 'name' and 'argTypes'.
+   * Returns null if the function is not found.
+   */
+  protected Function getBuiltinFunction(Analyzer analyzer, String name,
+      Type[] argTypes, CompareMode mode) throws AnalysisException {
+    FunctionName fnName = new FunctionName(Catalog.BUILTINS_DB, name);
+    Function searchDesc = new Function(fnName, argTypes, Type.INVALID, false);
+    return analyzer.getCatalog().getFunction(searchDesc, mode);
+  }
+
+  /**
+   * Generates the necessary casts for the children of this expr to call fn_.
+   * child(0) is cast to the function's first argument, child(1) to the second etc.
+   * This does not do any validation and the casts are assumed to be safe.
+   *
+   * If ignoreWildcardDecimals is true, the function will not cast arguments that
+   * are wildcard decimals. This is used for builtins where the cast is done within
+   * the BE function.
+   * Otherwise, if the function signature contains wildcard decimals, each wildcard child
+   * argument will be cast to the highest resolution that can contain all of the child
+   * wildcard arguments.
+   * e.g. fn(decimal(*), decimal(*))
+   *      called with fn(decimal(10,2), decimal(5,3))
+   * both children will be cast to (11, 3).
+   */
+  protected void castForFunctionCall(boolean ignoreWildcardDecimals)
+      throws AnalysisException {
+    Preconditions.checkState(fn_ != null);
+    Type[] fnArgs = fn_.getArgs();
+    Type resolvedWildcardType = getResolvedWildCardType();
+    for (int i = 0; i < children_.size(); ++i) {
+      // For varargs, we must compare with the last type in fnArgs.argTypes.
+      int ix = Math.min(fnArgs.length - 1, i);
+      if (fnArgs[ix].isWildcardDecimal()) {
+        if (children_.get(i).type_.isDecimal() && ignoreWildcardDecimals) continue;
+        Preconditions.checkState(resolvedWildcardType != null);
+        if (!children_.get(i).type_.equals(resolvedWildcardType)) {
+          castChild(resolvedWildcardType, i);
+        }
+      } else if (!children_.get(i).type_.matchesType(fnArgs[ix])) {
+        castChild(fnArgs[ix], i);
+      }
+    }
+  }
+
+  /**
+   * Returns the max resolution type of all the wild card decimal types.
+   * Returns null if there are no wild card types.
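+   * For example, with children of types DECIMAL(10,2) and DECIMAL(5,3), the wildcard
+   * arguments resolve to DECIMAL(11,3), as in the castForFunctionCall() example above.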
+   */
+  Type getResolvedWildCardType() throws AnalysisException {
+    Type result = null;
+    Type[] fnArgs = fn_.getArgs();
+    for (int i = 0; i < children_.size(); ++i) {
+      // For varargs, we must compare with the last type in fnArgs.argTypes.
+      int ix = Math.min(fnArgs.length - 1, i);
+      if (!fnArgs[ix].isWildcardDecimal()) continue;
+
+      Type childType = children_.get(i).type_;
+      Preconditions.checkState(!childType.isWildcardDecimal(),
+          "Child expr should have been resolved.");
+      Preconditions.checkState(childType.isScalarType(),
+          "Function should not have resolved with a non-scalar child type.");
+      ScalarType decimalType = (ScalarType) childType;
+      if (result == null) {
+        result = decimalType.getMinResolutionDecimal();
+      } else {
+        result = Type.getAssignmentCompatibleType(result, childType, false);
+      }
+    }
+    if (result != null) {
+      if (result.isNull()) {
+        throw new AnalysisException(
+            "Cannot resolve DECIMAL precision and scale from NULL type.");
+      }
+      Preconditions.checkState(result.isDecimal() && !result.isWildcardDecimal());
+    }
+    return result;
+  }
+
+  /**
+   * Returns true if e is a CastExpr and the target type is a decimal.
+   */
+  private boolean isExplicitCastToDecimal(Expr e) {
+    if (!(e instanceof CastExpr)) return false;
+    CastExpr c = (CastExpr)e;
+    return !c.isImplicit() && c.getType().isDecimal();
+  }
+
+  /**
+   * Returns a clone of child with all decimal-typed NumericLiterals in it explicitly
+   * cast to targetType.
+   */
+  private Expr convertDecimalLiteralsToFloat(Analyzer analyzer, Expr child,
+      Type targetType) throws AnalysisException {
+    if (!targetType.isFloatingPointType() && !targetType.isIntegerType()) return child;
+    if (targetType.isIntegerType()) targetType = Type.DOUBLE;
+    List<NumericLiteral> literals = Lists.newArrayList();
+    child.collectAll(Predicates.instanceOf(NumericLiteral.class), literals);
+    ExprSubstitutionMap smap = new ExprSubstitutionMap();
+    for (NumericLiteral l: literals) {
+      if (!l.getType().isDecimal()) continue;
+      NumericLiteral castLiteral = (NumericLiteral) l.clone();
+      castLiteral.explicitlyCastToFloat(targetType);
+      smap.put(l, castLiteral);
+    }
+    return child.substitute(smap, analyzer, false);
+  }
+
+  /**
+   * Converts numeric literals in the expr tree rooted at this expr to return floating
+   * point types instead of decimals, if possible.
+   *
+   * Decimal has a higher processing cost than floating point and we should not pay
+   * the cost if the user does not require the accuracy. For example:
+   * "select float_col + 1.1" would start out with 1.1 as a decimal(2,1) and the
+   * float_col would be promoted to a high accuracy decimal. This function will identify
+   * this case and treat 1.1 as a float.
+   * In the case of "decimal_col + 1.1", 1.1 would remain a decimal.
+   * In the case of "float_col + cast(1.1 as decimal(2,1))", the result would be a
+   * decimal.
+   *
+   * Another way to think about it is that DecimalLiterals are analyzed as returning
+   * decimals (of the narrowest precision/scale) and we later convert them to a floating
+   * point type when it is consistent with the user's intent.
+   *
+   * TODO: another option is to do constant folding in the FE and then apply this rule.
+   */
+  protected void convertNumericLiteralsFromDecimal(Analyzer analyzer)
+      throws AnalysisException {
+    Preconditions.checkState(this instanceof ArithmeticExpr ||
+        this instanceof BinaryPredicate);
+    if (children_.size() == 1) return; // Do not attempt to convert for unary ops
+    Preconditions.checkState(children_.size() == 2);
+    Type t0 = getChild(0).getType();
+    Type t1 = getChild(1).getType();
+    boolean c0IsConstantDecimal = getChild(0).isConstant() && t0.isDecimal();
+    boolean c1IsConstantDecimal = getChild(1).isConstant() && t1.isDecimal();
+    if (c0IsConstantDecimal && c1IsConstantDecimal) return;
+    if (!c0IsConstantDecimal && !c1IsConstantDecimal) return;
+
+    // Only child(0) or child(1) is a const decimal. See if we can cast it to
+    // the type of the other child.
+    if (c0IsConstantDecimal && !isExplicitCastToDecimal(getChild(0))) {
+      Expr c0 = convertDecimalLiteralsToFloat(analyzer, getChild(0), t1);
+      setChild(0, c0);
+    }
+    if (c1IsConstantDecimal && !isExplicitCastToDecimal(getChild(1))) {
+      Expr c1 = convertDecimalLiteralsToFloat(analyzer, getChild(1), t0);
+      setChild(1, c1);
+    }
+  }
+
+  /**
+   * Helper function: analyze list of exprs
+   */
+  public static void analyze(List<? extends Expr> exprs, Analyzer analyzer)
+      throws AnalysisException {
+    if (exprs == null) return;
+    for (Expr expr: exprs) {
+      expr.analyze(analyzer);
+    }
+  }
+
+  @Override
+  public String toSql() {
+    return (printSqlInParens_) ? "(" + toSqlImpl() + ")" : toSqlImpl();
+  }
+
+  /**
+   * Returns a SQL string representing this expr. Subclasses should override this method
+   * instead of toSql() to ensure that parentheses are properly added around the toSql().
+   */
+  protected abstract String toSqlImpl();
+
+  // Convert this expr, including all children, to its Thrift representation.
+  public TExpr treeToThrift() {
+    if (type_.isNull()) {
+      // Hack to ensure BE never sees TYPE_NULL. If an expr makes it this far without
+      // being cast to a non-NULL type, the type doesn't matter and we can cast it
+      // arbitrarily.
+      Preconditions.checkState(this instanceof NullLiteral || this instanceof SlotRef);
+      return NullLiteral.create(ScalarType.BOOLEAN).treeToThrift();
+    }
+    TExpr result = new TExpr();
+    treeToThriftHelper(result);
+    return result;
+  }
+
+  // Append a flattened version of this expr, including all children, to 'container'.
+  protected void treeToThriftHelper(TExpr container) {
+    Preconditions.checkState(isAnalyzed_,
+        "Must be analyzed before serializing to thrift. %s", this);
+    Preconditions.checkState(!type_.isWildcardDecimal());
+    // The BE should never see TYPE_NULL
+    Preconditions.checkState(!type_.isNull(), "Expr has type null!");
+    TExprNode msg = new TExprNode();
+    msg.type = type_.toThrift();
+    msg.num_children = children_.size();
+    if (fn_ != null) {
+      msg.setFn(fn_.toThrift());
+      if (fn_.hasVarArgs()) msg.setVararg_start_idx(fn_.getNumArgs() - 1);
+    }
+    toThrift(msg);
+    container.addToNodes(msg);
+    for (Expr child: children_) {
+      child.treeToThriftHelper(container);
+    }
+  }
+
+  // Convert this expr into msg (excluding children), which requires setting
+  // msg.op as well as the expr-specific field.
+  protected abstract void toThrift(TExprNode msg);
+
+  /**
+   * Returns the product of the given exprs' number of distinct values or -1 if any of
+   * the exprs have an invalid number of distinct values.
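+   * For example, exprs with 10 and 20 distinct values yield 200, while any expr
+   * reporting -1 makes the overall result -1.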
+   */
+  public static long getNumDistinctValues(List<Expr> exprs) {
+    if (exprs == null || exprs.isEmpty()) return 0;
+    long numDistinctValues = 1;
+    for (Expr expr: exprs) {
+      if (expr.getNumDistinctValues() == -1) {
+        numDistinctValues = -1;
+        break;
+      }
+      numDistinctValues *= expr.getNumDistinctValues();
+    }
+    return numDistinctValues;
+  }
+
+  public static List<TExpr> treesToThrift(List<? extends Expr> exprs) {
+    List<TExpr> result = Lists.newArrayList();
+    for (Expr expr: exprs) {
+      result.add(expr.treeToThrift());
+    }
+    return result;
+  }
+
+  public static com.google.common.base.Predicate<Expr> isAggregatePredicate() {
+    return isAggregatePredicate_;
+  }
+
+  public boolean isAggregate() {
+    return isAggregatePredicate_.apply(this);
+  }
+
+  public List<String> childrenToSql() {
+    List<String> result = Lists.newArrayList();
+    for (Expr child: children_) {
+      result.add(child.toSql());
+    }
+    return result;
+  }
+
+  public String debugString() {
+    return (id_ != null ? "exprid=" + id_.toString() + " " : "") + debugString(children_);
+  }
+
+  public static String debugString(List<? extends Expr> exprs) {
+    if (exprs == null || exprs.isEmpty()) return "";
+    List<String> strings = Lists.newArrayList();
+    for (Expr expr: exprs) {
+      strings.add(expr.debugString());
+    }
+    return Joiner.on(" ").join(strings);
+  }
+
+  public static String toSql(List<? extends Expr> exprs) {
+    if (exprs == null || exprs.isEmpty()) return "";
+    List<String> strings = Lists.newArrayList();
+    for (Expr expr: exprs) {
+      strings.add(expr.toSql());
+    }
+    return Joiner.on(", ").join(strings);
+  }
+
+  /**
+   * Returns true if two expressions are equal. The equality comparison works on analyzed
+   * as well as unanalyzed exprs by ignoring implicit casts (see CastExpr.equals()).
+   */
+  @Override
+  public boolean equals(Object obj) {
+    if (obj == null) return false;
+    if (obj.getClass() != this.getClass()) return false;
+    // don't compare type, this could be called pre-analysis
+    Expr expr = (Expr) obj;
+    if (children_.size() != expr.children_.size()) return false;
+    for (int i = 0; i < children_.size(); ++i) {
+      if (!children_.get(i).equals(expr.children_.get(i))) return false;
+    }
+    if (fn_ == null && expr.fn_ == null) return true;
+    if (fn_ == null || expr.fn_ == null) return false; // One null, one not
+    // Both fn_'s are not null
+    return fn_.equals(expr.fn_);
+  }
+
+  /**
+   * Return true if l1[i].equals(l2[i]) for all i.
+   */
+  public static <C extends Expr> boolean equalLists(List<C> l1, List<C> l2) {
+    if (l1.size() != l2.size()) return false;
+    Iterator<C> l1Iter = l1.iterator();
+    Iterator<C> l2Iter = l2.iterator();
+    while (l1Iter.hasNext()) {
+      if (!l1Iter.next().equals(l2Iter.next())) return false;
+    }
+    return true;
+  }
+
+  /**
+   * Return true if l1 equals l2 when both lists are interpreted as sets.
+   * TODO: come up with something better than O(n^2)?
+   */
+  public static <C extends Expr> boolean equalSets(List<C> l1, List<C> l2) {
+    if (l1.size() != l2.size()) return false;
+    return l1.containsAll(l2) && l2.containsAll(l1);
+  }
+
+  /**
+   * Return true if l1 is a subset of l2.
+   */
+  public static <C extends Expr> boolean isSubset(List<C> l1, List<C> l2) {
+    if (l1.size() > l2.size()) return false;
+    return l2.containsAll(l1);
+  }
+
+  /**
+   * Return the intersection of l1 and l2.
+   */
+  public static <C extends Expr> List<C> intersect(List<C> l1, List<C> l2) {
+    List<C> result = new ArrayList<C>();
+    for (C element: l1) {
+      if (l2.contains(element)) result.add(element);
+    }
+    return result;
+  }
+
+  /**
+   * Compute the intersection of l1 and l2, given the smap, and
+   * return the intersecting l1 elements in i1 and the intersecting l2 elements in i2.
+   */
+  public static void intersect(Analyzer analyzer,
+      List<Expr> l1, List<Expr> l2, ExprSubstitutionMap smap,
+      List<Expr> i1, List<Expr> i2) {
+    i1.clear();
+    i2.clear();
+    List<Expr> s1List = Expr.substituteList(l1, smap, analyzer, false);
+    Preconditions.checkState(s1List.size() == l1.size());
+    List<Expr> s2List = Expr.substituteList(l2, smap, analyzer, false);
+    Preconditions.checkState(s2List.size() == l2.size());
+    for (int i = 0; i < s1List.size(); ++i) {
+      Expr s1 = s1List.get(i);
+      for (int j = 0; j < s2List.size(); ++j) {
+        Expr s2 = s2List.get(j);
+        if (s1.equals(s2)) {
+          i1.add(l1.get(i));
+          i2.add(l2.get(j));
+          break;
+        }
+      }
+    }
+  }
+
+  @Override
+  public int hashCode() {
+    if (id_ == null) {
+      throw new UnsupportedOperationException("Expr.hashCode() is not implemented");
+    } else {
+      return id_.asInt();
+    }
+  }
+
+  /**
+   * Gather conjuncts from this expr and return them in a list.
+   * A conjunct is an expr that returns a boolean, e.g., Predicates, function calls,
+   * SlotRefs, etc. Hence, this method is placed here and not in Predicate.
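+   * For example, (a AND b) AND c yields the conjuncts [a, b, c], while (a OR b)
+   * is returned as a single conjunct.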
+   */
+  public List<Expr> getConjuncts() {
+    List<Expr> list = Lists.newArrayList();
+    if (this instanceof CompoundPredicate
+        && ((CompoundPredicate) this).getOp() == CompoundPredicate.Operator.AND) {
+      // TODO: we have to convert CompoundPredicate.AND to two expr trees for
+      // conjuncts because NULLs are handled differently for CompoundPredicate.AND
+      // and conjunct evaluation.  This is not optimal for jitted exprs because it
+      // will result in two functions instead of one. Create a new CompoundPredicate
+      // Operator (i.e. CONJUNCT_AND) with the right NULL semantics and use that
+      // instead
+      list.addAll((getChild(0)).getConjuncts());
+      list.addAll((getChild(1)).getConjuncts());
+    } else {
+      list.add(this);
+    }
+    return list;
+  }
+
+  /**
+   * Returns an analyzed clone of 'this' with exprs substituted according to smap.
+   * Removes implicit casts and analysis state while cloning/substituting exprs within
+   * this tree, such that the returned result has minimal implicit casts and types.
+   * Throws if analyzing the post-substitution expr tree failed.
+   * If smap is null, this function is equivalent to clone().
+   * If preserveRootType is true, the resulting expr tree will be cast if necessary to
+   * the type of 'this'.
+   */
+  public Expr trySubstitute(ExprSubstitutionMap smap, Analyzer analyzer,
+      boolean preserveRootType)
+      throws AnalysisException {
+    Expr result = clone();
+    // Return clone to avoid removing casts.
+    if (smap == null) return result;
+    result = result.substituteImpl(smap, analyzer);
+    result.analyze(analyzer);
+    if (preserveRootType && !type_.equals(result.getType())) result = result.castTo(type_);
+    return result;
+  }
+
+  /**
+   * Returns an analyzed clone of 'this' with exprs substituted according to smap.
+   * Removes implicit casts and analysis state while cloning/substituting exprs within
+   * this tree, such that the returned result has minimal implicit casts and types.
+   * Expects the analysis of the post-substitution expr to succeed.
+   * If smap is null, this function is equivalent to clone().
+   * If preserveRootType is true, the resulting expr tree will be cast if necessary to
+   * the type of 'this'.
+   */
+  public Expr substitute(ExprSubstitutionMap smap, Analyzer analyzer,
+      boolean preserveRootType) {
+    try {
+      return trySubstitute(smap, analyzer, preserveRootType);
+    } catch (Exception e) {
+      throw new IllegalStateException("Failed analysis after expr substitution.", e);
+    }
+  }
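+
+  // Illustrative (hypothetical) usage of substitute(): given an ExprSubstitutionMap
+  // that maps SlotRef 'a' to SlotRef 'b',
+  //   Expr rewritten = predicate.substitute(smap, analyzer, true);
+  // returns an analyzed copy of 'predicate' with 'a' replaced by 'b', cast back to
+  // the original type of 'predicate' when preserveRootType is true.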
+
+  public static ArrayList<Expr> trySubstituteList(Iterable<? extends Expr> exprs,
+      ExprSubstitutionMap smap, Analyzer analyzer, boolean preserveRootTypes)
+          throws AnalysisException {
+    if (exprs == null) return null;
+    ArrayList<Expr> result = new ArrayList<Expr>();
+    for (Expr e: exprs) {
+      result.add(e.trySubstitute(smap, analyzer, preserveRootTypes));
+    }
+    return result;
+  }
+
+  public static ArrayList<Expr> substituteList(Iterable<? extends Expr> exprs,
+      ExprSubstitutionMap smap, Analyzer analyzer, boolean preserveRootTypes) {
+    try {
+      return trySubstituteList(exprs, smap, analyzer, preserveRootTypes);
+    } catch (Exception e) {
+      throw new IllegalStateException("Failed analysis after expr substitution.", e);
+    }
+  }
+
+  /**
+   * Recursive method that performs the actual substitution for try/substitute() while
+   * removing implicit casts. Resets the analysis state in all non-SlotRef expressions.
+   * Exprs that have non-child exprs which should be affected by substitutions must
+   * override this method and apply the substitution to such exprs as well.
+   */
+  protected Expr substituteImpl(ExprSubstitutionMap smap, Analyzer analyzer)
+      throws AnalysisException {
+    if (isImplicitCast()) return getChild(0).substituteImpl(smap, analyzer);
+    if (smap != null) {
+      Expr substExpr = smap.get(this);
+      if (substExpr != null) return substExpr.clone();
+    }
+    for (int i = 0; i < children_.size(); ++i) {
+      children_.set(i, children_.get(i).substituteImpl(smap, analyzer));
+    }
+    // SlotRefs must remain analyzed to support substitution across query blocks. All
+    // other exprs must be analyzed again after the substitution to add implicit casts
+    // and for resolving their correct function signature.
+    if (!(this instanceof SlotRef)) resetAnalysisState();
+    return this;
+  }
+
+  /**
+   * Resets the internal state of this expr produced by analyze().
+   * Only modifies this expr, and not its child exprs.
+   */
+  protected void resetAnalysisState() { isAnalyzed_ = false; }
+
+  /**
+   * Resets the internal analysis state of this expr tree. Removes implicit casts.
+   */
+  public Expr reset() {
+    if (isImplicitCast()) return getChild(0).reset();
+    for (int i = 0; i < children_.size(); ++i) {
+      children_.set(i, children_.get(i).reset());
+    }
+    resetAnalysisState();
+    return this;
+  }
+
+  public static ArrayList<Expr> resetList(ArrayList<Expr> l) {
+    for (int i = 0; i < l.size(); ++i) {
+      l.set(i, l.get(i).reset());
+    }
+    return l;
+  }
+
+  /**
+   * Creates a deep copy of this expr including its analysis state. The method is
+   * abstract in this class to force new Exprs to implement it.
+   */
+  @Override
+  public abstract Expr clone();
+
+  /**
+   * Create a deep copy of 'l'. The elements of the returned list are of the same
+   * type as the input list.
+   */
+  public static <C extends Expr> ArrayList<C> cloneList(List<C> l) {
+    Preconditions.checkNotNull(l);
+    ArrayList<C> result = new ArrayList<C>(l.size());
+    for (Expr element: l) {
+      result.add((C) element.clone());
+    }
+    return result;
+  }
+
+  /**
+   * Removes duplicate exprs (according to equals()).
+   */
+  public static <C extends Expr> void removeDuplicates(List<C> l) {
+    if (l == null) return;
+    ListIterator<C> it1 = l.listIterator();
+    while (it1.hasNext()) {
+      C e1 = it1.next();
+      ListIterator<C> it2 = l.listIterator();
+      boolean duplicate = false;
+      while (it2.hasNext()) {
+        C e2 = it2.next();
+        // only check up to but excluding e1
+        if (e1 == e2) break;
+        if (e1.equals(e2)) {
+          duplicate = true;
+          break;
+        }
+      }
+      if (duplicate) it1.remove();
+    }
+  }
+
+  /**
+   * Removes constant exprs
+   */
+  public static <C extends Expr> void removeConstants(List<C> l) {
+    if (l == null) return;
+    ListIterator<C> it = l.listIterator();
+    while (it.hasNext()) {
+      C e = it.next();
+      if (e.isConstant()) it.remove();
+    }
+  }
+
+  /**
+   * Returns true if expr is fully bound by tid, otherwise false.
+   */
+  public boolean isBound(TupleId tid) {
+    return isBoundByTupleIds(Lists.newArrayList(tid));
+  }
+
+  /**
+   * Returns true if expr is fully bound by tids, otherwise false.
+   */
+  public boolean isBoundByTupleIds(List<TupleId> tids) {
+    for (Expr child: children_) {
+      if (!child.isBoundByTupleIds(tids)) return false;
+    }
+    return true;
+  }
+
+  /**
+   * Returns true if expr is fully bound by slotId, otherwise false.
+   */
+  public boolean isBound(SlotId slotId) {
+    return isBoundBySlotIds(Lists.newArrayList(slotId));
+  }
+
+  /**
+   * Returns true if expr is fully bound by slotIds, otherwise false.
+   */
+  public boolean isBoundBySlotIds(List<SlotId> slotIds) {
+    for (Expr child: children_) {
+      if (!child.isBoundBySlotIds(slotIds)) return false;
+    }
+    return true;
+  }
+
+  public static boolean isBound(List<? extends Expr> exprs, List<TupleId> tids) {
+    for (Expr expr: exprs) {
+      if (!expr.isBoundByTupleIds(tids)) return false;
+    }
+    return true;
+  }
+
+  public static Expr getFirstBoundChild(Expr expr, List<TupleId> tids) {
+    for (Expr child: expr.getChildren()) {
+      if (child.isBoundByTupleIds(tids)) return child;
+    }
+    return null;
+  }
+
+  public void getIds(List<TupleId> tupleIds, List<SlotId> slotIds) {
+    Set<TupleId> tupleIdSet = Sets.newHashSet();
+    Set<SlotId> slotIdSet = Sets.newHashSet();
+    getIdsHelper(tupleIdSet, slotIdSet);
+    if (tupleIds != null) tupleIds.addAll(tupleIdSet);
+    if (slotIds != null) slotIds.addAll(slotIdSet);
+  }
+
+  protected void getIdsHelper(Set<TupleId> tupleIds, Set<SlotId> slotIds) {
+    for (Expr child: children_) {
+      child.getIdsHelper(tupleIds, slotIds);
+    }
+  }
+
+  public static <C extends Expr> void getIds(List<? extends Expr> exprs,
+      List<TupleId> tupleIds, List<SlotId> slotIds) {
+    if (exprs == null) return;
+    for (Expr e: exprs) {
+      e.getIds(tupleIds, slotIds);
+    }
+  }
+
+  /**
+   * @return true if this is an instance of LiteralExpr
+   */
+  public boolean isLiteral() {
+    return this instanceof LiteralExpr;
+  }
+
+  /**
+   * @return true if this expr can be evaluated with Expr::GetValue(NULL),
+   * i.e. if it doesn't contain any references to runtime variables (e.g. slot refs).
+   * Expr subclasses should override this if necessary (e.g. SlotRef, Subquery, etc.
+   * always return false).
+   */
+  public boolean isConstant() {
+    for (Expr expr : children_) {
+      if (!expr.isConstant()) return false;
+    }
+    return true;
+  }
+
+  /**
+   * @return true if this expr is either a null literal or a cast from
+   * a null literal.
+   */
+  public boolean isNullLiteral() {
+    if (this instanceof NullLiteral) return true;
+    if (!(this instanceof CastExpr)) return false;
+    Preconditions.checkState(children_.size() == 1);
+    return children_.get(0).isNullLiteral();
+  }
+
+  /**
+   * Return true if this expr is a scalar subquery.
+   */
+  public boolean isScalarSubquery() {
+    Preconditions.checkState(isAnalyzed_);
+    return this instanceof Subquery && getType().isScalarType();
+  }
+
+  /**
+   * Checks whether this expr returns a boolean type or NULL type.
+   * If not, throws an AnalysisException with an appropriate error message using
+   * 'name' as a prefix. For example, 'name' could be "WHERE clause".
+   * The error message only contains this.toSql() if printExpr is true.
+   */
+  public void checkReturnsBool(String name, boolean printExpr) throws AnalysisException {
+    if (!type_.isBoolean() && !type_.isNull()) {
+      throw new AnalysisException(
+          String.format("%s%s requires return type 'BOOLEAN'. " +
+              "Actual type is '%s'.", name, (printExpr) ? " '" + toSql() + "'" : "",
+              type_.toString()));
+    }
+  }
+
+  /**
+   * Casts this expr to a specific target type. It checks the validity of the cast and
+   * calls uncheckedCastTo().
+   * @param targetType
+   *          type to be cast to
+   * @return cast expression, or converted literal,
+   *         should never return null
+   * @throws AnalysisException
+   *           when an invalid cast is asked for, for example,
+   *           failure to convert a string literal to a date literal
+   */
+  public final Expr castTo(Type targetType) throws AnalysisException {
+    Type type = Type.getAssignmentCompatibleType(this.type_, targetType, false);
+    Preconditions.checkState(type.isValid(), "cast %s to %s", this.type_, targetType);
+    // If the targetType is NULL_TYPE then ignore the cast because NULL_TYPE
+    // is compatible with all types and no cast is necessary.
+    if (targetType.isNull()) return this;
+    if (!targetType.isDecimal()) {
+      // requested cast must be to assignment-compatible type
+      // (which implies no loss of precision)
+      Preconditions.checkArgument(targetType.equals(type),
+          "targetType=" + targetType + " type=" + type);
+    }
+    return uncheckedCastTo(targetType);
+  }
+
+  /**
+   * Create an expression equivalent to 'this' but returning targetType;
+   * possibly by inserting an implicit cast,
+   * or by returning an altogether new expression,
+   * or by returning 'this' with a modified return type.
+   * @param targetType
+   *          type to be cast to
+   * @return cast expression, or converted literal,
+   *         should never return null
+   * @throws AnalysisException
+   *           when an invalid cast is asked for, for example,
+   *           failure to convert a string literal to a date literal
+   */
+  protected Expr uncheckedCastTo(Type targetType) throws AnalysisException {
+    return new CastExpr(targetType, this);
+  }
+
+  /**
+   * Add a cast expression above child.
+   * If child is a literal expression, we attempt to
+   * convert the value of the child directly, and not insert a cast node.
+   * @param targetType
+   *          type to be cast to
+   * @param childIndex
+   *          index of child to be cast
+   */
+  public void castChild(Type targetType, int childIndex) throws AnalysisException {
+    Expr child = getChild(childIndex);
+    Expr newChild = child.castTo(targetType);
+    setChild(childIndex, newChild);
+  }
+
+
+  /**
+   * Convert child to targetType, possibly by inserting an implicit cast, or by
+   * returning an altogether new expression, or by returning 'this' with a modified
+   * return type.
+   * @param targetType
+   *          type to be cast to
+   * @param childIndex
+   *          index of child to be cast
+   */
+  protected void uncheckedCastChild(Type targetType, int childIndex)
+      throws AnalysisException {
+    Expr child = getChild(childIndex);
+    Expr newChild = child.uncheckedCastTo(targetType);
+    setChild(childIndex, newChild);
+  }
+
+  /**
+   * Returns child expr if this expr is an implicit cast, otherwise returns 'this'.
+   */
+  public Expr ignoreImplicitCast() {
+    if (isImplicitCast()) return getChild(0).ignoreImplicitCast();
+    return this;
+  }
+
+  /**
+   * Returns true if 'this' is an implicit cast expr.
+   */
+  public boolean isImplicitCast() {
+    return this instanceof CastExpr && ((CastExpr) this).isImplicit();
+  }
+
+  @Override
+  public String toString() {
+    return Objects.toStringHelper(this.getClass())
+        .add("id", id_)
+        .add("type", type_)
+        .add("sel", selectivity_)
+        .add("evalCost", evalCost_)
+        .add("#distinct", numDistinctValues_)
+        .toString();
+  }
+
+  /**
+   * If 'this' is a SlotRef or a Cast that wraps a SlotRef, returns that SlotRef.
+   * Otherwise returns null.
+   */
+  public SlotRef unwrapSlotRef(boolean implicitOnly) {
+    if (this instanceof SlotRef) {
+      return (SlotRef) this;
+    } else if (this instanceof CastExpr
+        && (!implicitOnly || ((CastExpr) this).isImplicit())
+        && getChild(0) instanceof SlotRef) {
+      return (SlotRef) getChild(0);
+    } else {
+      return null;
+    }
+  }
+
+  /**
+   * Returns the descriptor of the scan slot that directly or indirectly produces
+   * the values of 'this' SlotRef. Traverses the source exprs of intermediate slot
+   * descriptors to resolve materialization points (e.g., aggregations).
+   * Returns null if 'this' or any of its source exprs is not a SlotRef or cast SlotRef.
+   */
+  public SlotDescriptor findSrcScanSlot() {
+    SlotRef slotRef = unwrapSlotRef(false);
+    if (slotRef == null) return null;
+    SlotDescriptor slotDesc = slotRef.getDesc();
+    if (slotDesc.isScanSlot()) return slotDesc;
+    if (slotDesc.getSourceExprs().size() == 1) {
+      return slotDesc.getSourceExprs().get(0).findSrcScanSlot();
+    }
+    // No known source expr, or there are several source exprs, meaning the slot
+    // has no single source table.
+    return null;
+  }
+
+  /**
+   * Pushes negation to the individual operands of a predicate
+   * tree rooted at 'root'.
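+   * For example, NOT (a AND b) would be rewritten to (NOT a) OR (NOT b), assuming
+   * the child predicate implements 'negate' (a De Morgan-style rewrite).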
+   */
+  public static Expr pushNegationToOperands(Expr root) {
+    Preconditions.checkNotNull(root);
+    if (Expr.IS_NOT_PREDICATE.apply(root)) {
+      try {
+        // Make sure we call function 'negate' only on classes that support it,
+        // otherwise we may recurse infinitely.
+        Method m = root.getChild(0).getClass().getDeclaredMethod(NEGATE_FN);
+        return pushNegationToOperands(root.getChild(0).negate());
+      } catch (NoSuchMethodException e) {
+        // The 'negate' function is not implemented. Break the recursion.
+        return root;
+      }
+    }
+
+    if (root instanceof CompoundPredicate) {
+      Expr left = pushNegationToOperands(root.getChild(0));
+      Expr right = pushNegationToOperands(root.getChild(1));
+      return new CompoundPredicate(((CompoundPredicate)root).getOp(), left, right);
+    }
+
+    return root;
+  }
+
+  /**
+   * Negates a boolean Expr.
+   */
+  public Expr negate() {
+    Preconditions.checkState(type_.getPrimitiveType() == PrimitiveType.BOOLEAN);
+    return new CompoundPredicate(CompoundPredicate.Operator.NOT, this, null);
+  }
+
+  /**
+   * Returns the subquery of an expr. Returns null if this expr does not contain
+   * a subquery.
+   *
+   * TODO: Support predicates with more than one subquery when we implement
+   * the independent subquery evaluation.
+   */
+  public Subquery getSubquery() {
+    if (!contains(Subquery.class)) return null;
+    List<Subquery> subqueries = Lists.newArrayList();
+    collect(Subquery.class, subqueries);
+    Preconditions.checkState(subqueries.size() == 1);
+    return subqueries.get(0);
+  }
+
+  /**
+   * For children of 'this' that are constant expressions whose type has a
+   * LiteralExpr subclass, evaluate them in the BE and substitute the child with the
+   * resulting LiteralExpr. Modifies 'this' in place and does not re-analyze it. Hence,
+   * it is not safe to evaluate the modified expr in the BE as the resolved fn_ may be
+   * incorrect given the new arguments.
+   *
+   * Throws an AnalysisException if the evaluation fails in the BE.
+   *
+   * TODO: Convert to a generic constant expr folding function to be used during analysis.
+   */
+  public void foldConstantChildren(Analyzer analyzer) throws AnalysisException {
+    Preconditions.checkState(isAnalyzed_);
+    Preconditions.checkNotNull(analyzer);
+    for (int i = 0; i < children_.size(); ++i) {
+      Expr child = getChild(i);
+      if (child.isLiteral() || !child.isConstant()) continue;
+      LiteralExpr literalExpr = LiteralExpr.create(child, analyzer.getQueryCtx());
+      if (literalExpr == null) continue;
+      setChild(i, literalExpr);
+    }
+    isAnalyzed_ = false;
+  }
+
+  /**
+   * Returns true iff all of this Expr's children have their costs set.
+   */
+  protected boolean hasChildCosts() {
+    for (Expr child : children_) {
+      if (!child.hasCost()) return false;
+    }
+    return true;
+  }
+
+  /**
+   * Computes and returns the sum of the costs of all of this Expr's children.
+   */
+  protected float getChildCosts() {
+    float cost = 0;
+    for (Expr child : children_) cost += child.getCost();
+    return cost;
+  }
+
+  /**
+   * Returns the average length of the values produced by an Expr
+   * of type string. Returns a default for unknown lengths.
+   */
+  protected static double getAvgStringLength(Expr e) {
+    Preconditions.checkState(e.getType().isStringType());
+    Preconditions.checkState(e.isAnalyzed_);
+
+    SlotRef ref = e.unwrapSlotRef(false);
+    if (ref != null) {
+      if (ref.getDesc() != null && ref.getDesc().getStats().getAvgSize() > 0) {
+        return ref.getDesc().getStats().getAvgSize();
+      } else {
+        return DEFAULT_AVG_STRING_LENGTH;
+      }
+    } else if (e instanceof StringLiteral) {
+      return ((StringLiteral) e).getValue().length();
+    } else {
+      // TODO(tmarshall): Extend this to support other string Exprs, such as
+      // function calls that return string.
+      return DEFAULT_AVG_STRING_LENGTH;
+    }
+  }
+}
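
pushNegationToOperands() above relies on each operand class providing its own
negate(); the reflection check merely breaks the recursion for classes that don't.
Below is a self-contained sketch of the same recursion over a toy boolean-expression
tree. All class names in it are illustrative stand-ins, not Impala classes, and the
reflection check is omitted:

// Toy boolean-expression tree; everything here is illustrative, not part of the patch.
abstract class BoolExpr {
  // Fallback for operands that cannot negate themselves: keep an explicit NOT.
  BoolExpr negate() { return new Not(this); }
}

final class Var extends BoolExpr {
  final String name; final boolean negated;
  Var(String name, boolean negated) { this.name = name; this.negated = negated; }
  @Override BoolExpr negate() { return new Var(name, !negated); }
  @Override public String toString() { return (negated ? "!" : "") + name; }
}

final class Not extends BoolExpr {
  final BoolExpr child;
  Not(BoolExpr child) { this.child = child; }
  @Override BoolExpr negate() { return child; }
  @Override public String toString() { return "NOT(" + child + ")"; }
}

final class Compound extends BoolExpr {
  enum Op { AND, OR }
  final Op op; final BoolExpr left, right;
  Compound(Op op, BoolExpr left, BoolExpr right) { this.op = op; this.left = left; this.right = right; }
  // De Morgan: negate the children and flip the operator.
  @Override BoolExpr negate() {
    Op flipped = (op == Op.AND) ? Op.OR : Op.AND;
    return new Compound(flipped, left.negate(), right.negate());
  }
  @Override public String toString() { return "(" + left + " " + op + " " + right + ")"; }
}

public class PushNegationDemo {
  // Same recursion as Expr.pushNegationToOperands(), minus the reflection check:
  // strip a NOT by negating its child, recurse into compound predicates, leave the rest.
  static BoolExpr pushNegation(BoolExpr root) {
    if (root instanceof Not) return pushNegation(((Not) root).child.negate());
    if (root instanceof Compound) {
      Compound c = (Compound) root;
      return new Compound(c.op, pushNegation(c.left), pushNegation(c.right));
    }
    return root;
  }

  public static void main(String[] args) {
    BoolExpr e = new Not(new Compound(Compound.Op.AND, new Var("a", false), new Var("b", false)));
    System.out.println(pushNegation(e));  // prints (!a OR !b)
  }
}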

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/ExprId.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/ExprId.java b/fe/src/main/java/org/apache/impala/analysis/ExprId.java
new file mode 100644
index 0000000..52292f5
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/ExprId.java
@@ -0,0 +1,37 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import com.cloudera.impala.common.Id;
+import com.cloudera.impala.common.IdGenerator;
+
+public class ExprId extends Id<ExprId> {
+  // Construction only allowed via an IdGenerator.
+  protected ExprId(int id) {
+    super(id);
+  }
+
+  public static IdGenerator<ExprId> createGenerator() {
+    return new IdGenerator<ExprId>() {
+      @Override
+      public ExprId getNextId() { return new ExprId(nextId_++); }
+      @Override
+      public ExprId getMaxId() { return new ExprId(nextId_ - 1); }
+    };
+  }
+}
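
For context, createGenerator() is the only way to mint ExprIds; a call site in the
frontend would look roughly like the snippet below. The field name is made up for
illustration and is not part of this patch:

// Sketch only: 'exprIdGenerator_' is an illustrative name.
IdGenerator<ExprId> exprIdGenerator_ = ExprId.createGenerator();

ExprId first = exprIdGenerator_.getNextId();   // ids are handed out sequentially
ExprId second = exprIdGenerator_.getNextId();
ExprId highest = exprIdGenerator_.getMaxId();  // highest id minted so far (== second here)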

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/ExprSubstitutionMap.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/ExprSubstitutionMap.java b/fe/src/main/java/org/apache/impala/analysis/ExprSubstitutionMap.java
new file mode 100644
index 0000000..cbff71a
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/ExprSubstitutionMap.java
@@ -0,0 +1,176 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import java.util.List;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Joiner;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+
+/**
+ * Map of expression substitutions: lhs[i] gets substituted with rhs[i].
+ * To support expression substitution across query blocks, rhs exprs must already be
+ * analyzed when added to this map. Otherwise, analysis of a SlotRef may fail after
+ * substitution, e.g., because the table it refers to is in a different query block
+ * that is not visible.
+ * See Expr.substitute() and related functions for details on the actual substitution.
+ */
+public final class ExprSubstitutionMap {
+  private final static Logger LOG = LoggerFactory.getLogger(ExprSubstitutionMap.class);
+
+  private List<Expr> lhs_; // left-hand side
+  private List<Expr> rhs_; // right-hand side
+
+  public ExprSubstitutionMap() {
+    this(Lists.<Expr>newArrayList(), Lists.<Expr>newArrayList());
+  }
+
+  public ExprSubstitutionMap(List<Expr> lhs, List<Expr> rhs) {
+    lhs_ = lhs;
+    rhs_ = rhs;
+  }
+
+  /**
+   * Add an expr mapping. The rhsExpr must be analyzed to support correct substitution
+   * across query blocks. It is not required that the lhsExpr is analyzed.
+   */
+  public void put(Expr lhsExpr, Expr rhsExpr) {
+    Preconditions.checkState(rhsExpr.isAnalyzed_, "Rhs expr must be analyzed.");
+    lhs_.add(lhsExpr);
+    rhs_.add(rhsExpr);
+  }
+
+  /**
+   * Returns the expr mapped to lhsExpr or null if no mapping to lhsExpr exists.
+   */
+  public Expr get(Expr lhsExpr) {
+    for (int i = 0; i < lhs_.size(); ++i) {
+      if (lhsExpr.equals(lhs_.get(i))) return rhs_.get(i);
+    }
+    return null;
+  }
+
+  /**
+   * Returns true if the smap contains a mapping for lhsExpr.
+   */
+  public boolean containsMappingFor(Expr lhsExpr) {
+    return lhs_.contains(lhsExpr);
+  }
+
+  /**
+   * Returns a map which is equivalent to applying f followed by g,
+   * i.e., g(f()).
+   * Always returns a non-null map.
+   */
+  public static ExprSubstitutionMap compose(ExprSubstitutionMap f, ExprSubstitutionMap g,
+      Analyzer analyzer) {
+    if (f == null && g == null) return new ExprSubstitutionMap();
+    if (f == null) return g;
+    if (g == null) return f;
+    ExprSubstitutionMap result = new ExprSubstitutionMap();
+    // f's substitution targets need to be substituted via g
+    result.lhs_ = Expr.cloneList(f.lhs_);
+    result.rhs_ = Expr.substituteList(f.rhs_, g, analyzer, false);
+
+    // substitution maps are cumulative: the combined map contains all
+    // substitutions from f and g.
+    for (int i = 0; i < g.lhs_.size(); i++) {
+      // If f contains expr1->fn(expr2) and g contains expr2->expr3,
+      // then result must contain expr1->fn(expr3).
+      // The check before adding to result.lhs is to ensure that cases
+      // where expr2.equals(expr1) are handled correctly.
+      // For example f: count(*) -> zeroifnull(count(*))
+      // and g: count(*) -> slotref
+      // result.lhs must only have: count(*) -> zeroifnull(slotref) from f above,
+      // and not count(*) -> slotref from g as well.
+      if (!result.lhs_.contains(g.lhs_.get(i))) {
+        result.lhs_.add(g.lhs_.get(i).clone());
+        result.rhs_.add(g.rhs_.get(i).clone());
+      }
+    }
+
+    result.verify();
+    return result;
+  }
+
+  /**
+   * Returns the union of two substitution maps. Always returns a non-null map.
+   */
+  public static ExprSubstitutionMap combine(ExprSubstitutionMap f,
+      ExprSubstitutionMap g) {
+    if (f == null && g == null) return new ExprSubstitutionMap();
+    if (f == null) return g;
+    if (g == null) return f;
+    ExprSubstitutionMap result = new ExprSubstitutionMap();
+    result.lhs_ = Lists.newArrayList(f.lhs_);
+    result.lhs_.addAll(g.lhs_);
+    result.rhs_ = Lists.newArrayList(f.rhs_);
+    result.rhs_.addAll(g.rhs_);
+    result.verify();
+    return result;
+  }
+
+  public void substituteLhs(ExprSubstitutionMap lhsSmap, Analyzer analyzer) {
+    lhs_ = Expr.substituteList(lhs_, lhsSmap, analyzer, false);
+  }
+
+  public List<Expr> getLhs() { return lhs_; }
+  public List<Expr> getRhs() { return rhs_; }
+
+  public int size() { return lhs_.size(); }
+
+  public String debugString() {
+    Preconditions.checkState(lhs_.size() == rhs_.size());
+    List<String> output = Lists.newArrayList();
+    for (int i = 0; i < lhs_.size(); ++i) {
+      output.add(lhs_.get(i).toSql() + ":" + rhs_.get(i).toSql());
+      output.add("(" + lhs_.get(i).debugString() + ":" + rhs_.get(i).debugString() + ")");
+    }
+    return "smap(" + Joiner.on(" ").join(output) + ")";
+  }
+
+  /**
+   * Verifies the internal state of this smap: Checks that the lhs_ has no duplicates,
+   * and that all rhs exprs are analyzed.
+   */
+  private void verify() {
+    for (int i = 0; i < lhs_.size(); ++i) {
+      for (int j = i + 1; j < lhs_.size(); ++j) {
+        if (lhs_.get(i).equals(lhs_.get(j))) {
+          LOG.info("verify: smap=" + this.debugString());
+          Preconditions.checkState(false);
+        }
+      }
+      Preconditions.checkState(rhs_.get(i).isAnalyzed_);
+    }
+  }
+
+  public void clear() {
+    lhs_.clear();
+    rhs_.clear();
+  }
+
+  @Override
+  public ExprSubstitutionMap clone() {
+    return new ExprSubstitutionMap(Expr.cloneList(lhs_), Expr.cloneList(rhs_));
+  }
+}
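
The subtle part of compose() above is the order of operations: f's right-hand sides
are first rewritten through g, and an entry of g is only appended when f does not
already map the same left-hand side (the count(*) example in the code comment). The
following self-contained sketch shows that bookkeeping over plain strings; a string
replace is a crude stand-in for Expr.substituteList(), and the class is illustrative,
not part of the patch:

import java.util.LinkedHashMap;
import java.util.Map;

public class SmapComposeDemo {
  // Compose two substitution maps so that applying the result equals applying f, then g.
  static Map<String, String> compose(Map<String, String> f, Map<String, String> g) {
    Map<String, String> result = new LinkedHashMap<>();
    for (Map.Entry<String, String> fe : f.entrySet()) {
      String rhs = fe.getValue();
      // f's substitution targets are themselves rewritten through g.
      for (Map.Entry<String, String> ge : g.entrySet()) {
        rhs = rhs.replace(ge.getKey(), ge.getValue());
      }
      result.put(fe.getKey(), rhs);
    }
    // g's entries are appended unless f already maps the same lhs.
    for (Map.Entry<String, String> ge : g.entrySet()) {
      result.putIfAbsent(ge.getKey(), ge.getValue());
    }
    return result;
  }

  public static void main(String[] args) {
    Map<String, String> f = Map.of("count(*)", "zeroifnull(count(*))");
    Map<String, String> g = Map.of("count(*)", "slotref");
    // Prints {count(*)=zeroifnull(slotref)}: f's lhs with g applied to its rhs,
    // while g's own count(*) -> slotref entry is dropped because that lhs is taken.
    System.out.println(compose(f, g));
  }
}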

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/ExtractFromExpr.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/ExtractFromExpr.java b/fe/src/main/java/org/apache/impala/analysis/ExtractFromExpr.java
new file mode 100644
index 0000000..48b9fb3
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/ExtractFromExpr.java
@@ -0,0 +1,111 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import java.util.Set;
+
+import com.cloudera.impala.catalog.Catalog;
+import com.cloudera.impala.catalog.Type;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.thrift.TExtractField;
+import com.google.common.base.Joiner;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Lists;
+
+/**
+ * Representation of an EXTRACT(<Time Unit> FROM <Datetime Expr>) expression.
+ * EXTRACT(<Datetime Expr>, <String>) is handled by FunctionCallExpr.
+ */
+public class ExtractFromExpr extends FunctionCallExpr {
+
+  // Behaves like an immutable linked hash set containing the TExtractFields in the same
+  // order as declared.
+  private static final Set<String> EXTRACT_FIELDS;
+  static {
+    ImmutableSet.Builder<String> builder = new ImmutableSet.Builder<String>();
+    for (TExtractField extractField: TExtractField.values()) {
+      if (extractField != TExtractField.INVALID_FIELD) {
+        builder.add(extractField.name());
+      }
+    }
+    EXTRACT_FIELDS = builder.build();
+  }
+
+  public ExtractFromExpr(FunctionName fnName, String extractFieldIdent, Expr e) {
+    // Note that the arguments are swapped so that they align with the EXTRACT function.
+    // There is no EXTRACT(STRING, TIMESTAMP) function because it conflicts with
+    // EXTRACT(TIMESTAMP, STRING) if STRINGs are used for TIMESTAMPs with implicit
+    // casting.
+    super(fnName, Lists.newArrayList(e, new StringLiteral(extractFieldIdent)));
+    type_ = Type.INT;
+  }
+
+  /**
+   * Copy c'tor used in clone().
+   */
+  protected ExtractFromExpr(ExtractFromExpr other) {
+    super(other);
+  }
+
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    getFnName().analyze(analyzer);
+    if (!getFnName().getFunction().equals("extract")) {
+      throw new AnalysisException("Function " + getFnName().getFunction().toUpperCase()
+          + " does not accept the keyword FROM.");
+    }
+    if ((getFnName().getDb() != null)
+        && !getFnName().getDb().equals(Catalog.BUILTINS_DB)) {
+      throw new AnalysisException("Function " + getFnName().toString() + " conflicts " +
+          "with the EXTRACT builtin.");
+    }
+    if (isAnalyzed_) return;
+    super.analyze(analyzer);
+
+    String extractFieldIdent = ((StringLiteral)children_.get(1)).getValue();
+    Preconditions.checkNotNull(extractFieldIdent);
+    if (!EXTRACT_FIELDS.contains(extractFieldIdent.toUpperCase())) {
+      throw new AnalysisException("Time unit '" + extractFieldIdent + "' in expression '"
+          + toSql() + "' is invalid. Expected one of "
+          + Joiner.on(", ").join(EXTRACT_FIELDS) + ".");
+    }
+  }
+
+  @Override
+  protected String getFunctionNotFoundError(Type[] argTypes) {
+    Expr e = children_.get(0);
+    return "Expression '" + e.toSql() + "' in '" + toSql() + "' has a return type of "
+          + e.getType().toSql() + " but a TIMESTAMP is required.";
+  }
+
+  @Override
+  public String toSqlImpl() {
+    StringBuilder strBuilder = new StringBuilder();
+    strBuilder.append(getFnName().toString().toUpperCase());
+    strBuilder.append("(");
+    strBuilder.append(((StringLiteral)getChild(1)).getValue());
+    strBuilder.append(" FROM ");
+    strBuilder.append(getChild(0).toSql());
+    strBuilder.append(")");
+    return strBuilder.toString();
+  }
+
+  @Override
+  public Expr clone() { return new ExtractFromExpr(this); }
+}
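
For context, this class covers only the SQL-standard syntax, e.g.
EXTRACT(YEAR FROM ts_col); the two-argument form extract(ts_col, 'year') goes
through the ordinary builtin/FunctionCallExpr path instead. (ts_col is an
illustrative column name, not one from the patch.)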

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/FromClause.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/FromClause.java b/fe/src/main/java/org/apache/impala/analysis/FromClause.java
new file mode 100644
index 0000000..bbe6f23
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/FromClause.java
@@ -0,0 +1,129 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+
+import com.cloudera.impala.common.AnalysisException;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+
+/**
+ * Wraps a list of TableRef instances that form a FROM clause, allowing them to be
+ * analyzed independently of the statement using them. To increase the flexibility of
+ * the class it implements the Iterable interface.
+ */
+public class FromClause implements ParseNode, Iterable<TableRef> {
+
+  private final ArrayList<TableRef> tableRefs_;
+
+  private boolean analyzed_ = false;
+
+  public FromClause(List<TableRef> tableRefs) {
+    tableRefs_ = Lists.newArrayList(tableRefs);
+    // Set left table refs to ensure correct toSql() before analysis.
+    for (int i = 1; i < tableRefs_.size(); ++i) {
+      tableRefs_.get(i).setLeftTblRef(tableRefs_.get(i - 1));
+    }
+  }
+
+  public FromClause() { tableRefs_ = Lists.newArrayList(); }
+  public List<TableRef> getTableRefs() { return tableRefs_; }
+
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    if (analyzed_) return;
+
+    if (tableRefs_.isEmpty()) {
+      analyzed_ = true;
+      return;
+    }
+
+    // Start out with table refs to establish aliases.
+    TableRef leftTblRef = null;  // the one to the left of tblRef
+    for (int i = 0; i < tableRefs_.size(); ++i) {
+      // Resolve and replace non-InlineViewRef table refs with a BaseTableRef or ViewRef.
+      TableRef tblRef = tableRefs_.get(i);
+      tblRef = analyzer.resolveTableRef(tblRef);
+      tableRefs_.set(i, Preconditions.checkNotNull(tblRef));
+      tblRef.setLeftTblRef(leftTblRef);
+      try {
+        tblRef.analyze(analyzer);
+      } catch (AnalysisException e) {
+        // Only re-throw the exception if no tables are missing.
+        if (analyzer.getMissingTbls().isEmpty()) throw e;
+      }
+      leftTblRef = tblRef;
+    }
+
+    // All tableRefs have been analyzed, but at least one table is missing metadata.
+    if (!analyzer.getMissingTbls().isEmpty()) {
+      throw new AnalysisException("Found missing tables. Aborting analysis.");
+    }
+    analyzed_ = true;
+  }
+
+  public FromClause clone() {
+    ArrayList<TableRef> clone = Lists.newArrayList();
+    for (TableRef tblRef: tableRefs_) clone.add(tblRef.clone());
+    return new FromClause(clone);
+  }
+
+  public void reset() {
+    for (int i = 0; i < size(); ++i) {
+      TableRef origTblRef = get(i);
+      if (origTblRef.isResolved() && !(origTblRef instanceof InlineViewRef)) {
+        // Replace resolved table refs with unresolved ones.
+        TableRef newTblRef = new TableRef(origTblRef);
+        // Use the fully qualified raw path to preserve the original resolution.
+        // Otherwise, non-fully qualified paths might incorrectly match a local view.
+        // TODO for 2.3: This full qualification preserves analysis state which is
+        // contrary to the intended semantics of reset(). We could address this issue by
+        // changing the WITH-clause analysis to register local views that have
+        // fully-qualified table refs, and then remove the full qualification here.
+        newTblRef.rawPath_ = origTblRef.getResolvedPath().getFullyQualifiedRawPath();
+        set(i, newTblRef);
+      }
+      get(i).reset();
+    }
+    this.analyzed_ = false;
+  }
+
+  @Override
+  public String toSql() {
+    StringBuilder builder = new StringBuilder();
+    if (!tableRefs_.isEmpty()) {
+      builder.append(" FROM ");
+      for (int i = 0; i < tableRefs_.size(); ++i) {
+        builder.append(tableRefs_.get(i).toSql());
+      }
+    }
+    return builder.toString();
+  }
+
+  public boolean isEmpty() { return tableRefs_.isEmpty(); }
+
+  @Override
+  public Iterator<TableRef> iterator() { return tableRefs_.iterator(); }
+  public int size() { return tableRefs_.size(); }
+  public TableRef get(int i) { return tableRefs_.get(i); }
+  public void set(int i, TableRef tableRef) { tableRefs_.set(i, tableRef); }
+  public void add(TableRef t) { tableRefs_.add(t); }
+}
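
Because FromClause implements Iterable<TableRef>, callers can walk the table refs
directly; a minimal, hypothetical example (the getFromClause() accessor name is
illustrative, not part of this patch):

for (TableRef ref : selectStmt.getFromClause()) {
  System.out.println(ref.toSql());
}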

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/FunctionArgs.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/FunctionArgs.java b/fe/src/main/java/org/apache/impala/analysis/FunctionArgs.java
new file mode 100644
index 0000000..998c5fc
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/FunctionArgs.java
@@ -0,0 +1,67 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import java.util.ArrayList;
+
+import com.cloudera.impala.catalog.Type;
+import com.cloudera.impala.common.AnalysisException;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+
+// Wrapper class around a function's argument types and whether it accepts varargs.
+public class FunctionArgs implements ParseNode {
+  private final ArrayList<TypeDef> argTypeDefs_;
+  private boolean hasVarArgs_;
+
+  // Result of analysis.
+  private ArrayList<Type> argTypes_;
+
+  public FunctionArgs() {
+    argTypeDefs_ = Lists.newArrayList();
+    hasVarArgs_ = false;
+  }
+
+  public FunctionArgs(ArrayList<TypeDef> argTypeDefs, boolean varArgs) {
+    argTypeDefs_ = argTypeDefs;
+    hasVarArgs_ = varArgs;
+    if (varArgs) Preconditions.checkState(argTypeDefs.size() > 0);
+  }
+
+  public void setHasVarArgs(boolean b) {
+    Preconditions.checkState(argTypeDefs_.size() > 0);
+    hasVarArgs_ = b;
+  }
+
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    ArrayList<Type> argTypes = Lists.newArrayListWithCapacity(argTypeDefs_.size());
+    for (TypeDef typeDef: argTypeDefs_) {
+      typeDef.analyze(analyzer);
+      argTypes.add(typeDef.getType());
+    }
+    argTypes_ = argTypes;
+  }
+
+  public ArrayList<TypeDef> getArgTypeDefs() { return argTypeDefs_; }
+  public ArrayList<Type> getArgTypes() { return argTypes_; }
+  public boolean hasVarArgs() { return hasVarArgs_; }
+
+  @Override
+  public String toSql() { return null; }
+}


[60/61] incubator-impala git commit: Remove spurious Boost warnings on compilation errors

Posted by kw...@apache.org.
Remove spurious Boost warnings on compilation errors

Compilation errors can be accompanied by spurious warnings from Boost where the
filesystem module is used, like:

‘boost::system::posix_category’ defined but not used

Defining BOOST_SYSTEM_NO_DEPRECATED removes those warnings, which arise
from Boost maintaining deprecated names for error codes that have moved
namespaces during the shift to C++11 (see
http://www.boost.org/doc/libs/1_61_0/boost/system/error_code.hpp and
http://www.boost.org/doc/libs/1_61_0/libs/system/doc/reference.html).

We're not using the old names, so it's ok to remove them.

Change-Id: Ib84d8a9958469fb22b0af4907958917a65e8290f
Reviewed-on: http://gerrit.cloudera.org:8080/4564
Reviewed-by: Tim Armstrong <ta...@cloudera.com>
Tested-by: Internal Jenkins


Project: http://git-wip-us.apache.org/repos/asf/incubator-impala/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-impala/commit/f640b3a3
Tree: http://git-wip-us.apache.org/repos/asf/incubator-impala/tree/f640b3a3
Diff: http://git-wip-us.apache.org/repos/asf/incubator-impala/diff/f640b3a3

Branch: refs/heads/master
Commit: f640b3a392dd954f65ab73be14b3670ae57ace29
Parents: b2c2fe7
Author: Henry Robinson <he...@cloudera.com>
Authored: Thu Sep 29 11:18:29 2016 -0700
Committer: Internal Jenkins <cl...@gerrit.cloudera.org>
Committed: Thu Sep 29 22:29:37 2016 +0000

----------------------------------------------------------------------
 be/CMakeLists.txt | 1 +
 1 file changed, 1 insertion(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/f640b3a3/be/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/be/CMakeLists.txt b/be/CMakeLists.txt
index 2546bd0..a5cc4d8 100644
--- a/be/CMakeLists.txt
+++ b/be/CMakeLists.txt
@@ -44,6 +44,7 @@ SET(CXX_COMMON_FLAGS "${CXX_COMMON_FLAGS} -fno-strict-aliasing")
 SET(CXX_COMMON_FLAGS "${CXX_COMMON_FLAGS} -std=c++14")
 SET(CXX_COMMON_FLAGS "${CXX_COMMON_FLAGS} -Wno-deprecated -Wno-vla")
 SET(CXX_COMMON_FLAGS "${CXX_COMMON_FLAGS} -DBOOST_DATE_TIME_POSIX_TIME_STD_CONFIG")
+SET(CXX_COMMON_FLAGS "${CXX_COMMON_FLAGS} -DBOOST_SYSTEM_NO_DEPRECATED")
 SET(CXX_COMMON_FLAGS "${CXX_COMMON_FLAGS} -B $ENV{IMPALA_TOOLCHAIN}/binutils-$ENV{IMPALA_BINUTILS_VERSION}/bin/")
 IF($ENV{USE_GOLD_LINKER} STREQUAL "true")
   SET(CXX_COMMON_FLAGS "${CXX_COMMON_FLAGS} -Wl,-fuse-ld=gold")


[42/61] [partial] incubator-impala git commit: IMPALA-3786: Replace "cloudera" with "apache" (part 1)

Posted by kw...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/Expr.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/Expr.java b/fe/src/main/java/com/cloudera/impala/analysis/Expr.java
deleted file mode 100644
index fdc5bf1..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/Expr.java
+++ /dev/null
@@ -1,1258 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.lang.reflect.Method;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-import java.util.ListIterator;
-import java.util.Set;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.catalog.Catalog;
-import com.cloudera.impala.catalog.Function;
-import com.cloudera.impala.catalog.Function.CompareMode;
-import com.cloudera.impala.catalog.PrimitiveType;
-import com.cloudera.impala.catalog.ScalarType;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.TreeNode;
-import com.cloudera.impala.thrift.TExpr;
-import com.cloudera.impala.thrift.TExprNode;
-import com.google.common.base.Joiner;
-import com.google.common.base.Objects;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Predicates;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Sets;
-
-/**
- * Root of the expr node hierarchy.
- *
- */
-abstract public class Expr extends TreeNode<Expr> implements ParseNode, Cloneable {
-  private final static Logger LOG = LoggerFactory.getLogger(Expr.class);
-
-  // Limits on the number of expr children and the depth of an expr tree. These maximum
-  // values guard against crashes due to stack overflows (IMPALA-432) and were
-  // experimentally determined to be safe.
-  public final static int EXPR_CHILDREN_LIMIT = 10000;
-  // The expr depth limit is mostly due to our recursive implementation of clone().
-  public final static int EXPR_DEPTH_LIMIT = 1000;
-
-  // Name of the function that needs to be implemented by every Expr that
-  // supports negation.
-  private final static String NEGATE_FN = "negate";
-
-  // To be used where we cannot come up with a better estimate (selectivity_ is -1).
-  public static double DEFAULT_SELECTIVITY = 0.1;
-
-  // The relative costs of different Exprs. These numbers are not intended as a precise
-  // reflection of running times, but as simple heuristics for ordering Exprs from cheap
-  // to expensive.
-  // TODO(tmwarshall): Get these costs in a more principled way, eg. with a benchmark.
-  public final static float ARITHMETIC_OP_COST = 1;
-  public final static float BINARY_PREDICATE_COST = 1;
-  public final static float VAR_LEN_BINARY_PREDICATE_COST = 5;
-  public final static float CAST_COST = 1;
-  public final static float COMPOUND_PREDICATE_COST = 1;
-  public final static float FUNCTION_CALL_COST = 10;
-  public final static float IS_NOT_EMPTY_COST = 1;
-  public final static float IS_NULL_COST = 1;
-  public final static float LIKE_COST = 10;
-  public final static float LITERAL_COST = 1;
-  public final static float SLOT_REF_COST = 1;
-  public final static float TIMESTAMP_ARITHMETIC_COST = 5;
-
-  // To be used when estimating the cost of Exprs of type string where we don't otherwise
-  // have an estimate of how long the strings produced by that Expr are.
-  public final static int DEFAULT_AVG_STRING_LENGTH = 5;
-
-  // returns true if an Expr is a non-analytic aggregate.
-  private final static com.google.common.base.Predicate<Expr> isAggregatePredicate_ =
-      new com.google.common.base.Predicate<Expr>() {
-        public boolean apply(Expr arg) {
-          return arg instanceof FunctionCallExpr &&
-              ((FunctionCallExpr)arg).isAggregateFunction();
-        }
-      };
-
-  // Returns true if an Expr is a NOT CompoundPredicate.
-  public final static com.google.common.base.Predicate<Expr> IS_NOT_PREDICATE =
-      new com.google.common.base.Predicate<Expr>() {
-        @Override
-        public boolean apply(Expr arg) {
-          return arg instanceof CompoundPredicate &&
-              ((CompoundPredicate)arg).getOp() == CompoundPredicate.Operator.NOT;
-        }
-      };
-
-  // Returns true if an Expr is an OR CompoundPredicate.
-  public final static com.google.common.base.Predicate<Expr> IS_OR_PREDICATE =
-      new com.google.common.base.Predicate<Expr>() {
-        @Override
-        public boolean apply(Expr arg) {
-          return arg instanceof CompoundPredicate &&
-              ((CompoundPredicate)arg).getOp() == CompoundPredicate.Operator.OR;
-        }
-      };
-
-  // Returns true if an Expr is a scalar subquery
-  public final static com.google.common.base.Predicate<Expr> IS_SCALAR_SUBQUERY =
-      new com.google.common.base.Predicate<Expr>() {
-        @Override
-        public boolean apply(Expr arg) {
-          return arg.isScalarSubquery();
-        }
-      };
-
-  // Returns true if an Expr is an aggregate function that returns non-null on
-  // an empty set (e.g. count).
-  public final static com.google.common.base.Predicate<Expr>
-      NON_NULL_EMPTY_AGG = new com.google.common.base.Predicate<Expr>() {
-        @Override
-        public boolean apply(Expr arg) {
-          return arg instanceof FunctionCallExpr &&
-              ((FunctionCallExpr)arg).returnsNonNullOnEmpty();
-        }
-      };
-
-  // Returns true if an Expr is a builtin aggregate function.
-  public final static com.google.common.base.Predicate<Expr> IS_BUILTIN_AGG_FN =
-      new com.google.common.base.Predicate<Expr>() {
-        @Override
-        public boolean apply(Expr arg) {
-          return arg instanceof FunctionCallExpr &&
-              ((FunctionCallExpr)arg).getFnName().isBuiltin();
-        }
-      };
-
-  public final static com.google.common.base.Predicate<Expr> IS_TRUE_LITERAL =
-      new com.google.common.base.Predicate<Expr>() {
-        @Override
-        public boolean apply(Expr arg) {
-          return arg instanceof BoolLiteral && ((BoolLiteral)arg).getValue();
-        }
-      };
-
-  public final static com.google.common.base.Predicate<Expr> IS_EQ_BINARY_PREDICATE =
-      new com.google.common.base.Predicate<Expr>() {
-        @Override
-        public boolean apply(Expr arg) { return BinaryPredicate.getEqSlots(arg) != null; }
-      };
-
-  public final static com.google.common.base.Predicate<Expr> IS_BINARY_PREDICATE =
-      new com.google.common.base.Predicate<Expr>() {
-        @Override
-        public boolean apply(Expr arg) { return arg instanceof BinaryPredicate; }
-      };
-
-  // id that's unique across the entire query statement and is assigned by
-  // Analyzer.registerConjuncts(); only assigned for the top-level terms of a
-  // conjunction, and therefore null for most Exprs
-  protected ExprId id_;
-
-  // true if Expr is an auxiliary predicate that was generated by the plan generation
-  // process to facilitate predicate propagation;
-  // false if Expr originated with a query stmt directly
-  private boolean isAuxExpr_ = false;
-
-  protected Type type_;  // result of analysis
-  protected boolean isAnalyzed_;  // true after analyze() has been called
-  protected boolean isOnClauseConjunct_; // set by analyzer
-
-  // Flag to indicate whether to wrap this expr's toSql() in parenthesis. Set by parser.
-  // Needed for properly capturing expr precedences in the SQL string.
-  protected boolean printSqlInParens_ = false;
-
-  // Estimated probability of a predicate evaluating to true. Set during analysis.
-  // Between 0 and 1, or set to -1 if the selectivity could not be estimated.
-  protected double selectivity_;
-
-  // Estimated relative cost of evaluating this expression, including the costs of
-  // its children. Set during analysis and used to sort conjuncts within a PlanNode.
-  // Has a default value of -1 indicating unknown cost if the cost of this expression
-  // or any of its children was not set, but it is required to be set for any
-  // expression which may be part of a conjunct.
-  protected float evalCost_;
-
-  // estimated number of distinct values produced by Expr; invalid: -1
-  // set during analysis
-  protected long numDistinctValues_;
-
-  // The function to call. This can either be a scalar or aggregate function.
-  // Set in analyze().
-  protected Function fn_;
-
-  protected Expr() {
-    super();
-    type_ = Type.INVALID;
-    selectivity_ = -1.0;
-    evalCost_ = -1.0f;
-    numDistinctValues_ = -1;
-  }
-
-  /**
-   * Copy c'tor used in clone().
-   */
-  protected Expr(Expr other) {
-    id_ = other.id_;
-    isAuxExpr_ = other.isAuxExpr_;
-    type_ = other.type_;
-    isAnalyzed_ = other.isAnalyzed_;
-    isOnClauseConjunct_ = other.isOnClauseConjunct_;
-    printSqlInParens_ = other.printSqlInParens_;
-    selectivity_ = other.selectivity_;
-    evalCost_ = other.evalCost_;
-    numDistinctValues_ = other.numDistinctValues_;
-    fn_ = other.fn_;
-    children_ = Expr.cloneList(other.children_);
-  }
-
-  public ExprId getId() { return id_; }
-  protected void setId(ExprId id) { id_ = id; }
-  public Type getType() { return type_; }
-  public double getSelectivity() { return selectivity_; }
-  public boolean hasSelectivity() { return selectivity_ >= 0; }
-  public float getCost() {
-    Preconditions.checkState(isAnalyzed_);
-    return evalCost_;
-  }
-  public boolean hasCost() { return evalCost_ >= 0; }
-  public long getNumDistinctValues() { return numDistinctValues_; }
-  public void setPrintSqlInParens(boolean b) { printSqlInParens_ = b; }
-  public boolean isOnClauseConjunct() { return isOnClauseConjunct_; }
-  public void setIsOnClauseConjunct(boolean b) { isOnClauseConjunct_ = b; }
-  public boolean isAuxExpr() { return isAuxExpr_; }
-  public boolean isRegisteredPredicate() { return id_ != null; }
-  public void setIsAuxExpr() { isAuxExpr_ = true; }
-  public Function getFn() { return fn_; }
-
-  /**
-   * Perform semantic analysis of node and all of its children.
-   * Throws exception if any errors found.
-   * @see com.cloudera.impala.parser.ParseNode#analyze(com.cloudera.impala.parser.Analyzer)
-   */
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    // Check the expr child limit.
-    if (children_.size() > EXPR_CHILDREN_LIMIT) {
-      String sql = toSql();
-      String sqlSubstr = sql.substring(0, Math.min(80, sql.length()));
-      throw new AnalysisException(String.format("Exceeded the maximum number of child " +
-          "expressions (%s).\nExpression has %s children:\n%s...",
-          EXPR_CHILDREN_LIMIT, children_.size(), sqlSubstr));
-    }
-
-    // analyzer may be null for certain literal constructions (e.g. IntLiteral).
-    if (analyzer != null) {
-      analyzer.incrementCallDepth();
-      // Check the expr depth limit. Do not print toSql() to avoid overflowing the stack.
-      if (analyzer.getCallDepth() > EXPR_DEPTH_LIMIT) {
-        throw new AnalysisException(String.format("Exceeded the maximum depth of an " +
-            "expression tree (%s).", EXPR_DEPTH_LIMIT));
-      }
-    }
-    for (Expr child: children_) {
-      child.analyze(analyzer);
-    }
-    isAnalyzed_ = true;
-    computeNumDistinctValues();
-
-    if (analyzer != null) analyzer.decrementCallDepth();
-  }
-
-  /**
-   * Helper function to analyze this expr and assert that the analysis was successful.
-   * TODO: This function could be used in many more places to clean up. Consider
-   * adding an IAnalyzable interface or similar to and move this helper into Analyzer
-   * such that non-Expr things can use the helper also.
-   */
-  public void analyzeNoThrow(Analyzer analyzer) {
-    try {
-      analyze(analyzer);
-    } catch (AnalysisException e) {
-      throw new IllegalStateException(e);
-    }
-  }
-
-  protected void computeNumDistinctValues() {
-    if (isConstant()) {
-      numDistinctValues_ = 1;
-    } else {
-      // if this Expr contains slotrefs, we estimate the # of distinct values
-      // to be the maximum such number for any of the slotrefs;
-      // the subclass analyze() function may well want to override this, if it
-      // knows better
-      List<SlotRef> slotRefs = Lists.newArrayList();
-      this.collect(Predicates.instanceOf(SlotRef.class), slotRefs);
-      numDistinctValues_ = -1;
-      for (SlotRef slotRef: slotRefs) {
-        numDistinctValues_ = Math.max(numDistinctValues_, slotRef.numDistinctValues_);
-      }
-    }
-  }
-
-  /**
-   * Collects the return types of the child nodes in an array.
-   */
-  protected Type[] collectChildReturnTypes() {
-    Type[] childTypes = new Type[children_.size()];
-    for (int i = 0; i < children_.size(); ++i) {
-      childTypes[i] = children_.get(i).type_;
-    }
-    return childTypes;
-  }
-
-  /**
-   * Looks up in the catalog the builtin for 'name' and 'argTypes'.
-   * Returns null if the function is not found.
-   */
-  protected Function getBuiltinFunction(Analyzer analyzer, String name,
-      Type[] argTypes, CompareMode mode) throws AnalysisException {
-    FunctionName fnName = new FunctionName(Catalog.BUILTINS_DB, name);
-    Function searchDesc = new Function(fnName, argTypes, Type.INVALID, false);
-    return analyzer.getCatalog().getFunction(searchDesc, mode);
-  }
-
-  /**
-   * Generates the necessary casts for the children of this expr to call fn_.
-   * child(0) is cast to the function's first argument, child(1) to the second etc.
-   * This does not do any validation and the casts are assumed to be safe.
-   *
-   * If ignoreWildcardDecimals is true, the function will not cast arguments that
-   * are wildcard decimals. This is used for builtins where the cast is done within
-   * the BE function.
-   * Otherwise, if the function signature contains wildcard decimals, each wildcard child
-   * argument will be cast to the highest resolution that can contain all of the child
-   * wildcard arguments.
-   * e.g. fn(decimal(*), decimal(*))
-   *      called with fn(decimal(10,2), decimal(5,3))
-   * both children will be cast to (11, 3).
-   */
-  protected void castForFunctionCall(boolean ignoreWildcardDecimals)
-      throws AnalysisException {
-    Preconditions.checkState(fn_ != null);
-    Type[] fnArgs = fn_.getArgs();
-    Type resolvedWildcardType = getResolvedWildCardType();
-    for (int i = 0; i < children_.size(); ++i) {
-      // For varargs, we must compare with the last type in fnArgs.argTypes.
-      int ix = Math.min(fnArgs.length - 1, i);
-      if (fnArgs[ix].isWildcardDecimal()) {
-        if (children_.get(i).type_.isDecimal() && ignoreWildcardDecimals) continue;
-        Preconditions.checkState(resolvedWildcardType != null);
-        if (!children_.get(i).type_.equals(resolvedWildcardType)) {
-          castChild(resolvedWildcardType, i);
-        }
-      } else if (!children_.get(i).type_.matchesType(fnArgs[ix])) {
-        castChild(fnArgs[ix], i);
-      }
-    }
-  }
-
-  /**
-   * Returns the max resolution type of all the wild card decimal types.
-   * Returns null if there are no wild card types.
-   */
-  Type getResolvedWildCardType() throws AnalysisException {
-    Type result = null;
-    Type[] fnArgs = fn_.getArgs();
-    for (int i = 0; i < children_.size(); ++i) {
-      // For varargs, we must compare with the last type in fnArgs.argTypes.
-      int ix = Math.min(fnArgs.length - 1, i);
-      if (!fnArgs[ix].isWildcardDecimal()) continue;
-
-      Type childType = children_.get(i).type_;
-      Preconditions.checkState(!childType.isWildcardDecimal(),
-          "Child expr should have been resolved.");
-      Preconditions.checkState(childType.isScalarType(),
-          "Function should not have resolved with a non-scalar child type.");
-      ScalarType decimalType = (ScalarType) childType;
-      if (result == null) {
-        result = decimalType.getMinResolutionDecimal();
-      } else {
-        result = Type.getAssignmentCompatibleType(result, childType, false);
-      }
-    }
-    if (result != null) {
-      if (result.isNull()) {
-        throw new AnalysisException(
-            "Cannot resolve DECIMAL precision and scale from NULL type.");
-      }
-      Preconditions.checkState(result.isDecimal() && !result.isWildcardDecimal());
-    }
-    return result;
-  }
-
-  /**
-   * Returns true if e is a CastExpr and the target type is a decimal.
-   */
-  private boolean isExplicitCastToDecimal(Expr e) {
-    if (!(e instanceof CastExpr)) return false;
-    CastExpr c = (CastExpr)e;
-    return !c.isImplicit() && c.getType().isDecimal();
-  }
-
-  /**
-   * Returns a clone of child with all decimal-typed NumericLiterals in it explicitly
-   * cast to targetType.
-   */
-  private Expr convertDecimalLiteralsToFloat(Analyzer analyzer, Expr child,
-      Type targetType) throws AnalysisException {
-    if (!targetType.isFloatingPointType() && !targetType.isIntegerType()) return child;
-    if (targetType.isIntegerType()) targetType = Type.DOUBLE;
-    List<NumericLiteral> literals = Lists.newArrayList();
-    child.collectAll(Predicates.instanceOf(NumericLiteral.class), literals);
-    ExprSubstitutionMap smap = new ExprSubstitutionMap();
-    for (NumericLiteral l: literals) {
-      if (!l.getType().isDecimal()) continue;
-      NumericLiteral castLiteral = (NumericLiteral) l.clone();
-      castLiteral.explicitlyCastToFloat(targetType);
-      smap.put(l, castLiteral);
-    }
-    return child.substitute(smap, analyzer, false);
-  }
-
-  /**
-   * Converts numeric literal in the expr tree rooted at this expr to return floating
-   * point types instead of decimals, if possible.
-   *
-   * Decimal has a higher processing cost than floating point and we should not pay
-   * the cost if the user does not require the accuracy. For example:
-   * "select float_col + 1.1" would start out with 1.1 as a decimal(2,1) and the
-   * float_col would be promoted to a high accuracy decimal. This function will identify
-   * this case and treat 1.1 as a float.
-   * In the case of "decimal_col + 1.1", 1.1 would remain a decimal.
-   * In the case of "float_col + cast(1.1 as decimal(2,1))", the result would be a
-   * decimal.
-   *
-   * Another way to think about it is that DecimalLiterals are analyzed as returning
-   * decimals (of the narrowest precision/scale) and we later convert them to a floating
-   * point type when it is consistent with the user's intent.
-   *
-   * TODO: another option is to do constant folding in the FE and then apply this rule.
-   */
-  protected void convertNumericLiteralsFromDecimal(Analyzer analyzer)
-      throws AnalysisException {
-    Preconditions.checkState(this instanceof ArithmeticExpr ||
-        this instanceof BinaryPredicate);
-    if (children_.size() == 1) return; // Do not attempt to convert for unary ops
-    Preconditions.checkState(children_.size() == 2);
-    Type t0 = getChild(0).getType();
-    Type t1 = getChild(1).getType();
-    boolean c0IsConstantDecimal = getChild(0).isConstant() && t0.isDecimal();
-    boolean c1IsConstantDecimal = getChild(1).isConstant() && t1.isDecimal();
-    if (c0IsConstantDecimal && c1IsConstantDecimal) return;
-    if (!c0IsConstantDecimal && !c1IsConstantDecimal) return;
-
-    // Only child(0) or child(1) is a const decimal. See if we can cast it to
-    // the type of the other child.
-    if (c0IsConstantDecimal && !isExplicitCastToDecimal(getChild(0))) {
-      Expr c0 = convertDecimalLiteralsToFloat(analyzer, getChild(0), t1);
-      setChild(0, c0);
-    }
-    if (c1IsConstantDecimal && !isExplicitCastToDecimal(getChild(1))) {
-      Expr c1 = convertDecimalLiteralsToFloat(analyzer, getChild(1), t0);
-      setChild(1, c1);
-    }
-  }
-
-  /**
-   * Helper function: analyze list of exprs
-   */
-  public static void analyze(List<? extends Expr> exprs, Analyzer analyzer)
-      throws AnalysisException {
-    if (exprs == null) return;
-    for (Expr expr: exprs) {
-      expr.analyze(analyzer);
-    }
-  }
-
-  @Override
-  public String toSql() {
-    return (printSqlInParens_) ? "(" + toSqlImpl() + ")" : toSqlImpl();
-  }
-
-  /**
-   * Returns a SQL string representing this expr. Subclasses should override this method
-   * instead of toSql() to ensure that parentheses are properly added around the toSql().
-   */
-  protected abstract String toSqlImpl();
-
-  // Convert this expr, including all children, to its Thrift representation.
-  public TExpr treeToThrift() {
-    if (type_.isNull()) {
-      // Hack to ensure BE never sees TYPE_NULL. If an expr makes it this far without
-      // being cast to a non-NULL type, the type doesn't matter and we can cast it
-      // arbitrarily.
-      Preconditions.checkState(this instanceof NullLiteral || this instanceof SlotRef);
-      return NullLiteral.create(ScalarType.BOOLEAN).treeToThrift();
-    }
-    TExpr result = new TExpr();
-    treeToThriftHelper(result);
-    return result;
-  }
-
-  // Append a flattened version of this expr, including all children, to 'container'.
-  protected void treeToThriftHelper(TExpr container) {
-    Preconditions.checkState(isAnalyzed_,
-        "Must be analyzed before serializing to thrift. %s", this);
-    Preconditions.checkState(!type_.isWildcardDecimal());
-    // The BE should never see TYPE_NULL
-    Preconditions.checkState(!type_.isNull(), "Expr has type null!");
-    TExprNode msg = new TExprNode();
-    msg.type = type_.toThrift();
-    msg.num_children = children_.size();
-    if (fn_ != null) {
-      msg.setFn(fn_.toThrift());
-      if (fn_.hasVarArgs()) msg.setVararg_start_idx(fn_.getNumArgs() - 1);
-    }
-    toThrift(msg);
-    container.addToNodes(msg);
-    for (Expr child: children_) {
-      child.treeToThriftHelper(container);
-    }
-  }
-
-  // Convert this expr into msg (excluding children), which requires setting
-  // msg.op as well as the expr-specific field.
-  protected abstract void toThrift(TExprNode msg);
-
-  /**
-   * Returns the product of the given exprs' number of distinct values or -1 if any of
-   * the exprs have an invalid number of distinct values.
-   */
-  public static long getNumDistinctValues(List<Expr> exprs) {
-    if (exprs == null || exprs.isEmpty()) return 0;
-    long numDistinctValues = 1;
-    for (Expr expr: exprs) {
-      if (expr.getNumDistinctValues() == -1) {
-        numDistinctValues = -1;
-        break;
-      }
-      numDistinctValues *= expr.getNumDistinctValues();
-    }
-    return numDistinctValues;
-  }
-
-  public static List<TExpr> treesToThrift(List<? extends Expr> exprs) {
-    List<TExpr> result = Lists.newArrayList();
-    for (Expr expr: exprs) {
-      result.add(expr.treeToThrift());
-    }
-    return result;
-  }
-
-  public static com.google.common.base.Predicate<Expr> isAggregatePredicate() {
-    return isAggregatePredicate_;
-  }
-
-  public boolean isAggregate() {
-    return isAggregatePredicate_.apply(this);
-  }
-
-  public List<String> childrenToSql() {
-    List<String> result = Lists.newArrayList();
-    for (Expr child: children_) {
-      result.add(child.toSql());
-    }
-    return result;
-  }
-
-  public String debugString() {
-    return (id_ != null ? "exprid=" + id_.toString() + " " : "") + debugString(children_);
-  }
-
-  public static String debugString(List<? extends Expr> exprs) {
-    if (exprs == null || exprs.isEmpty()) return "";
-    List<String> strings = Lists.newArrayList();
-    for (Expr expr: exprs) {
-      strings.add(expr.debugString());
-    }
-    return Joiner.on(" ").join(strings);
-  }
-
-  public static String toSql(List<? extends Expr> exprs) {
-    if (exprs == null || exprs.isEmpty()) return "";
-    List<String> strings = Lists.newArrayList();
-    for (Expr expr: exprs) {
-      strings.add(expr.toSql());
-    }
-    return Joiner.on(", ").join(strings);
-  }
-
-  /**
-   * Returns true if two expressions are equal. The equality comparison works on analyzed
-   * as well as unanalyzed exprs by ignoring implicit casts (see CastExpr.equals()).
-   */
-  @Override
-  public boolean equals(Object obj) {
-    if (obj == null) return false;
-    if (obj.getClass() != this.getClass()) return false;
-    // don't compare type, this could be called pre-analysis
-    Expr expr = (Expr) obj;
-    if (children_.size() != expr.children_.size()) return false;
-    for (int i = 0; i < children_.size(); ++i) {
-      if (!children_.get(i).equals(expr.children_.get(i))) return false;
-    }
-    if (fn_ == null && expr.fn_ == null) return true;
-    if (fn_ == null || expr.fn_ == null) return false; // One null, one not
-    // Both fn_'s are not null
-    return fn_.equals(expr.fn_);
-  }
-
-  /**
-   * Return true if l1[i].equals(l2[i]) for all i.
-   */
-  public static <C extends Expr> boolean equalLists(List<C> l1, List<C> l2) {
-    if (l1.size() != l2.size()) return false;
-    Iterator<C> l1Iter = l1.iterator();
-    Iterator<C> l2Iter = l2.iterator();
-    while (l1Iter.hasNext()) {
-      if (!l1Iter.next().equals(l2Iter.next())) return false;
-    }
-    return true;
-  }
-
-  /**
-   * Return true if l1 equals l2 when both lists are interpreted as sets.
-   * TODO: come up with something better than O(n^2)?
-   */
-  public static <C extends Expr> boolean equalSets(List<C> l1, List<C> l2) {
-    if (l1.size() != l2.size()) return false;
-    return l1.containsAll(l2) && l2.containsAll(l1);
-  }
-
-  /**
-   * Return true if l1 is a subset of l2.
-   */
-  public static <C extends Expr> boolean isSubset(List<C> l1, List<C> l2) {
-    if (l1.size() > l2.size()) return false;
-    return l2.containsAll(l1);
-  }
-
-  /**
-   * Return the intersection of l1 and l2.
-   */
-  public static <C extends Expr> List<C> intersect(List<C> l1, List<C> l2) {
-    List<C> result = new ArrayList<C>();
-    for (C element: l1) {
-      if (l2.contains(element)) result.add(element);
-    }
-    return result;
-  }
-
-  /**
-   * Compute the intersection of l1 and l2, given the smap, and
-   * return the intersecting l1 elements in i1 and the intersecting l2 elements in i2.
-   */
-  public static void intersect(Analyzer analyzer,
-      List<Expr> l1, List<Expr> l2, ExprSubstitutionMap smap,
-      List<Expr> i1, List<Expr> i2) {
-    i1.clear();
-    i2.clear();
-    List<Expr> s1List = Expr.substituteList(l1, smap, analyzer, false);
-    Preconditions.checkState(s1List.size() == l1.size());
-    List<Expr> s2List = Expr.substituteList(l2, smap, analyzer, false);
-    Preconditions.checkState(s2List.size() == l2.size());
-    for (int i = 0; i < s1List.size(); ++i) {
-      Expr s1 = s1List.get(i);
-      for (int j = 0; j < s2List.size(); ++j) {
-        Expr s2 = s2List.get(j);
-        if (s1.equals(s2)) {
-          i1.add(l1.get(i));
-          i2.add(l2.get(j));
-          break;
-        }
-      }
-    }
-  }
-
-  @Override
-  public int hashCode() {
-    if (id_ == null) {
-      throw new UnsupportedOperationException("Expr.hashCode() is not implemented");
-    } else {
-      return id_.asInt();
-    }
-  }
-
-  /**
-   * Gather conjuncts from this expr and return them in a list.
-   * A conjunct is an expr that returns a boolean, e.g., Predicates, function calls,
-   * SlotRefs, etc. Hence, this method is placed here and not in Predicate.
-   */
-  public List<Expr> getConjuncts() {
-    List<Expr> list = Lists.newArrayList();
-    if (this instanceof CompoundPredicate
-        && ((CompoundPredicate) this).getOp() == CompoundPredicate.Operator.AND) {
-      // TODO: we have to convert CompoundPredicate.AND to two expr trees for
-      // conjuncts because NULLs are handled differently for CompoundPredicate.AND
-      // and conjunct evaluation.  This is not optimal for jitted exprs because it
-      // will result in two functions instead of one. Create a new CompoundPredicate
-      // Operator (i.e. CONJUNCT_AND) with the right NULL semantics and use that
-      // instead
-      list.addAll((getChild(0)).getConjuncts());
-      list.addAll((getChild(1)).getConjuncts());
-    } else {
-      list.add(this);
-    }
-    return list;
-  }
-
-  /**
-   * Returns an analyzed clone of 'this' with exprs substituted according to smap.
-   * Removes implicit casts and analysis state while cloning/substituting exprs within
-   * this tree, such that the returned result has minimal implicit casts and types.
-   * Throws if analyzing the post-substitution expr tree failed.
-   * If smap is null, this function is equivalent to clone().
-   * If preserveRootType is true, the resulting expr tree will be cast if necessary to
-   * the type of 'this'.
-   */
-  public Expr trySubstitute(ExprSubstitutionMap smap, Analyzer analyzer,
-      boolean preserveRootType)
-      throws AnalysisException {
-    Expr result = clone();
-    // Return clone to avoid removing casts.
-    if (smap == null) return result;
-    result = result.substituteImpl(smap, analyzer);
-    result.analyze(analyzer);
-    if (preserveRootType && !type_.equals(result.getType())) result = result.castTo(type_);
-    return result;
-  }
-
-  /**
-   * Returns an analyzed clone of 'this' with exprs substituted according to smap.
-   * Removes implicit casts and analysis state while cloning/substituting exprs within
-   * this tree, such that the returned result has minimal implicit casts and types.
-   * Expects the analysis of the post-substitution expr to succeed.
-   * If smap is null, this function is equivalent to clone().
-   * If preserveRootType is true, the resulting expr tree will be cast if necessary to
-   * the type of 'this'.
-   */
-  public Expr substitute(ExprSubstitutionMap smap, Analyzer analyzer,
-      boolean preserveRootType) {
-    try {
-      return trySubstitute(smap, analyzer, preserveRootType);
-    } catch (Exception e) {
-      throw new IllegalStateException("Failed analysis after expr substitution.", e);
-    }
-  }
-
-  public static ArrayList<Expr> trySubstituteList(Iterable<? extends Expr> exprs,
-      ExprSubstitutionMap smap, Analyzer analyzer, boolean preserveRootTypes)
-          throws AnalysisException {
-    if (exprs == null) return null;
-    ArrayList<Expr> result = new ArrayList<Expr>();
-    for (Expr e: exprs) {
-      result.add(e.trySubstitute(smap, analyzer, preserveRootTypes));
-    }
-    return result;
-  }
-
-  public static ArrayList<Expr> substituteList(Iterable<? extends Expr> exprs,
-      ExprSubstitutionMap smap, Analyzer analyzer, boolean preserveRootTypes) {
-    try {
-      return trySubstituteList(exprs, smap, analyzer, preserveRootTypes);
-    } catch (Exception e) {
-      throw new IllegalStateException("Failed analysis after expr substitution.", e);
-    }
-  }
-
-  /**
-   * Recursive method that performs the actual substitution for try/substitute() while
-   * removing implicit casts. Resets the analysis state in all non-SlotRef expressions.
-   * Exprs that have non-child exprs which should be affected by substitutions must
-   * override this method and apply the substitution to such exprs as well.
-   */
-  protected Expr substituteImpl(ExprSubstitutionMap smap, Analyzer analyzer)
-      throws AnalysisException {
-    if (isImplicitCast()) return getChild(0).substituteImpl(smap, analyzer);
-    if (smap != null) {
-      Expr substExpr = smap.get(this);
-      if (substExpr != null) return substExpr.clone();
-    }
-    for (int i = 0; i < children_.size(); ++i) {
-      children_.set(i, children_.get(i).substituteImpl(smap, analyzer));
-    }
-    // SlotRefs must remain analyzed to support substitution across query blocks. All
-    // other exprs must be analyzed again after the substitution to add implicit casts
-    // and for resolving their correct function signature.
-    if (!(this instanceof SlotRef)) resetAnalysisState();
-    return this;
-  }
-
-  /**
-   * Resets the internal state of this expr produced by analyze().
-   * Only modifies this expr, and not its child exprs.
-   */
-  protected void resetAnalysisState() { isAnalyzed_ = false; }
-
-  /**
-   * Resets the internal analysis state of this expr tree. Removes implicit casts.
-   */
-  public Expr reset() {
-    if (isImplicitCast()) return getChild(0).reset();
-    for (int i = 0; i < children_.size(); ++i) {
-      children_.set(i, children_.get(i).reset());
-    }
-    resetAnalysisState();
-    return this;
-  }
-
-  public static ArrayList<Expr> resetList(ArrayList<Expr> l) {
-    for (int i = 0; i < l.size(); ++i) {
-      l.set(i, l.get(i).reset());
-    }
-    return l;
-  }
-
-  /**
-   * Creates a deep copy of this expr including its analysis state. The method is
-   * abstract in this class to force new Exprs to implement it.
-   */
-  @Override
-  public abstract Expr clone();
-
-  /**
-   * Create a deep copy of 'l'. The elements of the returned list are of the same
-   * type as the input list.
-   */
-  public static <C extends Expr> ArrayList<C> cloneList(List<C> l) {
-    Preconditions.checkNotNull(l);
-    ArrayList<C> result = new ArrayList<C>(l.size());
-    for (Expr element: l) {
-      result.add((C) element.clone());
-    }
-    return result;
-  }
-
-  /**
-   * Removes duplicate exprs (according to equals()).
-   */
-  public static <C extends Expr> void removeDuplicates(List<C> l) {
-    if (l == null) return;
-    ListIterator<C> it1 = l.listIterator();
-    while (it1.hasNext()) {
-      C e1 = it1.next();
-      ListIterator<C> it2 = l.listIterator();
-      boolean duplicate = false;
-      while (it2.hasNext()) {
-        C e2 = it2.next();
-        // only check up to but excluding e1
-        if (e1 == e2) break;
-        if (e1.equals(e2)) {
-          duplicate = true;
-          break;
-        }
-      }
-      if (duplicate) it1.remove();
-    }
-  }
-
-  /**
-   * Removes constant exprs
-   */
-  public static <C extends Expr> void removeConstants(List<C> l) {
-    if (l == null) return;
-    ListIterator<C> it = l.listIterator();
-    while (it.hasNext()) {
-      C e = it.next();
-      if (e.isConstant()) it.remove();
-    }
-  }
-
-  /**
-   * Returns true if expr is fully bound by tid, otherwise false.
-   */
-  public boolean isBound(TupleId tid) {
-    return isBoundByTupleIds(Lists.newArrayList(tid));
-  }
-
-  /**
-   * Returns true if expr is fully bound by tids, otherwise false.
-   */
-  public boolean isBoundByTupleIds(List<TupleId> tids) {
-    for (Expr child: children_) {
-      if (!child.isBoundByTupleIds(tids)) return false;
-    }
-    return true;
-  }
-
-  /**
-   * Returns true if expr is fully bound by slotId, otherwise false.
-   */
-  public boolean isBound(SlotId slotId) {
-    return isBoundBySlotIds(Lists.newArrayList(slotId));
-  }
-
-  /**
-   * Returns true if expr is fully bound by slotIds, otherwise false.
-   */
-  public boolean isBoundBySlotIds(List<SlotId> slotIds) {
-    for (Expr child: children_) {
-      if (!child.isBoundBySlotIds(slotIds)) return false;
-    }
-    return true;
-  }
-
-  public static boolean isBound(List<? extends Expr> exprs, List<TupleId> tids) {
-    for (Expr expr: exprs) {
-      if (!expr.isBoundByTupleIds(tids)) return false;
-    }
-    return true;
-  }
-
-  public static Expr getFirstBoundChild(Expr expr, List<TupleId> tids) {
-    for (Expr child: expr.getChildren()) {
-      if (child.isBoundByTupleIds(tids)) return child;
-    }
-    return null;
-  }
-
-  public void getIds(List<TupleId> tupleIds, List<SlotId> slotIds) {
-    Set<TupleId> tupleIdSet = Sets.newHashSet();
-    Set<SlotId> slotIdSet = Sets.newHashSet();
-    getIdsHelper(tupleIdSet, slotIdSet);
-    if (tupleIds != null) tupleIds.addAll(tupleIdSet);
-    if (slotIds != null) slotIds.addAll(slotIdSet);
-  }
-
-  protected void getIdsHelper(Set<TupleId> tupleIds, Set<SlotId> slotIds) {
-    for (Expr child: children_) {
-      child.getIdsHelper(tupleIds, slotIds);
-    }
-  }
-
-  public static <C extends Expr> void getIds(List<? extends Expr> exprs,
-      List<TupleId> tupleIds, List<SlotId> slotIds) {
-    if (exprs == null) return;
-    for (Expr e: exprs) {
-      e.getIds(tupleIds, slotIds);
-    }
-  }
-
-  /**
-   * @return true if this is an instance of LiteralExpr
-   */
-  public boolean isLiteral() {
-    return this instanceof LiteralExpr;
-  }
-
-  /**
-   * @return true if this expr can be evaluated with Expr::GetValue(NULL),
-   * i.e. if it doesn't contain any references to runtime variables (e.g. slot refs).
-   * Expr subclasses should override this if necessary (e.g. SlotRef, Subquery, etc.
-   * always return false).
-   */
-  public boolean isConstant() {
-    for (Expr expr : children_) {
-      if (!expr.isConstant()) return false;
-    }
-    return true;
-  }
-
-  /**
-   * @return true if this expr is either a null literal or a cast from
-   * a null literal.
-   */
-  public boolean isNullLiteral() {
-    if (this instanceof NullLiteral) return true;
-    if (!(this instanceof CastExpr)) return false;
-    Preconditions.checkState(children_.size() == 1);
-    return children_.get(0).isNullLiteral();
-  }
-
-  /**
-   * Return true if this expr is a scalar subquery.
-   */
-  public boolean isScalarSubquery() {
-    Preconditions.checkState(isAnalyzed_);
-    return this instanceof Subquery && getType().isScalarType();
-  }
-
-  /**
-   * Checks whether this expr returns a boolean type or NULL type.
-   * If not, throws an AnalysisException with an appropriate error message using
-   * 'name' as a prefix. For example, 'name' could be "WHERE clause".
-   * The error message only contains this.toSql() if printExpr is true.
-   */
-  public void checkReturnsBool(String name, boolean printExpr) throws AnalysisException {
-    if (!type_.isBoolean() && !type_.isNull()) {
-      throw new AnalysisException(
-          String.format("%s%s requires return type 'BOOLEAN'. " +
-              "Actual type is '%s'.", name, (printExpr) ? " '" + toSql() + "'" : "",
-              type_.toString()));
-    }
-  }
-
-  /**
-   * Casts this expr to a specific target type. It checks the validity of the cast and
-   * calls uncheckedCastTo().
-   * @param targetType
-   *          type to be cast to
-   * @return cast expression, or converted literal,
-   *         should never return null
-   * @throws AnalysisException
-   *           when an invalid cast is asked for, for example,
-   *           failure to convert a string literal to a date literal
-   */
-  public final Expr castTo(Type targetType) throws AnalysisException {
-    Type type = Type.getAssignmentCompatibleType(this.type_, targetType, false);
-    Preconditions.checkState(type.isValid(), "cast %s to %s", this.type_, targetType);
-    // If the targetType is NULL_TYPE then ignore the cast because NULL_TYPE
-    // is compatible with all types and no cast is necessary.
-    if (targetType.isNull()) return this;
-    if (!targetType.isDecimal()) {
-      // requested cast must be to assignment-compatible type
-      // (which implies no loss of precision)
-      Preconditions.checkArgument(targetType.equals(type),
-          "targetType=" + targetType + " type=" + type);
-    }
-    return uncheckedCastTo(targetType);
-  }
-
-  /**
-   * Create an expression equivalent to 'this' but returning targetType;
-   * possibly by inserting an implicit cast,
-   * or by returning an altogether new expression
-   * or by returning 'this' with a modified return type.
-   * @param targetType
-   *          type to be cast to
-   * @return cast expression, or converted literal,
-   *         should never return null
-   * @throws AnalysisException
-   *           when an invalid cast is asked for, for example,
-   *           failure to convert a string literal to a date literal
-   */
-  protected Expr uncheckedCastTo(Type targetType) throws AnalysisException {
-    return new CastExpr(targetType, this);
-  }
-
-  /**
-   * Add a cast expression above child.
-   * If child is a literal expression, we attempt to
-   * convert the value of the child directly, and not insert a cast node.
-   * @param targetType
-   *          type to be cast to
-   * @param childIndex
-   *          index of child to be cast
-   */
-  public void castChild(Type targetType, int childIndex) throws AnalysisException {
-    Expr child = getChild(childIndex);
-    Expr newChild = child.castTo(targetType);
-    setChild(childIndex, newChild);
-  }
-
-
-  /**
-   * Convert child to targetType, possibly by inserting an implicit cast, or by
-   * returning an altogether new expression, or by returning 'this' with a modified
-   * return type.
-   * @param targetType
-   *          type to be cast to
-   * @param childIndex
-   *          index of child to be cast
-   */
-  protected void uncheckedCastChild(Type targetType, int childIndex)
-      throws AnalysisException {
-    Expr child = getChild(childIndex);
-    Expr newChild = child.uncheckedCastTo(targetType);
-    setChild(childIndex, newChild);
-  }
-
-  /**
-   * Returns child expr if this expr is an implicit cast, otherwise returns 'this'.
-   */
-  public Expr ignoreImplicitCast() {
-    if (isImplicitCast()) return getChild(0).ignoreImplicitCast();
-    return this;
-  }
-
-  /**
-   * Returns true if 'this' is an implicit cast expr.
-   */
-  public boolean isImplicitCast() {
-    return this instanceof CastExpr && ((CastExpr) this).isImplicit();
-  }
-
-  @Override
-  public String toString() {
-    return Objects.toStringHelper(this.getClass())
-        .add("id", id_)
-        .add("type", type_)
-        .add("sel", selectivity_)
-        .add("evalCost", evalCost_)
-        .add("#distinct", numDistinctValues_)
-        .toString();
-  }
-
-  /**
-   * If 'this' is a SlotRef or a Cast that wraps a SlotRef, returns that SlotRef.
-   * Otherwise returns null.
-   */
-  public SlotRef unwrapSlotRef(boolean implicitOnly) {
-    if (this instanceof SlotRef) {
-      return (SlotRef) this;
-    } else if (this instanceof CastExpr
-        && (!implicitOnly || ((CastExpr) this).isImplicit())
-        && getChild(0) instanceof SlotRef) {
-      return (SlotRef) getChild(0);
-    } else {
-      return null;
-    }
-  }
-
-  /**
-   * Returns the descriptor of the scan slot that directly or indirectly produces
-   * the values of 'this' SlotRef. Traverses the source exprs of intermediate slot
-   * descriptors to resolve materialization points (e.g., aggregations).
-   * Returns null if 'e' or any source expr of 'e' is not a SlotRef or cast SlotRef.
-   */
-  public SlotDescriptor findSrcScanSlot() {
-    SlotRef slotRef = unwrapSlotRef(false);
-    if (slotRef == null) return null;
-    SlotDescriptor slotDesc = slotRef.getDesc();
-    if (slotDesc.isScanSlot()) return slotDesc;
-    if (slotDesc.getSourceExprs().size() == 1) {
-      return slotDesc.getSourceExprs().get(0).findSrcScanSlot();
-    }
-    // No known source expr, or there are several source exprs, meaning the slot
-    // has no single source table.
-    return null;
-  }
-
-  /**
-   * Pushes negation to the individual operands of a predicate
-   * tree rooted at 'root'.
-   */
-  public static Expr pushNegationToOperands(Expr root) {
-    Preconditions.checkNotNull(root);
-    if (Expr.IS_NOT_PREDICATE.apply(root)) {
-      try {
-        // Make sure we call function 'negate' only on classes that support it,
-        // otherwise we may recurse infinitely.
-        Method m = root.getChild(0).getClass().getDeclaredMethod(NEGATE_FN);
-        return pushNegationToOperands(root.getChild(0).negate());
-      } catch (NoSuchMethodException e) {
-        // The 'negate' function is not implemented. Break the recursion.
-        return root;
-      }
-    }
-
-    if (root instanceof CompoundPredicate) {
-      Expr left = pushNegationToOperands(root.getChild(0));
-      Expr right = pushNegationToOperands(root.getChild(1));
-      return new CompoundPredicate(((CompoundPredicate)root).getOp(), left, right);
-    }
-
-    return root;
-  }
-
-  /**
-   * Negates a boolean Expr.
-   */
-  public Expr negate() {
-    Preconditions.checkState(type_.getPrimitiveType() == PrimitiveType.BOOLEAN);
-    return new CompoundPredicate(CompoundPredicate.Operator.NOT, this, null);
-  }
-
-  /**
-   * Returns the subquery of an expr. Returns null if this expr does not contain
-   * a subquery.
-   *
-   * TODO: Support predicates with more than one subquery when we implement
-   * independent subquery evaluation.
-   */
-  public Subquery getSubquery() {
-    if (!contains(Subquery.class)) return null;
-    List<Subquery> subqueries = Lists.newArrayList();
-    collect(Subquery.class, subqueries);
-    Preconditions.checkState(subqueries.size() == 1);
-    return subqueries.get(0);
-  }
-
-  /**
-   * For children of 'this' that are constant expressions and the type of which has a
-   * LiteralExpr subclass, evaluate them in the BE and substitute the child with the
-   * resulting LiteralExpr. Modifies 'this' in place and does not re-analyze it. Hence,
-   * it is not safe to evaluate the modified expr in the BE as the resolved fn_ may be
-   * incorrect given the new arguments.
-   *
-   * Throws an AnalysisException if the evaluation fails in the BE.
-   *
-   * TODO: Convert to a generic constant expr folding function to be used during analysis.
-   */
-  public void foldConstantChildren(Analyzer analyzer) throws AnalysisException {
-    Preconditions.checkState(isAnalyzed_);
-    Preconditions.checkNotNull(analyzer);
-    for (int i = 0; i < children_.size(); ++i) {
-      Expr child = getChild(i);
-      if (child.isLiteral() || !child.isConstant()) continue;
-      LiteralExpr literalExpr = LiteralExpr.create(child, analyzer.getQueryCtx());
-      if (literalExpr == null) continue;
-      setChild(i, literalExpr);
-    }
-    isAnalyzed_ = false;
-  }
-
-  /**
-   * Returns true iff all of this Expr's children have their costs set.
-   */
-  protected boolean hasChildCosts() {
-    for (Expr child : children_) {
-      if (!child.hasCost()) return false;
-    }
-    return true;
-  }
-
-  /**
-   * Computes and returns the sum of the costs of all of this Expr's children.
-   */
-  protected float getChildCosts() {
-    float cost = 0;
-    for (Expr child : children_) cost += child.getCost();
-    return cost;
-  }
-
-  /**
-   * Returns the average length of the values produced by an Expr
-   * of type string. Returns a default for unknown lengths.
-   */
-  protected static double getAvgStringLength(Expr e) {
-    Preconditions.checkState(e.getType().isStringType());
-    Preconditions.checkState(e.isAnalyzed_);
-
-    SlotRef ref = e.unwrapSlotRef(false);
-    if (ref != null) {
-      if (ref.getDesc() != null && ref.getDesc().getStats().getAvgSize() > 0) {
-        return ref.getDesc().getStats().getAvgSize();
-      } else {
-        return DEFAULT_AVG_STRING_LENGTH;
-      }
-    } else if (e instanceof StringLiteral) {
-      return ((StringLiteral) e).getValue().length();
-    } else {
-      // TODO(tmarshall): Extend this to support other string Exprs, such as
-      // function calls that return string.
-      return DEFAULT_AVG_STRING_LENGTH;
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/ExprId.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/ExprId.java b/fe/src/main/java/com/cloudera/impala/analysis/ExprId.java
deleted file mode 100644
index 52292f5..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/ExprId.java
+++ /dev/null
@@ -1,37 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import com.cloudera.impala.common.Id;
-import com.cloudera.impala.common.IdGenerator;
-
-public class ExprId extends Id<ExprId> {
-  // Construction only allowed via an IdGenerator.
-  protected ExprId(int id) {
-    super(id);
-  }
-
-  public static IdGenerator<ExprId> createGenerator() {
-    return new IdGenerator<ExprId>() {
-      @Override
-      public ExprId getNextId() { return new ExprId(nextId_++); }
-      @Override
-      public ExprId getMaxId() { return new ExprId(nextId_ - 1); }
-    };
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/ExprSubstitutionMap.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/ExprSubstitutionMap.java b/fe/src/main/java/com/cloudera/impala/analysis/ExprSubstitutionMap.java
deleted file mode 100644
index cbff71a..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/ExprSubstitutionMap.java
+++ /dev/null
@@ -1,176 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.List;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-/**
- * Map of expression substitutions: lhs[i] gets substituted with rhs[i].
- * To support expression substitution across query blocks, rhs exprs must already be
- * analyzed when added to this map. Otherwise, analysis of a SlotRef may fail after
- * substitution, e.g., because the table it refers to is in a different query block
- * that is not visible.
- * See Expr.substitute() and related functions for details on the actual substitution.
- */
-public final class ExprSubstitutionMap {
-  private final static Logger LOG = LoggerFactory.getLogger(ExprSubstitutionMap.class);
-
-  private List<Expr> lhs_; // left-hand side
-  private List<Expr> rhs_; // right-hand side
-
-  public ExprSubstitutionMap() {
-    this(Lists.<Expr>newArrayList(), Lists.<Expr>newArrayList());
-  }
-
-  public ExprSubstitutionMap(List<Expr> lhs, List<Expr> rhs) {
-    lhs_ = lhs;
-    rhs_ = rhs;
-  }
-
-  /**
-   * Add an expr mapping. The rhsExpr must be analyzed to support correct substitution
-   * across query blocks. It is not required that the lhsExpr is analyzed.
-   */
-  public void put(Expr lhsExpr, Expr rhsExpr) {
-    Preconditions.checkState(rhsExpr.isAnalyzed_, "Rhs expr must be analyzed.");
-    lhs_.add(lhsExpr);
-    rhs_.add(rhsExpr);
-  }
-
-  /**
-   * Returns the expr mapped to lhsExpr or null if no mapping to lhsExpr exists.
-   */
-  public Expr get(Expr lhsExpr) {
-    for (int i = 0; i < lhs_.size(); ++i) {
-      if (lhsExpr.equals(lhs_.get(i))) return rhs_.get(i);
-    }
-    return null;
-  }
-
-  /**
-   * Returns true if the smap contains a mapping for lhsExpr.
-   */
-  public boolean containsMappingFor(Expr lhsExpr) {
-    return lhs_.contains(lhsExpr);
-  }
-
-  /**
-   * Return a map which is equivalent to applying f followed by g,
-   * i.e., g(f()).
-   * Always returns a non-null map.
-   */
-  public static ExprSubstitutionMap compose(ExprSubstitutionMap f, ExprSubstitutionMap g,
-      Analyzer analyzer) {
-    if (f == null && g == null) return new ExprSubstitutionMap();
-    if (f == null) return g;
-    if (g == null) return f;
-    ExprSubstitutionMap result = new ExprSubstitutionMap();
-    // f's substitution targets need to be substituted via g
-    result.lhs_ = Expr.cloneList(f.lhs_);
-    result.rhs_ = Expr.substituteList(f.rhs_, g, analyzer, false);
-
-    // substitution maps are cumulative: the combined map contains all
-    // substitutions from f and g.
-    for (int i = 0; i < g.lhs_.size(); i++) {
-      // If f contains expr1->fn(expr2) and g contains expr2->expr3,
-      // then result must contain expr1->fn(expr3).
-      // The check before adding to result.lhs is to ensure that cases
-      // where expr2.equals(expr1) are handled correctly.
-      // For example f: count(*) -> zeroifnull(count(*))
-      // and g: count(*) -> slotref
-      // result.lhs must only have: count(*) -> zeroifnull(slotref) from f above,
-      // and not count(*) -> slotref from g as well.
-      if (!result.lhs_.contains(g.lhs_.get(i))) {
-        result.lhs_.add(g.lhs_.get(i).clone());
-        result.rhs_.add(g.rhs_.get(i).clone());
-      }
-    }
-
-    result.verify();
-    return result;
-  }
-
-  /**
-   * Returns the union of two substitution maps. Always returns a non-null map.
-   */
-  public static ExprSubstitutionMap combine(ExprSubstitutionMap f,
-      ExprSubstitutionMap g) {
-    if (f == null && g == null) return new ExprSubstitutionMap();
-    if (f == null) return g;
-    if (g == null) return f;
-    ExprSubstitutionMap result = new ExprSubstitutionMap();
-    result.lhs_ = Lists.newArrayList(f.lhs_);
-    result.lhs_.addAll(g.lhs_);
-    result.rhs_ = Lists.newArrayList(f.rhs_);
-    result.rhs_.addAll(g.rhs_);
-    result.verify();
-    return result;
-  }
-
-  public void substituteLhs(ExprSubstitutionMap lhsSmap, Analyzer analyzer) {
-    lhs_ = Expr.substituteList(lhs_, lhsSmap, analyzer, false);
-  }
-
-  public List<Expr> getLhs() { return lhs_; }
-  public List<Expr> getRhs() { return rhs_; }
-
-  public int size() { return lhs_.size(); }
-
-  public String debugString() {
-    Preconditions.checkState(lhs_.size() == rhs_.size());
-    List<String> output = Lists.newArrayList();
-    for (int i = 0; i < lhs_.size(); ++i) {
-      output.add(lhs_.get(i).toSql() + ":" + rhs_.get(i).toSql());
-      output.add("(" + lhs_.get(i).debugString() + ":" + rhs_.get(i).debugString() + ")");
-    }
-    return "smap(" + Joiner.on(" ").join(output) + ")";
-  }
-
-  /**
-   * Verifies the internal state of this smap: Checks that the lhs_ has no duplicates,
-   * and that all rhs exprs are analyzed.
-   */
-  private void verify() {
-    for (int i = 0; i < lhs_.size(); ++i) {
-      for (int j = i + 1; j < lhs_.size(); ++j) {
-        if (lhs_.get(i).equals(lhs_.get(j))) {
-          LOG.info("verify: smap=" + this.debugString());
-          Preconditions.checkState(false);
-        }
-      }
-      Preconditions.checkState(rhs_.get(i).isAnalyzed_);
-    }
-  }
-
-  public void clear() {
-    lhs_.clear();
-    rhs_.clear();
-  }
-
-  @Override
-  public ExprSubstitutionMap clone() {
-    return new ExprSubstitutionMap(Expr.cloneList(lhs_), Expr.cloneList(rhs_));
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/ExtractFromExpr.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/ExtractFromExpr.java b/fe/src/main/java/com/cloudera/impala/analysis/ExtractFromExpr.java
deleted file mode 100644
index 48b9fb3..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/ExtractFromExpr.java
+++ /dev/null
@@ -1,111 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.Set;
-
-import com.cloudera.impala.catalog.Catalog;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TExtractField;
-import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.Lists;
-
-/**
- * Representation of an EXTRACT(<Time Unit> FROM <Datetime Expr>) expression. EXTRACT(
- * <Datetime Expr>, <String>) is not handled by FunctionCallExpr.
- */
-public class ExtractFromExpr extends FunctionCallExpr {
-
-  // Behaves like an immutable linked hash set containing the TExtractFields in the same
-  // order as declared.
-  private static final Set<String> EXTRACT_FIELDS;
-  static {
-    ImmutableSet.Builder<String> builder = new ImmutableSet.Builder<String>();
-    for (TExtractField extractField: TExtractField.values()) {
-      if (extractField != TExtractField.INVALID_FIELD) {
-        builder.add(extractField.name());
-      }
-    }
-    EXTRACT_FIELDS = builder.build();
-  }
-
-  public ExtractFromExpr(FunctionName fnName, String extractFieldIdent, Expr e) {
-    // Note that the arguments are swapped so that they align with the EXTRACT function.
-    // There is no EXTRACT(STRING, TIMESTAMP) function because it conflicts with
-    // EXTRACT(TIMESTAMP, STRING) if STRINGs are used for TIMESTAMPs with implicit
-    // casting.
-    super(fnName, Lists.newArrayList(e, new StringLiteral(extractFieldIdent)));
-    type_ = Type.INT;
-  }
-
-  /**
-   * Copy c'tor used in clone().
-   */
-  protected ExtractFromExpr(ExtractFromExpr other) {
-    super(other);
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    getFnName().analyze(analyzer);
-    if (!getFnName().getFunction().equals("extract")) {
-      throw new AnalysisException("Function " + getFnName().getFunction().toUpperCase()
-          + " does not accept the keyword FROM.");
-    }
-    if ((getFnName().getDb() != null)
-        && !getFnName().getDb().equals(Catalog.BUILTINS_DB)) {
-      throw new AnalysisException("Function " + getFnName().toString() + " conflicts " +
-          "with the EXTRACT builtin.");
-    }
-    if (isAnalyzed_) return;
-    super.analyze(analyzer);
-
-    String extractFieldIdent = ((StringLiteral)children_.get(1)).getValue();
-    Preconditions.checkNotNull(extractFieldIdent);
-    if (!EXTRACT_FIELDS.contains(extractFieldIdent.toUpperCase())) {
-      throw new AnalysisException("Time unit '" + extractFieldIdent + "' in expression '"
-          + toSql() + "' is invalid. Expected one of "
-          + Joiner.on(", ").join(EXTRACT_FIELDS) + ".");
-    }
-  }
-
-  @Override
-  protected String getFunctionNotFoundError(Type[] argTypes) {
-    Expr e = children_.get(0);
-    return "Expression '" + e.toSql() + "' in '" + toSql() + "' has a return type of "
-          + e.getType().toSql() + " but a TIMESTAMP is required.";
-  }
-
-  @Override
-  public String toSqlImpl() {
-    StringBuilder strBuilder = new StringBuilder();
-    strBuilder.append(getFnName().toString().toUpperCase());
-    strBuilder.append("(");
-    strBuilder.append(((StringLiteral)getChild(1)).getValue());
-    strBuilder.append(" FROM ");
-    strBuilder.append(getChild(0).toSql());
-    strBuilder.append(")");
-    return strBuilder.toString();
-  }
-
-  @Override
-  public Expr clone() { return new ExtractFromExpr(this); }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/FromClause.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/FromClause.java b/fe/src/main/java/com/cloudera/impala/analysis/FromClause.java
deleted file mode 100644
index bbe6f23..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/FromClause.java
+++ /dev/null
@@ -1,129 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-
-import com.cloudera.impala.common.AnalysisException;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-/**
- * Wraps a list of TableRef instances that form a FROM clause, allowing them to be
- * analyzed independently of the statement using them. To increase the flexibility of
- * the class it implements the Iterable interface.
- */
-public class FromClause implements ParseNode, Iterable<TableRef> {
-
-  private final ArrayList<TableRef> tableRefs_;
-
-  private boolean analyzed_ = false;
-
-  public FromClause(List<TableRef> tableRefs) {
-    tableRefs_ = Lists.newArrayList(tableRefs);
-    // Set left table refs to ensure correct toSql() before analysis.
-    for (int i = 1; i < tableRefs_.size(); ++i) {
-      tableRefs_.get(i).setLeftTblRef(tableRefs_.get(i - 1));
-    }
-  }
-
-  public FromClause() { tableRefs_ = Lists.newArrayList(); }
-  public List<TableRef> getTableRefs() { return tableRefs_; }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    if (analyzed_) return;
-
-    if (tableRefs_.isEmpty()) {
-      analyzed_ = true;
-      return;
-    }
-
-    // Start out with table refs to establish aliases.
-    TableRef leftTblRef = null;  // the one to the left of tblRef
-    for (int i = 0; i < tableRefs_.size(); ++i) {
-      // Resolve and replace non-InlineViewRef table refs with a BaseTableRef or ViewRef.
-      TableRef tblRef = tableRefs_.get(i);
-      tblRef = analyzer.resolveTableRef(tblRef);
-      tableRefs_.set(i, Preconditions.checkNotNull(tblRef));
-      tblRef.setLeftTblRef(leftTblRef);
-      try {
-        tblRef.analyze(analyzer);
-      } catch (AnalysisException e) {
-        // Only re-throw the exception if no tables are missing.
-        if (analyzer.getMissingTbls().isEmpty()) throw e;
-      }
-      leftTblRef = tblRef;
-    }
-
-    // All tableRefs have been analyzed, but at least one table is missing metadata.
-    if (!analyzer.getMissingTbls().isEmpty()) {
-      throw new AnalysisException("Found missing tables. Aborting analysis.");
-    }
-    analyzed_ = true;
-  }
-
-  public FromClause clone() {
-    ArrayList<TableRef> clone = Lists.newArrayList();
-    for (TableRef tblRef: tableRefs_) clone.add(tblRef.clone());
-    return new FromClause(clone);
-  }
-
-  public void reset() {
-    for (int i = 0; i < size(); ++i) {
-      TableRef origTblRef = get(i);
-      if (origTblRef.isResolved() && !(origTblRef instanceof InlineViewRef)) {
-        // Replace resolved table refs with unresolved ones.
-        TableRef newTblRef = new TableRef(origTblRef);
-        // Use the fully qualified raw path to preserve the original resolution.
-        // Otherwise, non-fully qualified paths might incorrectly match a local view.
-        // TODO for 2.3: This full qualification preserves analysis state which is
-        // contrary to the intended semantics of reset(). We could address this issue by
-        // changing the WITH-clause analysis to register local views that have
-        // fully-qualified table refs, and then remove the full qualification here.
-        newTblRef.rawPath_ = origTblRef.getResolvedPath().getFullyQualifiedRawPath();
-        set(i, newTblRef);
-      }
-      get(i).reset();
-    }
-    this.analyzed_ = false;
-  }
-
-  @Override
-  public String toSql() {
-    StringBuilder builder = new StringBuilder();
-    if (!tableRefs_.isEmpty()) {
-      builder.append(" FROM ");
-      for (int i = 0; i < tableRefs_.size(); ++i) {
-        builder.append(tableRefs_.get(i).toSql());
-      }
-    }
-    return builder.toString();
-  }
-
-  public boolean isEmpty() { return tableRefs_.isEmpty(); }
-
-  @Override
-  public Iterator<TableRef> iterator() { return tableRefs_.iterator(); }
-  public int size() { return tableRefs_.size(); }
-  public TableRef get(int i) { return tableRefs_.get(i); }
-  public void set(int i, TableRef tableRef) { tableRefs_.set(i, tableRef); }
-  public void add(TableRef t) { tableRefs_.add(t); }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/FunctionArgs.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/FunctionArgs.java b/fe/src/main/java/com/cloudera/impala/analysis/FunctionArgs.java
deleted file mode 100644
index 998c5fc..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/FunctionArgs.java
+++ /dev/null
@@ -1,67 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.ArrayList;
-
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-// Wrapper class around argument types and if it has varArgs
-public class FunctionArgs implements ParseNode {
-  private final ArrayList<TypeDef> argTypeDefs_;
-  private boolean hasVarArgs_;
-
-  // Result of analysis.
-  private ArrayList<Type> argTypes_;
-
-  public FunctionArgs() {
-    argTypeDefs_ = Lists.newArrayList();
-    hasVarArgs_ = false;
-  }
-
-  public FunctionArgs(ArrayList<TypeDef> argTypeDefs, boolean varArgs) {
-    argTypeDefs_ = argTypeDefs;
-    hasVarArgs_ = varArgs;
-    if (varArgs) Preconditions.checkState(argTypeDefs.size() > 0);
-  }
-
-  public void setHasVarArgs(boolean b) {
-    Preconditions.checkState(argTypeDefs_.size() > 0);
-    hasVarArgs_ = b;
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    ArrayList<Type> argTypes = Lists.newArrayListWithCapacity(argTypeDefs_.size());
-    for (TypeDef typeDef: argTypeDefs_) {
-      typeDef.analyze(analyzer);
-      argTypes.add(typeDef.getType());
-    }
-    argTypes_ = argTypes;
-  }
-
-  public ArrayList<TypeDef> getArgTypeDefs() { return argTypeDefs_; }
-  public ArrayList<Type> getArgTypes() { return argTypes_; }
-  public boolean hasVarArgs() { return hasVarArgs_; }
-
-  @Override
-  public String toSql() { return null; }
-}


[61/61] incubator-impala git commit: IMPALA-4180: Synchronize accesses to RuntimeState::reader_contexts_

Posted by kw...@apache.org.
IMPALA-4180: Synchronize accesses to RuntimeState::reader_contexts_

HdfsScanNodeBase::Close() may add its outstanding DiskIO context to
RuntimeState::reader_contexts_ to be unregistered later when the
fragment is closed. In a plan fragment with multiple HDFS scan nodes,
it's possible for HdfsScanNodeBase::Close() to be called concurrently.
To allow safe concurrent accesses, this change adds a SpinLock to
synchronize accesses to 'reader_contexts_' in RuntimeState.
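
The change follows a standard lock-guarded accessor pattern: every access to the
shared vector goes through a method that holds the lock. Below is a minimal,
self-contained sketch of that pattern, not the actual patch; it substitutes
std::mutex for Impala's SpinLock and uses a placeholder DiskIoRequestContext so
it compiles on its own, whereas the real code uses boost::lock_guard<SpinLock>
and calls io_mgr()->UnregisterContext() on each context (see the diff below).

  // Minimal sketch of the locking pattern (assumed stand-ins: std::mutex in
  // place of Impala's SpinLock, DiskIoRequestContext as a placeholder type).
  #include <mutex>
  #include <vector>

  struct DiskIoRequestContext {};

  class RuntimeStateSketch {
   public:
    // Scan nodes may call this concurrently from Close().
    void AcquireReaderContext(DiskIoRequestContext* reader_context) {
      std::lock_guard<std::mutex> l(reader_contexts_lock_);
      reader_contexts_.push_back(reader_context);
    }

    // Called once when the fragment is closed.
    void UnregisterReaderContexts() {
      std::lock_guard<std::mutex> l(reader_contexts_lock_);
      for (DiskIoRequestContext* context : reader_contexts_) {
        (void)context;  // the real code calls io_mgr()->UnregisterContext(context)
      }
      reader_contexts_.clear();
    }

   private:
    std::mutex reader_contexts_lock_;
    std::vector<DiskIoRequestContext*> reader_contexts_;
  };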

Change-Id: I911fda526a99514b12f88a3e9fb5952ea4fe1973
Reviewed-on: http://gerrit.cloudera.org:8080/4558
Reviewed-by: Dan Hecht <dh...@cloudera.com>
Tested-by: Internal Jenkins


Project: http://git-wip-us.apache.org/repos/asf/incubator-impala/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-impala/commit/2a31fbdb
Tree: http://git-wip-us.apache.org/repos/asf/incubator-impala/tree/2a31fbdb
Diff: http://git-wip-us.apache.org/repos/asf/incubator-impala/diff/2a31fbdb

Branch: refs/heads/master
Commit: 2a31fbdbfac9a7092c96e4ab9894e0db0e4ce9ca
Parents: f640b3a
Author: Michael Ho <kw...@cloudera.com>
Authored: Wed Sep 28 14:32:55 2016 -0700
Committer: Internal Jenkins <cl...@gerrit.cloudera.org>
Committed: Fri Sep 30 01:21:05 2016 +0000

----------------------------------------------------------------------
 be/src/exec/hdfs-scan-node-base.cc                   |  2 +-
 be/src/runtime/plan-fragment-executor.cc             |  4 +---
 be/src/runtime/runtime-state.cc                      | 13 +++++++++++++
 be/src/runtime/runtime-state.h                       | 12 +++++++++++-
 .../queries/QueryTest/single-node-nlj.test           | 15 +++++++++++++--
 5 files changed, 39 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/2a31fbdb/be/src/exec/hdfs-scan-node-base.cc
----------------------------------------------------------------------
diff --git a/be/src/exec/hdfs-scan-node-base.cc b/be/src/exec/hdfs-scan-node-base.cc
index c03817b..4acf3f5 100644
--- a/be/src/exec/hdfs-scan-node-base.cc
+++ b/be/src/exec/hdfs-scan-node-base.cc
@@ -446,7 +446,7 @@ void HdfsScanNodeBase::Close(RuntimeState* state) {
     // There may still be io buffers used by parent nodes so we can't unregister the
     // reader context yet. The runtime state keeps a list of all the reader contexts and
     // they are unregistered when the fragment is closed.
-    state->reader_contexts()->push_back(reader_context_);
+    state->AcquireReaderContext(reader_context_);
     // Need to wait for all the active scanner threads to finish to ensure there is no
     // more memory tracked by this scan node's mem tracker.
     state->io_mgr()->CancelContext(reader_context_, true);

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/2a31fbdb/be/src/runtime/plan-fragment-executor.cc
----------------------------------------------------------------------
diff --git a/be/src/runtime/plan-fragment-executor.cc b/be/src/runtime/plan-fragment-executor.cc
index 7300f44..e0d314b 100644
--- a/be/src/runtime/plan-fragment-executor.cc
+++ b/be/src/runtime/plan-fragment-executor.cc
@@ -508,9 +508,7 @@ void PlanFragmentExecutor::Close() {
   // Prepare may not have been called, which sets runtime_state_
   if (runtime_state_.get() != NULL) {
     if (plan_ != NULL) plan_->Close(runtime_state_.get());
-    for (DiskIoRequestContext* context: *runtime_state_->reader_contexts()) {
-      runtime_state_->io_mgr()->UnregisterContext(context);
-    }
+    runtime_state_->UnregisterReaderContexts();
     exec_env_->thread_mgr()->UnregisterPool(runtime_state_->resource_pool());
     runtime_state_->desc_tbl().ClosePartitionExprs(runtime_state_.get());
     runtime_state_->filter_bank()->Close();

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/2a31fbdb/be/src/runtime/runtime-state.cc
----------------------------------------------------------------------
diff --git a/be/src/runtime/runtime-state.cc b/be/src/runtime/runtime-state.cc
index 5249076..a05b3ef 100644
--- a/be/src/runtime/runtime-state.cc
+++ b/be/src/runtime/runtime-state.cc
@@ -292,4 +292,17 @@ Status RuntimeState::GetCodegen(LlvmCodeGen** codegen, bool initialize) {
   return Status::OK();
 }
 
+void RuntimeState::AcquireReaderContext(DiskIoRequestContext* reader_context) {
+  boost::lock_guard<SpinLock> l(reader_contexts_lock_);
+  reader_contexts_.push_back(reader_context);
+}
+
+void RuntimeState::UnregisterReaderContexts() {
+  boost::lock_guard<SpinLock> l(reader_contexts_lock_);
+  for (DiskIoRequestContext* context : reader_contexts_) {
+    io_mgr()->UnregisterContext(context);
+  }
+  reader_contexts_.clear();
+}
+
 }

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/2a31fbdb/be/src/runtime/runtime-state.h
----------------------------------------------------------------------
diff --git a/be/src/runtime/runtime-state.h b/be/src/runtime/runtime-state.h
index 0bf9db5..3496d9c 100644
--- a/be/src/runtime/runtime-state.h
+++ b/be/src/runtime/runtime-state.h
@@ -138,7 +138,6 @@ class RuntimeState {
   ThreadResourceMgr::ResourcePool* resource_pool() { return resource_pool_; }
 
   FileMoveMap* hdfs_files_to_move() { return &hdfs_files_to_move_; }
-  std::vector<DiskIoRequestContext*>* reader_contexts() { return &reader_contexts_; }
 
   void set_fragment_root_id(PlanNodeId id) {
     DCHECK_EQ(root_node_id_, -1) << "Should not set this twice.";
@@ -163,6 +162,14 @@ class RuntimeState {
   /// even when codegen is enabled if nothing has been codegen'd.
   bool codegen_created() const { return codegen_.get() != NULL; }
 
+  /// Takes ownership of a scan node's reader context. The plan fragment executor will
+  /// call UnregisterReaderContexts() to unregister it when the fragment is closed. The
+  /// IO buffers may still be in use, hence the deferred unregistration.
+  void AcquireReaderContext(DiskIoRequestContext* reader_context);
+
+  /// Unregisters all reader contexts acquired through AcquireReaderContext().
+  void UnregisterReaderContexts();
+
   /// Returns codegen_ in 'codegen'. If 'initialize' is true, codegen_ will be created if
   /// it has not been initialized by a previous call already. If 'initialize' is false,
   /// 'codegen' will be set to NULL if codegen_ has not been initialized.
@@ -344,6 +351,9 @@ class RuntimeState {
   Status query_status_;
 
   /// Reader contexts that need to be closed when the fragment is closed.
+  /// Synchronization is needed if there are multiple scan nodes in a plan fragment and
+  /// Close() may be called on them concurrently (see IMPALA-4180).
+  SpinLock reader_contexts_lock_;
   std::vector<DiskIoRequestContext*> reader_contexts_;
 
   /// BufferedBlockMgr object used to allocate and manage blocks of input data in memory

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/2a31fbdb/testdata/workloads/functional-query/queries/QueryTest/single-node-nlj.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-query/queries/QueryTest/single-node-nlj.test b/testdata/workloads/functional-query/queries/QueryTest/single-node-nlj.test
index 49cdf9d..fa1ccfc 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/single-node-nlj.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/single-node-nlj.test
@@ -149,7 +149,6 @@ group by id) v2;
 ---- TYPES
 TINYINT,INT,BIGINT
 ====
-====
 ---- QUERY
 # Regression test for IMPALA-561: Multiple scan nodes in a plan fragment.
 select count(*)
@@ -161,6 +160,18 @@ left join functional.alltypes a2 on a2.tinyint_col >= 1
 BIGINT
 ====
 ---- QUERY
+# Regression test for IMPALA-4180: a single node plan with blocking join node
+# and multiple top-n + scan nodes to trigger concurrent Close() on scan nodes.
+with t as (select int_col x from functional.alltypestiny order by id limit 2)
+select * from t t1 left join t t2 on t1.x > 0
+---- RESULTS
+0,NULL
+1,0
+1,1
+---- TYPES
+INT,INT
+====
+---- QUERY
 # Right non-equi-join with empty build.
 select straight_join at.id
 from alltypes at
@@ -211,4 +222,4 @@ limit 5
 7295
 ---- TYPES
 INT
-====
+====
\ No newline at end of file


[13/61] [partial] incubator-impala git commit: IMPALA-3786: Replace "cloudera" with "apache" (part 1)

Posted by kw...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/service/JniCatalog.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/service/JniCatalog.java b/fe/src/main/java/com/cloudera/impala/service/JniCatalog.java
deleted file mode 100644
index bc71a49..0000000
--- a/fe/src/main/java/com/cloudera/impala/service/JniCatalog.java
+++ /dev/null
@@ -1,255 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.service;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.UUID;
-
-import org.apache.thrift.TException;
-import org.apache.thrift.TSerializer;
-import org.apache.thrift.protocol.TBinaryProtocol;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.authorization.SentryConfig;
-import com.cloudera.impala.authorization.User;
-import com.cloudera.impala.catalog.CatalogException;
-import com.cloudera.impala.catalog.CatalogServiceCatalog;
-import com.cloudera.impala.catalog.Db;
-import com.cloudera.impala.catalog.Function;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.common.JniUtil;
-import com.cloudera.impala.service.BackendConfig;
-import com.cloudera.impala.thrift.TCatalogObject;
-import com.cloudera.impala.thrift.TDatabase;
-import com.cloudera.impala.thrift.TDdlExecRequest;
-import com.cloudera.impala.thrift.TFunction;
-import com.cloudera.impala.thrift.TGetAllCatalogObjectsResponse;
-import com.cloudera.impala.thrift.TGetDbsParams;
-import com.cloudera.impala.thrift.TGetDbsResult;
-import com.cloudera.impala.thrift.TGetFunctionsRequest;
-import com.cloudera.impala.thrift.TGetFunctionsResponse;
-import com.cloudera.impala.thrift.TGetTablesParams;
-import com.cloudera.impala.thrift.TGetTablesResult;
-import com.cloudera.impala.thrift.TLogLevel;
-import com.cloudera.impala.thrift.TPrioritizeLoadRequest;
-import com.cloudera.impala.thrift.TResetMetadataRequest;
-import com.cloudera.impala.thrift.TSentryAdminCheckRequest;
-import com.cloudera.impala.thrift.TUniqueId;
-import com.cloudera.impala.thrift.TUpdateCatalogRequest;
-import com.cloudera.impala.util.GlogAppender;
-import com.cloudera.impala.util.PatternMatcher;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Strings;
-import com.google.common.collect.Lists;
-
-/**
- * JNI-callable interface for the CatalogService. The main point is to serialize
- * and de-serialize thrift structures between C and Java parts of the CatalogService.
- */
-public class JniCatalog {
-  private final static Logger LOG = LoggerFactory.getLogger(JniCatalog.class);
-  private final static TBinaryProtocol.Factory protocolFactory_ =
-      new TBinaryProtocol.Factory();
-  private final CatalogServiceCatalog catalog_;
-  private final CatalogOpExecutor catalogOpExecutor_;
-
-  // A unique identifier for this instance of the Catalog Service.
-  private static final TUniqueId catalogServiceId_ = generateId();
-
-  private static TUniqueId generateId() {
-    UUID uuid = UUID.randomUUID();
-    return new TUniqueId(uuid.getMostSignificantBits(), uuid.getLeastSignificantBits());
-  }
-
-  public JniCatalog(boolean loadInBackground, int numMetadataLoadingThreads,
-      String sentryServiceConfig, int impalaLogLevel, int otherLogLevel,
-      boolean allowAuthToLocal, String kerberosPrincipal) throws InternalException {
-    BackendConfig.setAuthToLocal(allowAuthToLocal);
-    Preconditions.checkArgument(numMetadataLoadingThreads > 0);
-    // This trick saves having to pass a TLogLevel enum, which is an object and more
-    // complex to pass through JNI.
-    GlogAppender.Install(TLogLevel.values()[impalaLogLevel],
-        TLogLevel.values()[otherLogLevel]);
-
-    // Check if the Sentry Service is configured. If so, create a configuration object.
-    SentryConfig sentryConfig = null;
-    if (!Strings.isNullOrEmpty(sentryServiceConfig)) {
-      sentryConfig = new SentryConfig(sentryServiceConfig);
-      sentryConfig.loadConfig();
-    }
-    LOG.info(JniUtil.getJavaVersion());
-
-    catalog_ = new CatalogServiceCatalog(loadInBackground,
-        numMetadataLoadingThreads, sentryConfig, getServiceId(), kerberosPrincipal);
-    try {
-      catalog_.reset();
-    } catch (CatalogException e) {
-      LOG.error("Error initialializing Catalog. Please run 'invalidate metadata'", e);
-    }
-    catalogOpExecutor_ = new CatalogOpExecutor(catalog_);
-  }
-
-  public static TUniqueId getServiceId() { return catalogServiceId_; }
-
-  /**
-   * Gets all catalog objects
-   */
-  public byte[] getCatalogObjects(long from_version) throws ImpalaException, TException {
-    TGetAllCatalogObjectsResponse resp =
-        catalog_.getCatalogObjects(from_version);
-    TSerializer serializer = new TSerializer(protocolFactory_);
-    return serializer.serialize(resp);
-  }
-
-  /**
-   * Gets the current catalog version.
-   */
-  public long getCatalogVersion() {
-    return catalog_.getCatalogVersion();
-  }
-
-  /**
-   * Executes the given DDL request and returns the result.
-   */
-  public byte[] execDdl(byte[] thriftDdlExecReq) throws ImpalaException {
-    TDdlExecRequest params = new TDdlExecRequest();
-    JniUtil.deserializeThrift(protocolFactory_, params, thriftDdlExecReq);
-    TSerializer serializer = new TSerializer(protocolFactory_);
-    try {
-      return serializer.serialize(catalogOpExecutor_.execDdlRequest(params));
-    } catch (TException e) {
-      throw new InternalException(e.getMessage());
-    }
-  }
-
-  /**
-   * Execute a reset metadata statement. See comment in CatalogOpExecutor.java.
-   */
-  public byte[] resetMetadata(byte[] thriftResetMetadataReq)
-      throws ImpalaException, TException {
-    TResetMetadataRequest req = new TResetMetadataRequest();
-    JniUtil.deserializeThrift(protocolFactory_, req, thriftResetMetadataReq);
-    TSerializer serializer = new TSerializer(protocolFactory_);
-    return serializer.serialize(catalogOpExecutor_.execResetMetadata(req));
-  }
-
-  /**
-   * Returns a list of databases matching an optional pattern.
-   * The argument is a serialized TGetDbsParams object.
-   * The return type is a serialized TGetDbsResult object.
-   */
-  public byte[] getDbs(byte[] thriftGetTablesParams) throws ImpalaException,
-      TException {
-    TGetDbsParams params = new TGetDbsParams();
-    JniUtil.deserializeThrift(protocolFactory_, params, thriftGetTablesParams);
-    List<Db> dbs = catalog_.getDbs(PatternMatcher.MATCHER_MATCH_ALL);
-    TGetDbsResult result = new TGetDbsResult();
-    List<TDatabase> tDbs = Lists.newArrayListWithCapacity(dbs.size());
-    for (Db db: dbs) tDbs.add(db.toThrift());
-    result.setDbs(tDbs);
-    TSerializer serializer = new TSerializer(protocolFactory_);
-    return serializer.serialize(result);
-  }
-
-  /**
-   * Returns a list of table names matching an optional pattern.
-   * The argument is a serialized TGetTablesParams object.
-   * The return type is a serialized TGetTablesResult object.
-   */
-  public byte[] getTableNames(byte[] thriftGetTablesParams) throws ImpalaException,
-      TException {
-    TGetTablesParams params = new TGetTablesParams();
-    JniUtil.deserializeThrift(protocolFactory_, params, thriftGetTablesParams);
-    List<String> tables = catalog_.getTableNames(params.db,
-        PatternMatcher.createHivePatternMatcher(params.pattern));
-    TGetTablesResult result = new TGetTablesResult();
-    result.setTables(tables);
-    TSerializer serializer = new TSerializer(protocolFactory_);
-    return serializer.serialize(result);
-  }
-
-  /**
-   * Gets the thrift representation of a catalog object.
-   */
-  public byte[] getCatalogObject(byte[] thriftParams) throws ImpalaException,
-      TException {
-    TCatalogObject objectDescription = new TCatalogObject();
-    JniUtil.deserializeThrift(protocolFactory_, objectDescription, thriftParams);
-    TSerializer serializer = new TSerializer(protocolFactory_);
-    return serializer.serialize(catalog_.getTCatalogObject(objectDescription));
-  }
-
-  /**
-   * See comment in CatalogServiceCatalog.
-   */
-  public byte[] getFunctions(byte[] thriftParams) throws ImpalaException,
-      TException {
-    TGetFunctionsRequest request = new TGetFunctionsRequest();
-    JniUtil.deserializeThrift(protocolFactory_, request, thriftParams);
-    TSerializer serializer = new TSerializer(protocolFactory_);
-    if (!request.isSetDb_name()) {
-      throw new InternalException("Database name must be set in call to " +
-          "getFunctions()");
-    }
-
-    // Get all the functions and convert them to their Thrift representation.
-    List<Function> fns = catalog_.getFunctions(request.getDb_name());
-    TGetFunctionsResponse response = new TGetFunctionsResponse();
-    response.setFunctions(new ArrayList<TFunction>(fns.size()));
-    for (Function fn: fns) {
-      response.addToFunctions(fn.toThrift());
-    }
-
-    return serializer.serialize(response);
-  }
-
-  public void prioritizeLoad(byte[] thriftLoadReq) throws ImpalaException,
-      TException  {
-    TPrioritizeLoadRequest request = new TPrioritizeLoadRequest();
-    JniUtil.deserializeThrift(protocolFactory_, request, thriftLoadReq);
-    catalog_.prioritizeLoad(request.getObject_descs());
-  }
-
-  /**
-   * Verifies whether the user is configured as an admin on the Sentry Service. Throws
-   * an AuthorizationException if the user does not have admin privileges or if there
-   * were errors communicating with the Sentry Service.
-   */
-  public void checkUserSentryAdmin(byte[] thriftReq) throws ImpalaException,
-      TException  {
-    TSentryAdminCheckRequest request = new TSentryAdminCheckRequest();
-    JniUtil.deserializeThrift(protocolFactory_, request, thriftReq);
-    catalog_.getSentryProxy().checkUserSentryAdmin(
-        new User(request.getHeader().getRequesting_user()));
-  }
-
-  /**
-   * Process any updates to the metastore required after a query executes.
-   * The argument is a serialized TUpdateCatalogRequest.
-   */
-  public byte[] updateCatalog(byte[] thriftUpdateCatalog) throws ImpalaException,
-      TException  {
-    TUpdateCatalogRequest request = new TUpdateCatalogRequest();
-    JniUtil.deserializeThrift(protocolFactory_, request, thriftUpdateCatalog);
-    TSerializer serializer = new TSerializer(protocolFactory_);
-    return serializer.serialize(catalogOpExecutor_.updateCatalog(request));
-  }
-}

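Every JNI entry point in the JniCatalog class above follows the same byte-array convention: the C++ side hands over a Thrift-serialized request, the Java side deserializes it with the shared TBinaryProtocol factory, does the work, and returns a Thrift-serialized response. The following is a minimal, self-contained sketch of that round trip; it assumes only the Apache Thrift Java library, and the ThriftRoundTrip class and its method names are illustrative rather than part of the Impala sources.

import org.apache.thrift.TBase;
import org.apache.thrift.TDeserializer;
import org.apache.thrift.TException;
import org.apache.thrift.TSerializer;
import org.apache.thrift.protocol.TBinaryProtocol;

// Hypothetical helper showing the byte[]-in / byte[]-out Thrift round trip
// used by the JNI-callable methods above; not part of the Impala sources.
public class ThriftRoundTrip {
  private static final TBinaryProtocol.Factory PROTOCOL_FACTORY =
      new TBinaryProtocol.Factory();

  // Fills 'request' from the serialized bytes received over JNI.
  public static void readRequest(TBase<?, ?> request, byte[] thriftBytes)
      throws TException {
    new TDeserializer(PROTOCOL_FACTORY).deserialize(request, thriftBytes);
  }

  // Serializes 'response' into the byte array handed back over JNI.
  public static byte[] writeResponse(TBase<?, ?> response) throws TException {
    return new TSerializer(PROTOCOL_FACTORY).serialize(response);
  }
}

Like the methods above, the sketch creates a fresh serializer per call; reusing one per thread would avoid a small allocation cost, which is what the TODO in JniFrontend.createExecRequest() below alludes to.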
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/service/JniFrontend.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/service/JniFrontend.java b/fe/src/main/java/com/cloudera/impala/service/JniFrontend.java
deleted file mode 100644
index af9c00c..0000000
--- a/fe/src/main/java/com/cloudera/impala/service/JniFrontend.java
+++ /dev/null
@@ -1,743 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.service;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Enumeration;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.log4j.Appender;
-import org.apache.hadoop.fs.s3a.S3AFileSystem;
-import org.apache.log4j.FileAppender;
-import org.apache.thrift.TException;
-import org.apache.thrift.TSerializer;
-import org.apache.thrift.protocol.TBinaryProtocol;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.analysis.ToSqlUtils;
-import com.cloudera.impala.authorization.AuthorizationConfig;
-import com.cloudera.impala.authorization.ImpalaInternalAdminUser;
-import com.cloudera.impala.authorization.User;
-import com.cloudera.impala.catalog.DataSource;
-import com.cloudera.impala.catalog.Db;
-import com.cloudera.impala.catalog.Function;
-import com.cloudera.impala.catalog.Role;
-import com.cloudera.impala.common.FileSystemUtil;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.common.JniUtil;
-import com.cloudera.impala.service.BackendConfig;
-import com.cloudera.impala.thrift.TCatalogObject;
-import com.cloudera.impala.thrift.TDatabase;
-import com.cloudera.impala.thrift.TDescribeDbParams;
-import com.cloudera.impala.thrift.TDescribeResult;
-import com.cloudera.impala.thrift.TDescribeTableParams;
-import com.cloudera.impala.thrift.TExecRequest;
-import com.cloudera.impala.thrift.TFunctionCategory;
-import com.cloudera.impala.thrift.TGetAllHadoopConfigsResponse;
-import com.cloudera.impala.thrift.TGetDataSrcsParams;
-import com.cloudera.impala.thrift.TGetDataSrcsResult;
-import com.cloudera.impala.thrift.TGetDbsParams;
-import com.cloudera.impala.thrift.TGetDbsResult;
-import com.cloudera.impala.thrift.TGetFunctionsParams;
-import com.cloudera.impala.thrift.TGetFunctionsResult;
-import com.cloudera.impala.thrift.TGetHadoopConfigRequest;
-import com.cloudera.impala.thrift.TGetHadoopConfigResponse;
-import com.cloudera.impala.thrift.TGetTablesParams;
-import com.cloudera.impala.thrift.TGetTablesResult;
-import com.cloudera.impala.thrift.TLoadDataReq;
-import com.cloudera.impala.thrift.TLoadDataResp;
-import com.cloudera.impala.thrift.TLogLevel;
-import com.cloudera.impala.thrift.TMetadataOpRequest;
-import com.cloudera.impala.thrift.TQueryCtx;
-import com.cloudera.impala.thrift.TResultSet;
-import com.cloudera.impala.thrift.TShowFilesParams;
-import com.cloudera.impala.thrift.TShowGrantRoleParams;
-import com.cloudera.impala.thrift.TShowRolesParams;
-import com.cloudera.impala.thrift.TShowRolesResult;
-import com.cloudera.impala.thrift.TShowStatsParams;
-import com.cloudera.impala.thrift.TTableName;
-import com.cloudera.impala.thrift.TUniqueId;
-import com.cloudera.impala.thrift.TUpdateCatalogCacheRequest;
-import com.cloudera.impala.thrift.TUpdateMembershipRequest;
-import com.cloudera.impala.util.GlogAppender;
-import com.cloudera.impala.util.PatternMatcher;
-import com.cloudera.impala.util.TSessionStateUtil;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import com.google.common.collect.Sets;
-
-/**
- * JNI-callable interface onto a wrapped Frontend instance. The main point is to serialize
- * and deserialize thrift structures between C and Java.
- */
-public class JniFrontend {
-  private final static Logger LOG = LoggerFactory.getLogger(JniFrontend.class);
-  private final static TBinaryProtocol.Factory protocolFactory_ =
-      new TBinaryProtocol.Factory();
-  private final Frontend frontend_;
-
-  // Required minimum value (in milliseconds) for the HDFS config
-  // 'dfs.client.file-block-storage-locations.timeout.millis'
-  private static final long MIN_DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS =
-      10 * 1000;
-
-  /**
-   * Create a new instance of the Jni Frontend.
-   */
-  public JniFrontend(boolean lazy, String serverName, String authorizationPolicyFile,
-      String sentryConfigFile, String authPolicyProviderClass, int impalaLogLevel,
-      int otherLogLevel, boolean allowAuthToLocal) throws InternalException {
-    BackendConfig.setAuthToLocal(allowAuthToLocal);
-    GlogAppender.Install(TLogLevel.values()[impalaLogLevel],
-        TLogLevel.values()[otherLogLevel]);
-
-    // Validate the authorization configuration before initializing the Frontend.
-    // If there are any configuration problems Impala startup will fail.
-    AuthorizationConfig authConfig = new AuthorizationConfig(serverName,
-        authorizationPolicyFile, sentryConfigFile, authPolicyProviderClass);
-    authConfig.validateConfig();
-    if (authConfig.isEnabled()) {
-      LOG.info(String.format("Authorization is 'ENABLED' using %s",
-          authConfig.isFileBasedPolicy() ? " file based policy from: " +
-          authConfig.getPolicyFile() : " using Sentry Policy Service."));
-    } else {
-      LOG.info("Authorization is 'DISABLED'.");
-    }
-    LOG.info(JniUtil.getJavaVersion());
-
-    frontend_ = new Frontend(authConfig);
-  }
-
-  /**
-   * Jni wrapper for Frontend.createExecRequest(). Accepts a serialized
-   * TQueryCtx; returns a serialized TExecRequest.
-   */
-  public byte[] createExecRequest(byte[] thriftQueryContext)
-      throws ImpalaException {
-    TQueryCtx queryCtx = new TQueryCtx();
-    JniUtil.deserializeThrift(protocolFactory_, queryCtx, thriftQueryContext);
-
-    StringBuilder explainString = new StringBuilder();
-    TExecRequest result = frontend_.createExecRequest(queryCtx, explainString);
-    if (explainString.length() > 0) LOG.debug(explainString.toString());
-
-    // TODO: avoid creating serializer for each query?
-    TSerializer serializer = new TSerializer(protocolFactory_);
-    try {
-      return serializer.serialize(result);
-    } catch (TException e) {
-      throw new InternalException(e.getMessage());
-    }
-  }
-
-  // Deserialize and merge each thrift catalog update into a single merged update
-  public byte[] updateCatalogCache(byte[][] thriftCatalogUpdates) throws ImpalaException {
-    TUniqueId defaultCatalogServiceId = new TUniqueId(0L, 0L);
-    TUpdateCatalogCacheRequest mergedUpdateRequest = new TUpdateCatalogCacheRequest(
-        false, defaultCatalogServiceId, new ArrayList<TCatalogObject>(),
-        new ArrayList<TCatalogObject>());
-    for (byte[] catalogUpdate: thriftCatalogUpdates) {
-      TUpdateCatalogCacheRequest incrementalRequest = new TUpdateCatalogCacheRequest();
-      JniUtil.deserializeThrift(protocolFactory_, incrementalRequest, catalogUpdate);
-      mergedUpdateRequest.is_delta |= incrementalRequest.is_delta;
-      if (!incrementalRequest.getCatalog_service_id().equals(defaultCatalogServiceId)) {
-        mergedUpdateRequest.setCatalog_service_id(
-            incrementalRequest.getCatalog_service_id());
-      }
-      mergedUpdateRequest.getUpdated_objects().addAll(
-          incrementalRequest.getUpdated_objects());
-      mergedUpdateRequest.getRemoved_objects().addAll(
-          incrementalRequest.getRemoved_objects());
-    }
-    TSerializer serializer = new TSerializer(protocolFactory_);
-    try {
-      return serializer.serialize(frontend_.updateCatalogCache(mergedUpdateRequest));
-    } catch (TException e) {
-      throw new InternalException(e.getMessage());
-    }
-  }
-
-  /**
-   * Jni wrapper for Frontend.updateMembership(). Accepts a serialized
-   * TUpdateMembershipRequest.
-   */
-  public void updateMembership(byte[] thriftMembershipUpdate) throws ImpalaException {
-    TUpdateMembershipRequest req = new TUpdateMembershipRequest();
-    JniUtil.deserializeThrift(protocolFactory_, req, thriftMembershipUpdate);
-    frontend_.updateMembership(req);
-  }
-
-  /**
-   * Loads a table or partition with one or more data files. If the "overwrite" flag
-   * in the request is true, all existing data in the table/partition will be replaced.
-   * If the "overwrite" flag is false, the files will be added alongside any existing
-   * data files.
-   */
-  public byte[] loadTableData(byte[] thriftLoadTableDataParams)
-      throws ImpalaException, IOException {
-    TLoadDataReq request = new TLoadDataReq();
-    JniUtil.deserializeThrift(protocolFactory_, request, thriftLoadTableDataParams);
-    TLoadDataResp response = frontend_.loadTableData(request);
-    TSerializer serializer = new TSerializer(protocolFactory_);
-    try {
-      return serializer.serialize(response);
-    } catch (TException e) {
-      throw new InternalException(e.getMessage());
-    }
-  }
-
-  /**
-   * Return an explain plan based on thriftQueryContext, a serialized TQueryCtx.
-   * This call is thread-safe.
-   */
-  public String getExplainPlan(byte[] thriftQueryContext) throws ImpalaException {
-    TQueryCtx queryCtx = new TQueryCtx();
-    JniUtil.deserializeThrift(protocolFactory_, queryCtx, thriftQueryContext);
-    String plan = frontend_.getExplainString(queryCtx);
-    LOG.debug("Explain plan: " + plan);
-    return plan;
-  }
-
-  /**
-   * Implement Hive's pattern-matching semantics for "SHOW TABLE [[LIKE] 'pattern']", and
-   * return a list of table names matching an optional pattern.
-   * The only metacharacters are '*' which matches any string of characters, and '|'
-   * which denotes choice.  Doing the work here saves loading tables or databases from the
-   * metastore (which Hive would do if we passed the call through to the metastore
-   * client). If the pattern is null, all strings are considered to match. If it is an
-   * empty string, no strings match.
-   *
-   * The argument is a serialized TGetTablesParams object.
-   * The return type is a serialized TGetTablesResult object.
-   * @see Frontend#getTableNames
-   */
-  public byte[] getTableNames(byte[] thriftGetTablesParams) throws ImpalaException {
-    TGetTablesParams params = new TGetTablesParams();
-    JniUtil.deserializeThrift(protocolFactory_, params, thriftGetTablesParams);
-    // If the session was not set it indicates this is an internal Impala call.
-    User user = params.isSetSession() ?
-        new User(TSessionStateUtil.getEffectiveUser(params.getSession())) :
-        ImpalaInternalAdminUser.getInstance();
-
-    Preconditions.checkState(!params.isSetSession() || user != null );
-    List<String> tables = frontend_.getTableNames(params.db,
-        PatternMatcher.createHivePatternMatcher(params.pattern), user);
-
-    TGetTablesResult result = new TGetTablesResult();
-    result.setTables(tables);
-
-    TSerializer serializer = new TSerializer(protocolFactory_);
-    try {
-      return serializer.serialize(result);
-    } catch (TException e) {
-      throw new InternalException(e.getMessage());
-    }
-  }
-
-  /**
-   * Returns information about the files of a table or partition.
-   * The argument is a serialized TShowFilesParams object.
-   * The return type is a serialized TResultSet object.
-   * @see Frontend#getTableFiles
-   */
-  public byte[] getTableFiles(byte[] thriftShowFilesParams) throws ImpalaException {
-    TShowFilesParams params = new TShowFilesParams();
-    JniUtil.deserializeThrift(protocolFactory_, params, thriftShowFilesParams);
-    TResultSet result = frontend_.getTableFiles(params);
-
-    TSerializer serializer = new TSerializer(protocolFactory_);
-    try {
-      return serializer.serialize(result);
-    } catch (TException e) {
-      throw new InternalException(e.getMessage());
-    }
-  }
-
-  /**
-   * Implement Hive's pattern-matching semantics for "SHOW DATABASES [[LIKE] 'pattern']",
-   * and return a list of databases matching an optional pattern.
-   * @see JniFrontend#getTableNames(byte[]) for more detail.
-   *
-   * The argument is a serialized TGetDbsParams object.
-   * The return type is a serialized TGetDbsResult object.
-   * @see Frontend#getDbs
-   */
-  public byte[] getDbs(byte[] thriftGetTablesParams) throws ImpalaException {
-    TGetDbsParams params = new TGetDbsParams();
-    JniUtil.deserializeThrift(protocolFactory_, params, thriftGetTablesParams);
-    // If the session was not set it indicates this is an internal Impala call.
-    User user = params.isSetSession() ?
-        new User(TSessionStateUtil.getEffectiveUser(params.getSession())) :
-        ImpalaInternalAdminUser.getInstance();
-    List<Db> dbs = frontend_.getDbs(
-        PatternMatcher.createHivePatternMatcher(params.pattern), user);
-    TGetDbsResult result = new TGetDbsResult();
-    List<TDatabase> tDbs = Lists.newArrayListWithCapacity(dbs.size());
-    for (Db db: dbs) tDbs.add(db.toThrift());
-    result.setDbs(tDbs);
-    TSerializer serializer = new TSerializer(protocolFactory_);
-    try {
-      return serializer.serialize(result);
-    } catch (TException e) {
-      throw new InternalException(e.getMessage());
-    }
-  }
-
-  /**
-   * Returns a list of data sources matching an optional pattern.
-   * The argument is a serialized TGetDataSrcsParams object.
-   * The return type is a serialized TGetDataSrcsResult object.
-   * @see Frontend#getDataSrcs
-   */
-  public byte[] getDataSrcMetadata(byte[] thriftParams) throws ImpalaException {
-    TGetDataSrcsParams params = new TGetDataSrcsParams();
-    JniUtil.deserializeThrift(protocolFactory_, params, thriftParams);
-
-    TGetDataSrcsResult result = new TGetDataSrcsResult();
-    List<DataSource> dataSources = frontend_.getDataSrcs(params.pattern);
-    result.setData_src_names(Lists.<String>newArrayListWithCapacity(dataSources.size()));
-    result.setLocations(Lists.<String>newArrayListWithCapacity(dataSources.size()));
-    result.setClass_names(Lists.<String>newArrayListWithCapacity(dataSources.size()));
-    result.setApi_versions(Lists.<String>newArrayListWithCapacity(dataSources.size()));
-    for (DataSource dataSource: dataSources) {
-      result.addToData_src_names(dataSource.getName());
-      result.addToLocations(dataSource.getLocation());
-      result.addToClass_names(dataSource.getClassName());
-      result.addToApi_versions(dataSource.getApiVersion());
-    }
-    TSerializer serializer = new TSerializer(protocolFactory_);
-    try {
-      return serializer.serialize(result);
-    } catch (TException e) {
-      throw new InternalException(e.getMessage());
-    }
-  }
-
-  public byte[] getStats(byte[] thriftShowStatsParams) throws ImpalaException {
-    TShowStatsParams params = new TShowStatsParams();
-    JniUtil.deserializeThrift(protocolFactory_, params, thriftShowStatsParams);
-    Preconditions.checkState(params.isSetTable_name());
-    TResultSet result;
-    if (params.isIs_show_col_stats()) {
-      result = frontend_.getColumnStats(params.getTable_name().getDb_name(),
-          params.getTable_name().getTable_name());
-    } else {
-      result = frontend_.getTableStats(params.getTable_name().getDb_name(),
-          params.getTable_name().getTable_name());
-    }
-    TSerializer serializer = new TSerializer(protocolFactory_);
-    try {
-      return serializer.serialize(result);
-    } catch (TException e) {
-      throw new InternalException(e.getMessage());
-    }
-  }
-
-  /**
-   * Returns a list of function names matching an optional pattern.
-   * The argument is a serialized TGetFunctionsParams object.
-   * The return type is a serialized TGetFunctionsResult object.
-   * @see Frontend#getFunctions
-   */
-  public byte[] getFunctions(byte[] thriftGetFunctionsParams) throws ImpalaException {
-    TGetFunctionsParams params = new TGetFunctionsParams();
-    JniUtil.deserializeThrift(protocolFactory_, params, thriftGetFunctionsParams);
-
-    TGetFunctionsResult result = new TGetFunctionsResult();
-    List<String> signatures = Lists.newArrayList();
-    List<String> retTypes = Lists.newArrayList();
-    List<String> fnBinaryTypes = Lists.newArrayList();
-    List<String> fnIsPersistent = Lists.newArrayList();
-    List<Function> fns = frontend_.getFunctions(params.category, params.db,
-        params.pattern, false);
-    for (Function fn: fns) {
-      signatures.add(fn.signatureString());
-      retTypes.add(fn.getReturnType().toString());
-      fnBinaryTypes.add(fn.getBinaryType().name());
-      fnIsPersistent.add(String.valueOf(fn.isPersistent()));
-    }
-    result.setFn_signatures(signatures);
-    result.setFn_ret_types(retTypes);
-    result.setFn_binary_types(fnBinaryTypes);
-    result.setFn_persistence(fnIsPersistent);
-    TSerializer serializer = new TSerializer(protocolFactory_);
-    try {
-      return serializer.serialize(result);
-    } catch (TException e) {
-      throw new InternalException(e.getMessage());
-    }
-  }
-
-  /**
-   * Gets the thrift representation of a catalog object.
-   */
-  public byte[] getCatalogObject(byte[] thriftParams) throws ImpalaException,
-      TException {
-    TCatalogObject objectDescription = new TCatalogObject();
-    JniUtil.deserializeThrift(protocolFactory_, objectDescription, thriftParams);
-    TSerializer serializer = new TSerializer(protocolFactory_);
-    return serializer.serialize(
-        frontend_.getCatalog().getTCatalogObject(objectDescription));
-  }
-
-  /**
-   * Returns a database's properties such as its location and comment.
-   * The argument is a serialized TDescribeDbParams object.
-   * The return type is a serialized TDescribeResult object.
-   * @see Frontend#describeDb
-   */
-  public byte[] describeDb(byte[] thriftDescribeDbParams) throws ImpalaException {
-    TDescribeDbParams params = new TDescribeDbParams();
-    JniUtil.deserializeThrift(protocolFactory_, params, thriftDescribeDbParams);
-
-    TDescribeResult result = frontend_.describeDb(
-        params.getDb(), params.getOutput_style());
-
-    TSerializer serializer = new TSerializer(protocolFactory_);
-    try {
-      return serializer.serialize(result);
-    } catch (TException e) {
-      throw new InternalException(e.getMessage());
-    }
-  }
-
-  /**
-   * Returns a list of the columns making up a table.
-   * The argument is a serialized TDescribeTableParams object.
-   * The return type is a serialized TDescribeResult object.
-   * @see Frontend#describeTable
-   */
-  public byte[] describeTable(byte[] thriftDescribeTableParams) throws ImpalaException {
-    TDescribeTableParams params = new TDescribeTableParams();
-    JniUtil.deserializeThrift(protocolFactory_, params, thriftDescribeTableParams);
-
-    TDescribeResult result = frontend_.describeTable(
-        params.getDb(), params.getTable_name(), params.getOutput_style(),
-        params.getResult_struct());
-
-    TSerializer serializer = new TSerializer(protocolFactory_);
-    try {
-      return serializer.serialize(result);
-    } catch (TException e) {
-      throw new InternalException(e.getMessage());
-    }
-  }
-
-  /**
-   * Returns a SQL DDL string for creating the specified table.
-   */
-  public String showCreateTable(byte[] thriftTableName)
-      throws ImpalaException {
-    TTableName params = new TTableName();
-    JniUtil.deserializeThrift(protocolFactory_, params, thriftTableName);
-    return ToSqlUtils.getCreateTableSql(frontend_.getCatalog().getTable(
-        params.getDb_name(), params.getTable_name()));
-  }
-
-  /**
-   * Returns a SQL DDL string for creating the specified function.
-   */
-  public String showCreateFunction(byte[] thriftShowCreateFunctionParams)
-      throws ImpalaException {
-    TGetFunctionsParams params = new TGetFunctionsParams();
-    JniUtil.deserializeThrift(protocolFactory_, params, thriftShowCreateFunctionParams);
-    Preconditions.checkArgument(params.category == TFunctionCategory.SCALAR ||
-        params.category == TFunctionCategory.AGGREGATE);
-    return ToSqlUtils.getCreateFunctionSql(frontend_.getFunctions(
-        params.category, params.db, params.pattern, true));
-  }
-
-  /**
-   * Gets all roles
-   */
-  public byte[] getRoles(byte[] showRolesParams) throws ImpalaException {
-    TShowRolesParams params = new TShowRolesParams();
-    JniUtil.deserializeThrift(protocolFactory_, params, showRolesParams);
-    TShowRolesResult result = new TShowRolesResult();
-
-    List<Role> roles = Lists.newArrayList();
-    if (params.isIs_show_current_roles() || params.isSetGrant_group()) {
-      User user = new User(params.getRequesting_user());
-      Set<String> groupNames;
-      if (params.isIs_show_current_roles()) {
-        groupNames = frontend_.getAuthzChecker().getUserGroups(user);
-      } else {
-        Preconditions.checkState(params.isSetGrant_group());
-        groupNames = Sets.newHashSet(params.getGrant_group());
-      }
-      for (String groupName: groupNames) {
-        roles.addAll(frontend_.getCatalog().getAuthPolicy().getGrantedRoles(groupName));
-      }
-    } else {
-      Preconditions.checkState(!params.isIs_show_current_roles());
-      roles = frontend_.getCatalog().getAuthPolicy().getAllRoles();
-    }
-
-    result.setRole_names(Lists.<String>newArrayListWithExpectedSize(roles.size()));
-    for (Role role: roles) {
-      result.getRole_names().add(role.getName());
-    }
-
-    Collections.sort(result.getRole_names());
-    TSerializer serializer = new TSerializer(protocolFactory_);
-    try {
-      return serializer.serialize(result);
-    } catch (TException e) {
-      throw new InternalException(e.getMessage());
-    }
-  }
-
-  public byte[] getRolePrivileges(byte[] showGrantRolesParams) throws ImpalaException {
-    TShowGrantRoleParams params = new TShowGrantRoleParams();
-    JniUtil.deserializeThrift(protocolFactory_, params, showGrantRolesParams);
-    TResultSet result = frontend_.getCatalog().getAuthPolicy().getRolePrivileges(
-        params.getRole_name(), params.getPrivilege());
-    TSerializer serializer = new TSerializer(protocolFactory_);
-    try {
-      return serializer.serialize(result);
-    } catch (TException e) {
-      throw new InternalException(e.getMessage());
-    }
-  }
-
-  /**
-   * Executes a HiveServer2 metadata operation and returns a TResultSet
-   */
-  public byte[] execHiveServer2MetadataOp(byte[] metadataOpsParams)
-      throws ImpalaException {
-    TMetadataOpRequest params = new TMetadataOpRequest();
-    JniUtil.deserializeThrift(protocolFactory_, params, metadataOpsParams);
-    TResultSet result = frontend_.execHiveServer2MetadataOp(params);
-
-    TSerializer serializer = new TSerializer(protocolFactory_);
-    try {
-      return serializer.serialize(result);
-    } catch (TException e) {
-      throw new InternalException(e.getMessage());
-    }
-  }
-
-  public void setCatalogInitialized() {
-    frontend_.getCatalog().setIsReady(true);
-  }
-
-  // Caching this saves ~50ms per call to getAllHadoopConfigs()
-  private static final Configuration CONF = new Configuration();
-
-  /**
-   * Returns all loaded Hadoop configuration parameters as a map of keys and values,
-   * serialized as a TGetAllHadoopConfigsResponse.
-   */
-  public byte[] getAllHadoopConfigs() throws ImpalaException {
-    Map<String, String> configs = Maps.newHashMap();
-    for (Map.Entry<String, String> e: CONF) {
-      configs.put(e.getKey(), e.getValue());
-    }
-    TGetAllHadoopConfigsResponse result = new TGetAllHadoopConfigsResponse();
-    result.setConfigs(configs);
-    TSerializer serializer = new TSerializer(protocolFactory_);
-    try {
-      return serializer.serialize(result);
-    } catch (TException e) {
-      throw new InternalException(e.getMessage());
-    }
-  }
-
-  /**
-   * Returns the corresponding config value for the given key as a serialized
-   * TGetHadoopConfigResponse. If the config value is null, the 'value' field in the
-   * thrift response object will not be set.
-   */
-  public byte[] getHadoopConfig(byte[] serializedRequest) throws ImpalaException {
-    TGetHadoopConfigRequest request = new TGetHadoopConfigRequest();
-    JniUtil.deserializeThrift(protocolFactory_, request, serializedRequest);
-    TGetHadoopConfigResponse result = new TGetHadoopConfigResponse();
-    result.setValue(CONF.get(request.getName()));
-    TSerializer serializer = new TSerializer(protocolFactory_);
-    try {
-      return serializer.serialize(result);
-    } catch (TException e) {
-      throw new InternalException(e.getMessage());
-    }
-  }
-
-  /**
-   * Returns an error string describing all configuration issues. If no config issues are
-   * found, returns an empty string.
-   */
-  public String checkConfiguration() {
-    StringBuilder output = new StringBuilder();
-    output.append(checkLogFilePermission());
-    output.append(checkFileSystem(CONF));
-    output.append(checkShortCircuitRead(CONF));
-    output.append(checkBlockLocationTracking(CONF));
-    return output.toString();
-  }
-
-  /**
-   * Returns an empty string if Impala has permission to write to FE log files. If not,
-   * returns an error string describing the issues.
-   */
-  private String checkLogFilePermission() {
-    org.apache.log4j.Logger l4jRootLogger = org.apache.log4j.Logger.getRootLogger();
-    Enumeration appenders = l4jRootLogger.getAllAppenders();
-    while (appenders.hasMoreElements()) {
-      Appender appender = (Appender) appenders.nextElement();
-      if (appender instanceof FileAppender) {
-        if (((FileAppender) appender).getFile() == null) {
-          // If Impala does not have permission to write to the log file, the
-          // FileAppender will fail to initialize and logFile will be null.
-          // Unfortunately, we can't get the log file name here.
-          return "Impala does not have permission to write to the log file specified " +
-              "in log4j.properties.";
-        }
-      }
-    }
-    return "";
-  }
-
-  /**
-   * Returns an error message if short circuit reads are enabled but misconfigured.
-   * Otherwise, returns an empty string.
-   */
-  private String checkShortCircuitRead(Configuration conf) {
-    if (!conf.getBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY,
-        DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_DEFAULT)) {
-      LOG.info("Short-circuit reads are not enabled.");
-      return "";
-    }
-
-    StringBuilder output = new StringBuilder();
-    String errorMessage = "Invalid short-circuit reads configuration:\n";
-    String prefix = "  - ";
-    StringBuilder errorCause = new StringBuilder();
-
-    // dfs.domain.socket.path must be set properly
-    String domainSocketPath = conf.getTrimmed(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
-        DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_DEFAULT);
-    if (domainSocketPath.isEmpty()) {
-      errorCause.append(prefix);
-      errorCause.append(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY);
-      errorCause.append(" is not configured.\n");
-    } else {
-      // The socket path parent directory must be readable and executable.
-      File socketFile = new File(domainSocketPath);
-      File socketDir = socketFile.getParentFile();
-      if (socketDir == null || !socketDir.canRead() || !socketDir.canExecute()) {
-        errorCause.append(prefix);
-        errorCause.append("Impala cannot read or execute the parent directory of ");
-        errorCause.append(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY);
-        errorCause.append("\n");
-      }
-    }
-
-    // dfs.client.use.legacy.blockreader.local must be set to false
-    if (conf.getBoolean(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL,
-        DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL_DEFAULT)) {
-      errorCause.append(prefix);
-      errorCause.append(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL);
-      errorCause.append(" should not be enabled.\n");
-    }
-
-    if (errorCause.length() > 0) {
-      output.append(errorMessage);
-      output.append(errorCause);
-    }
-
-    return output.toString();
-  }
-
-  /**
-   * Return an empty string if block location tracking is properly enabled. If not,
-   * return an error string describing the issues.
-   */
-  private String checkBlockLocationTracking(Configuration conf) {
-    StringBuilder output = new StringBuilder();
-    String errorMessage = "ERROR: block location tracking is not properly enabled " +
-        "because\n";
-    String prefix = "  - ";
-    StringBuilder errorCause = new StringBuilder();
-    if (!conf.getBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED,
-        DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED_DEFAULT)) {
-      errorCause.append(prefix);
-      errorCause.append(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED);
-      errorCause.append(" is not enabled.\n");
-    }
-
-    // dfs.client.file-block-storage-locations.timeout.millis should be >= 10 seconds
-    int dfsClientFileBlockStorageLocationsTimeoutMs = conf.getInt(
-        DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS,
-        DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS_DEFAULT);
-    if (dfsClientFileBlockStorageLocationsTimeoutMs <
-        MIN_DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS) {
-      errorCause.append(prefix);
-      errorCause.append(DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS);
-      errorCause.append(" is too low. It should be at least 10 seconds.\n");
-    }
-
-    if (errorCause.length() > 0) {
-      output.append(errorMessage);
-      output.append(errorCause);
-    }
-
-    return output.toString();
-  }
-
-  /**
-   * Return an empty string if the default FileSystem configured in CONF refers to a
-   * DistributedFileSystem or an S3AFileSystem and Impala can list the root directory
-   * "/". Otherwise, return an error string describing the issues.
-   */
-  private String checkFileSystem(Configuration conf) {
-    try {
-      FileSystem fs = FileSystem.get(CONF);
-      if (!(fs instanceof DistributedFileSystem || fs instanceof S3AFileSystem)) {
-        return "Currently configured default filesystem: " +
-            fs.getClass().getSimpleName() + ". " +
-            CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY +
-            " (" + CONF.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY) + ")" +
-            " is not supported.";
-      }
-    } catch (IOException e) {
-      return "couldn't retrieve FileSystem:\n" + e.getMessage();
-    }
-
-    try {
-      FileSystemUtil.getTotalNumVisibleFiles(new Path("/"));
-    } catch (IOException e) {
-      return "Could not read the root directory at " +
-          CONF.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY) +
-          ". Error was: \n" + e.getMessage();
-    }
-    return "";
-  }
-}

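The getTableNames() and getDbs() comments in the JniFrontend class above spell out Hive's SHOW TABLES / SHOW DATABASES pattern semantics: '*' matches any string of characters, '|' separates alternatives, a null pattern matches everything, and an empty pattern matches nothing. The standalone sketch below is only a rough illustration of those semantics using java.util.regex; it is not Impala's PatternMatcher.createHivePatternMatcher(), and the HivePatternDemo class and its method names are made up for this example.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.regex.Pattern;

// Standalone illustration of the SHOW TABLES/DATABASES pattern semantics
// described above; not Impala's PatternMatcher implementation.
public class HivePatternDemo {
  // Returns the candidates that match 'pattern' under the semantics above.
  public static List<String> filter(List<String> candidates, String pattern) {
    if (pattern == null) return new ArrayList<>(candidates);  // null matches everything
    List<String> matches = new ArrayList<>();
    if (pattern.isEmpty()) return matches;                    // empty matches nothing
    StringBuilder regex = new StringBuilder();
    for (String alternative : pattern.split("\\|")) {
      if (regex.length() > 0) regex.append("|");
      // Each '*' becomes '.*'; everything else is matched literally.
      regex.append(Pattern.quote(alternative).replace("*", "\\E.*\\Q"));
    }
    Pattern compiled = Pattern.compile(regex.toString());
    for (String name : candidates) {
      if (compiled.matcher(name).matches()) matches.add(name);
    }
    return matches;
  }

  public static void main(String[] args) {
    List<String> tables = Arrays.asList("alltypes", "alltypes_tiny", "customers");
    System.out.println(filter(tables, "all*|cust*"));  // [alltypes, alltypes_tiny, customers]
    System.out.println(filter(tables, "cust*"));       // [customers]
  }
}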
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/service/MetadataOp.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/service/MetadataOp.java b/fe/src/main/java/com/cloudera/impala/service/MetadataOp.java
deleted file mode 100644
index ebfd984..0000000
--- a/fe/src/main/java/com/cloudera/impala/service/MetadataOp.java
+++ /dev/null
@@ -1,641 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.service;
-
-import java.sql.DatabaseMetaData;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.analysis.TableName;
-import com.cloudera.impala.authorization.User;
-import com.cloudera.impala.catalog.Column;
-import com.cloudera.impala.catalog.Db;
-import com.cloudera.impala.catalog.Function;
-import com.cloudera.impala.catalog.ImpaladCatalog;
-import com.cloudera.impala.catalog.PrimitiveType;
-import com.cloudera.impala.catalog.ScalarType;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.catalog.TableLoadingException;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.thrift.TColumn;
-import com.cloudera.impala.thrift.TColumnValue;
-import com.cloudera.impala.thrift.TResultRow;
-import com.cloudera.impala.thrift.TResultSet;
-import com.cloudera.impala.thrift.TResultSetMetadata;
-import com.cloudera.impala.util.PatternMatcher;
-import com.google.common.collect.Lists;
-
-/**
- * Metadata operations. This class contains static methods to execute HiveServer2
- * metadata operations and return the results, result schema, and a unique request id
- * in a TResultSet.
- */
-public class MetadataOp {
-  private static final Logger LOG = LoggerFactory.getLogger(MetadataOp.class);
-
-  // Static column values
-  private static final TColumnValue NULL_COL_VAL = new TColumnValue();
-  private static final TColumnValue EMPTY_COL_VAL = createTColumnValue("");
-  private static final TColumnValue TABLE_TYPE_COL_VAL = createTColumnValue("TABLE");
-
-  // Result set schema for each of the metadata operations.
-  private final static TResultSetMetadata GET_CATALOGS_MD = new TResultSetMetadata();
-  private final static TResultSetMetadata GET_COLUMNS_MD = new TResultSetMetadata();
-  private final static TResultSetMetadata GET_SCHEMAS_MD = new TResultSetMetadata();
-  private final static TResultSetMetadata GET_TABLES_MD = new TResultSetMetadata();
-  private static final TResultSetMetadata GET_TYPEINFO_MD = new TResultSetMetadata();
-  private static final TResultSetMetadata GET_TABLE_TYPES_MD = new TResultSetMetadata();
-  private static final TResultSetMetadata GET_FUNCTIONS_MD = new TResultSetMetadata();
-
-  // GetTypeInfo contains all primitive types supported by Impala.
-  private static final List<TResultRow> GET_TYPEINFO_RESULTS = Lists.newArrayList();
-
-  // GetTableTypes only returns a single value: "TABLE".
-  private static final List<TResultRow> GET_TABLE_TYPES_RESULTS = Lists.newArrayList();
-
-  // Initialize result set schemas and static result set
-  static {
-    initializeResultSetSchemas();
-    createGetTypeInfoResults();
-    createGetTableTypesResults();
-  }
-
-  /**
-   * Initialize result set schema for each of the HiveServer2 operations
-   */
-  private static void initializeResultSetSchemas() {
-    GET_CATALOGS_MD.addToColumns(new TColumn("TABLE_CAT", Type.STRING.toThrift()));
-
-    GET_COLUMNS_MD.addToColumns(
-        new TColumn("TABLE_CAT", Type.STRING.toThrift()));
-    GET_COLUMNS_MD.addToColumns(
-        new TColumn("TABLE_MD", Type.STRING.toThrift()));
-    GET_COLUMNS_MD.addToColumns(
-        new TColumn("TABLE_NAME", Type.STRING.toThrift()));
-    GET_COLUMNS_MD.addToColumns(
-        new TColumn("COLUMN_NAME", Type.STRING.toThrift()));
-    GET_COLUMNS_MD.addToColumns(
-        new TColumn("DATA_TYPE", Type.INT.toThrift()));
-    GET_COLUMNS_MD.addToColumns(
-        new TColumn("TYPE_NAME", Type.STRING.toThrift()));
-    GET_COLUMNS_MD.addToColumns(
-        new TColumn("COLUMN_SIZE", Type.INT.toThrift()));
-    GET_COLUMNS_MD.addToColumns(
-        new TColumn("BUFFER_LENGTH", Type.INT.toThrift()));
-    GET_COLUMNS_MD.addToColumns(
-        new TColumn("DECIMAL_DIGITS", Type.INT.toThrift()));
-    GET_COLUMNS_MD.addToColumns(
-        new TColumn("NUM_PREC_RADIX", Type.INT.toThrift()));
-    GET_COLUMNS_MD.addToColumns(
-        new TColumn("NULLABLE", Type.INT.toThrift()));
-    GET_COLUMNS_MD.addToColumns(
-        new TColumn("REMARKS", Type.STRING.toThrift()));
-    GET_COLUMNS_MD.addToColumns(
-        new TColumn("COLUMN_DEF", Type.STRING.toThrift()));
-    GET_COLUMNS_MD.addToColumns(
-        new TColumn("SQL_DATA_TYPE", Type.INT.toThrift()));
-    GET_COLUMNS_MD.addToColumns(
-        new TColumn("SQL_DATETIME_SUB", Type.INT.toThrift()));
-    GET_COLUMNS_MD.addToColumns(
-        new TColumn("CHAR_OCTET_LENGTH", Type.INT.toThrift()));
-    GET_COLUMNS_MD.addToColumns(
-        new TColumn("ORDINAL_POSITION", Type.INT.toThrift()));
-    GET_COLUMNS_MD.addToColumns(
-        new TColumn("IS_NULLABLE", Type.STRING.toThrift()));
-    GET_COLUMNS_MD.addToColumns(
-        new TColumn("SCOPE_CATALOG", Type.STRING.toThrift()));
-    GET_COLUMNS_MD.addToColumns(
-        new TColumn("SCOPE_SCHEMA", Type.STRING.toThrift()));
-    GET_COLUMNS_MD.addToColumns(
-        new TColumn("SCOPE_TABLE", Type.STRING.toThrift()));
-    GET_COLUMNS_MD.addToColumns(
-        new TColumn("SOURCE_DATA_TYPE", Type.SMALLINT.toThrift()));
-    GET_COLUMNS_MD.addToColumns(
-        new TColumn("IS_AUTO_INCREMENT", Type.STRING.toThrift()));
-
-    GET_SCHEMAS_MD.addToColumns(
-        new TColumn("TABLE_SCHEM", Type.STRING.toThrift()));
-    GET_SCHEMAS_MD.addToColumns(
-        new TColumn("TABLE_CATALOG", Type.STRING.toThrift()));
-
-    GET_TABLES_MD.addToColumns(
-        new TColumn("TABLE_CAT", Type.STRING.toThrift()));
-    GET_TABLES_MD.addToColumns(
-        new TColumn("TABLE_SCHEM", Type.STRING.toThrift()));
-    GET_TABLES_MD.addToColumns(
-        new TColumn("TABLE_NAME", Type.STRING.toThrift()));
-    GET_TABLES_MD.addToColumns(
-        new TColumn("TABLE_TYPE", Type.STRING.toThrift()));
-    GET_TABLES_MD.addToColumns(
-        new TColumn("REMARKS", Type.STRING.toThrift()));
-
-    GET_TYPEINFO_MD.addToColumns(
-        new TColumn("TYPE_NAME", Type.STRING.toThrift()));
-    GET_TYPEINFO_MD.addToColumns(
-        new TColumn("DATA_TYPE", Type.INT.toThrift()));
-    GET_TYPEINFO_MD.addToColumns(
-        new TColumn("PRECISION", Type.INT.toThrift()));
-    GET_TYPEINFO_MD.addToColumns(
-        new TColumn("LITERAL_PREFIX", Type.STRING.toThrift()));
-    GET_TYPEINFO_MD.addToColumns(
-        new TColumn("LITERAL_SUFFIX", Type.STRING.toThrift()));
-    GET_TYPEINFO_MD.addToColumns(
-        new TColumn("CREATE_PARAMS", Type.STRING.toThrift()));
-    GET_TYPEINFO_MD.addToColumns(
-        new TColumn("NULLABLE", Type.INT.toThrift()));
-    GET_TYPEINFO_MD.addToColumns(
-        new TColumn("CASE_SENSITIVE", Type.BOOLEAN.toThrift()));
-    GET_TYPEINFO_MD.addToColumns(
-        new TColumn("SEARCHABLE", Type.SMALLINT.toThrift()));
-    GET_TYPEINFO_MD.addToColumns(
-        new TColumn("UNSIGNED_ATTRIBUTE", Type.BOOLEAN.toThrift()));
-    GET_TYPEINFO_MD.addToColumns(
-        new TColumn("FIXED_PREC_SCALE", Type.BOOLEAN.toThrift()));
-    GET_TYPEINFO_MD.addToColumns(
-        new TColumn("AUTO_INCREMENT", Type.BOOLEAN.toThrift()));
-    GET_TYPEINFO_MD.addToColumns(
-        new TColumn("LOCAL_TYPE_NAME", Type.STRING.toThrift()));
-    GET_TYPEINFO_MD.addToColumns(
-        new TColumn("MINIMUM_SCALE", Type.SMALLINT.toThrift()));
-    GET_TYPEINFO_MD.addToColumns(
-        new TColumn("MAXIMUM_SCALE", Type.SMALLINT.toThrift()));
-    GET_TYPEINFO_MD.addToColumns(
-        new TColumn("SQL_DATA_TYPE", Type.INT.toThrift()));
-    GET_TYPEINFO_MD.addToColumns(
-        new TColumn("SQL_DATETIME_SUB", Type.INT.toThrift()));
-    GET_TYPEINFO_MD.addToColumns(
-        new TColumn("NUM_PREC_RADIX", Type.INT.toThrift()));
-
-    GET_TABLE_TYPES_MD.addToColumns(
-        new TColumn("TABLE_TYPE", Type.STRING.toThrift()));
-
-    GET_FUNCTIONS_MD.addToColumns(
-        new TColumn("FUNCTION_CAT", Type.STRING.toThrift()));
-    GET_FUNCTIONS_MD.addToColumns(
-        new TColumn("FUNCTION_SCHEM", Type.STRING.toThrift()));
-    GET_FUNCTIONS_MD.addToColumns(
-        new TColumn("FUNCTION_NAME", Type.STRING.toThrift()));
-    GET_FUNCTIONS_MD.addToColumns(
-        new TColumn("REMARKS", Type.STRING.toThrift()));
-    GET_FUNCTIONS_MD.addToColumns(
-        new TColumn("FUNCTION_TYPE", Type.INT.toThrift()));
-    GET_FUNCTIONS_MD.addToColumns(
-        new TColumn("SPECIFIC_NAME", Type.STRING.toThrift()));
-  }
-
-  /**
-   * Contains lists of databases, lists of tables belonging to the dbs, lists of columns
-   * belonging to the tables, and lists of user functions.
-   */
-  private static class DbsMetadata {
-     // the list of databases
-    public List<String> dbs = Lists.newArrayList();
-
-    // tableNames[i] are the tables within dbs[i]
-    public List<List<String>> tableNames = Lists.newArrayList();
-
-    // columns[i][j] are the columns of tableNames[j] in dbs[i].
-    // If the table is missing (not yet loaded) its column list will be empty.
-    public List<List<List<Column>>> columns = Lists.newArrayList();
-
-    // functions[i] are the functions within dbs[i]
-    public List<List<Function>> functions = Lists.newArrayList();
-
-    // Set of tables that are missing (not yet loaded).
-    public Set<TableName> missingTbls = new HashSet<TableName>();
-  }
-
-  /**
-   * Returns the list of schemas, tables, columns and user functions that match the
-   * corresponding matchers.
-   *
-   * The return value 'result.dbs' contains the list of databases that match
-   * 'schemaPatternMatcher'.
-   * 'result.tableNames[i]' contains the list of tables inside dbs[i] that match
-   * 'tablePatternMatcher'.
-   * 'result.columns[i][j]' contains the list of columns of table[j] in dbs[i]
-   * that match 'columnPatternMatcher'.
-   * result.functions[i] contains the list of functions inside dbs[i] that
-   * match 'fnPatternMatcher'.
-   *
-   * If 'fnPatternMatcher' is not PatternMatcher.MATCHER_MATCH_NONE, then only function
-   * metadata will be returned.
-   * If 'tablePatternMatcher' is PatternMatcher.MATCHER_MATCH_NONE, then
-   * 'result.tableNames' and 'result.columns' will not be populated.
-   * If columns is null, then 'result.columns' will not be populated.
-   */
-  private static DbsMetadata getDbsMetadata(Frontend fe, String catalogName,
-      PatternMatcher schemaPatternMatcher, PatternMatcher tablePatternMatcher,
-      PatternMatcher columnPatternMatcher, PatternMatcher fnPatternMatcher, User user)
-      throws ImpalaException {
-    DbsMetadata result = new DbsMetadata();
-
-    // Hive does not have a catalog concept. Returns nothing if the request specifies a
-    // non-empty catalog pattern.
-    if (!isEmptyPattern(catalogName)) {
-      return result;
-    }
-
-    ImpaladCatalog catalog = fe.getCatalog();
-    for (Db db: fe.getDbs(schemaPatternMatcher, user)) {
-      if (fnPatternMatcher != PatternMatcher.MATCHER_MATCH_NONE) {
-        // Get function metadata
-        List<Function> fns = db.getFunctions(null, fnPatternMatcher);
-        result.functions.add(fns);
-      } else {
-        // Get table metadata
-        List<String> tableList = Lists.newArrayList();
-        List<List<Column>> tablesColumnsList = Lists.newArrayList();
-        for (String tabName: fe.getTableNames(db.getName(), tablePatternMatcher, user)) {
-          tableList.add(tabName);
-          Table table = null;
-          try {
-            table = catalog.getTable(db.getName(), tabName);
-          } catch (TableLoadingException e) {
-            // Ignore exception (this table will be skipped).
-          }
-          if (table == null) continue;
-
-          List<Column> columns = Lists.newArrayList();
-          // If the table is not yet loaded, the columns will be unknown. Add it
-          // to the set of missing tables.
-          if (!table.isLoaded()) {
-            result.missingTbls.add(new TableName(db.getName(), tabName));
-          } else {
-            columns.addAll(fe.getColumns(table, columnPatternMatcher, user));
-          }
-          tablesColumnsList.add(columns);
-        }
-        result.dbs.add(db.getName());
-        result.tableNames.add(tableList);
-        result.columns.add(tablesColumnsList);
-      }
-    }
-    return result;
-  }
-
-  /**
-   * Executes the GetCatalogs HiveServer2 operation and returns TResultSet.
-   * Hive does not have a catalog concept. It always returns an empty result set.
-   */
-  public static TResultSet getCatalogs() {
-    return createEmptyResultSet(GET_CATALOGS_MD);
-  }
-
-  /**
-   * Executes the GetColumns HiveServer2 operation and returns TResultSet.
-   * Queries the Impala catalog to return the list of table columns that fit the
-   * search patterns. Matching columns requires loading the table metadata, so if
-   * any missing tables are found an RPC to the CatalogServer will be executed
-   * to request loading these tables. The matching process will be restarted
-   * once the required tables have been loaded in the local Impalad Catalog or
-   * the wait timeout has been reached.
-   *
-   * The parameters catalogName, schemaName, tableName and columnName are JDBC search
-   * patterns.
-   */
-  public static TResultSet getColumns(Frontend fe,
-      String catalogName, String schemaName, String tableName, String columnName,
-      User user)
-      throws ImpalaException {
-    TResultSet result = createEmptyResultSet(GET_COLUMNS_MD);
-
-    // Get the list of schemas, tables, and columns that satisfy the search conditions.
-    DbsMetadata dbsMetadata = null;
-    PatternMatcher schemaMatcher = PatternMatcher.createJdbcPatternMatcher(schemaName);
-    PatternMatcher tableMatcher = PatternMatcher.createJdbcPatternMatcher(tableName);
-    PatternMatcher columnMatcher = PatternMatcher.createJdbcPatternMatcher(columnName);
-    while (dbsMetadata == null || !dbsMetadata.missingTbls.isEmpty()) {
-      dbsMetadata = getDbsMetadata(fe, catalogName, schemaMatcher, tableMatcher,
-          columnMatcher, PatternMatcher.MATCHER_MATCH_NONE, user);
-      if (!fe.requestTblLoadAndWait(dbsMetadata.missingTbls)) {
-        LOG.info("Timed out waiting for missing tables. Load request will be retried.");
-      }
-    }
-
-    for (int i = 0; i < dbsMetadata.dbs.size(); ++i) {
-      String dbName = dbsMetadata.dbs.get(i);
-      for (int j = 0; j < dbsMetadata.tableNames.get(i).size(); ++j) {
-        String tabName = dbsMetadata.tableNames.get(i).get(j);
-        for (int k = 0; k < dbsMetadata.columns.get(i).get(j).size(); ++k) {
-          Column column = dbsMetadata.columns.get(i).get(j).get(k);
-          Type colType = column.getType();
-          String colTypeName = getHs2MetadataTypeName(colType);
-
-          TResultRow row = new TResultRow();
-          row.colVals = Lists.newArrayList();
-          row.colVals.add(NULL_COL_VAL); // TABLE_CAT
-          row.colVals.add(createTColumnValue(dbName)); // TABLE_SCHEM
-          row.colVals.add(createTColumnValue(tabName)); // TABLE_NAME
-          row.colVals.add(createTColumnValue(column.getName())); // COLUMN_NAME
-          row.colVals.add(createTColumnValue(colType.getJavaSqlType())); // DATA_TYPE
-          row.colVals.add(createTColumnValue(colTypeName)); // TYPE_NAME
-          row.colVals.add(createTColumnValue(colType.getColumnSize())); // COLUMN_SIZE
-          row.colVals.add(NULL_COL_VAL); // BUFFER_LENGTH, unused
-          // DECIMAL_DIGITS
-          row.colVals.add(createTColumnValue(colType.getDecimalDigits()));
-          // NUM_PREC_RADIX
-          row.colVals.add(createTColumnValue(colType.getNumPrecRadix()));
-          // NULLABLE
-          row.colVals.add(createTColumnValue(DatabaseMetaData.columnNullable));
-          row.colVals.add(NULL_COL_VAL); // REMARKS
-          row.colVals.add(NULL_COL_VAL); // COLUMN_DEF
-          row.colVals.add(NULL_COL_VAL); // SQL_DATA_TYPE
-          row.colVals.add(NULL_COL_VAL); // SQL_DATETIME_SUB
-          row.colVals.add(NULL_COL_VAL); // CHAR_OCTET_LENGTH
-          // ORDINAL_POSITION starts from 1
-          row.colVals.add(createTColumnValue(column.getPosition() + 1));
-          row.colVals.add(createTColumnValue("YES")); // IS_NULLABLE
-          row.colVals.add(NULL_COL_VAL); // SCOPE_CATALOG
-          row.colVals.add(NULL_COL_VAL); // SCOPE_SCHEMA
-          row.colVals.add(NULL_COL_VAL); // SCOPE_TABLE
-          row.colVals.add(NULL_COL_VAL); // SOURCE_DATA_TYPE
-          row.colVals.add(createTColumnValue("NO")); // IS_AUTO_INCREMENT
-          result.rows.add(row);
-        }
-      }
-    }
-    LOG.debug("Returning " + result.rows.size() + " table columns");
-    return result;
-  }
-
-  /**
-   * Returns the string representation of the given Impala column type to populate the
-   * TYPE_NAME column of the result set returned by a HiveServer2 GetColumns() request.
-   *
-   * To be consistent with Hive's behavior, the TYPE_NAME field is populated with the
-   * primitive type name for scalar types, and with the full toSql() for complex types.
-   * The resulting type names are somewhat inconsistent, because nested types are printed
-   * differently than top-level types, e.g.:
-   * toSql()                     TYPE_NAME
-   * DECIMAL(10,10)         -->  DECIMAL
-   * CHAR(10)               -->  CHAR
-   * VARCHAR(10)            -->  VARCHAR
-   * ARRAY<DECIMAL(10,10)>  -->  ARRAY<DECIMAL(10,10)>
-   * ARRAY<CHAR(10)>        -->  ARRAY<CHAR(10)>
-   * ARRAY<VARCHAR(10)>     -->  ARRAY<VARCHAR(10)>
-   */
-  private static String getHs2MetadataTypeName(Type colType) {
-    if (colType.isScalarType()) return colType.getPrimitiveType().toString();
-    return colType.toSql();
-  }
-
-  /**
-   * Executes the GetSchemas HiveServer2 operation and returns TResultSet.
-   * It queries the Impala catalog to return the list of schemas that fit the search
-   * pattern.
-   * catalogName and schemaName are JDBC search patterns.
-   */
-  public static TResultSet getSchemas(Frontend fe,
-      String catalogName, String schemaName, User user) throws ImpalaException {
-    TResultSet result = createEmptyResultSet(GET_SCHEMAS_MD);
-
-    // Get the list of schemas that satisfy the search condition.
-    DbsMetadata dbsMetadata = getDbsMetadata(fe, catalogName,
-        PatternMatcher.createJdbcPatternMatcher(schemaName),
-        PatternMatcher.MATCHER_MATCH_NONE,
-        PatternMatcher.MATCHER_MATCH_NONE,
-        PatternMatcher.MATCHER_MATCH_NONE, user);
-
-    for (int i = 0; i < dbsMetadata.dbs.size(); ++i) {
-      String dbName = dbsMetadata.dbs.get(i);
-      TResultRow row = new TResultRow();
-      row.colVals = Lists.newArrayList();
-      row.colVals.add(createTColumnValue(dbName)); // TABLE_SCHEM
-      row.colVals.add(EMPTY_COL_VAL); // default Hive catalog is an empty string.
-      result.rows.add(row);
-    }
-
-    LOG.debug("Returning " + result.rows.size() + " schemas");
-    return result;
-  }
-
-  /**
-   * Executes the GetTables HiveServer2 operation and returns TResultSet.
-   * It queries the Impala catalog to return the list of tables that fit the search
-   * patterns.
-   * catalogName, schemaName and tableName are JDBC search patterns.
-   * tableTypes specifies which table types to search for (TABLE, VIEW, etc).
-   */
-  public static TResultSet getTables(Frontend fe, String catalogName,
-      String schemaName, String tableName, List<String> tableTypes, User user)
-          throws ImpalaException{
-    TResultSet result = createEmptyResultSet(GET_TABLES_MD);
-
-    // The Impala catalog only contains TABLE. Returns an empty result set if the
-    // search does not include TABLE.
-    if (tableTypes != null && !tableTypes.isEmpty()) {
-      boolean hasTableType = false;
-      for (String tableType: tableTypes) {
-        if (tableType.toLowerCase().equals("table")) {
-          hasTableType = true;
-          break;
-        }
-      }
-      if (!hasTableType) {
-        return result;
-      }
-    }
-
-    // Get the list of schemas, tables that satisfy the search conditions.
-    DbsMetadata dbsMetadata = getDbsMetadata(fe, catalogName,
-        PatternMatcher.createJdbcPatternMatcher(schemaName),
-        PatternMatcher.createJdbcPatternMatcher(tableName),
-        PatternMatcher.MATCHER_MATCH_NONE,
-        PatternMatcher.MATCHER_MATCH_NONE, user);
-
-    for (int i = 0; i < dbsMetadata.dbs.size(); ++i) {
-      String dbName = dbsMetadata.dbs.get(i);
-      for (int j = 0; j < dbsMetadata.tableNames.get(i).size(); ++j) {
-        String tabName = dbsMetadata.tableNames.get(i).get(j);
-        TResultRow row = new TResultRow();
-        row.colVals = Lists.newArrayList();
-        row.colVals.add(EMPTY_COL_VAL);
-        row.colVals.add(createTColumnValue(dbName));
-        row.colVals.add(createTColumnValue(tabName));
-        row.colVals.add(TABLE_TYPE_COL_VAL);
-        // TODO: Return table comments when it is available in the Impala catalog.
-        row.colVals.add(EMPTY_COL_VAL);
-        result.rows.add(row);
-      }
-    }
-    LOG.debug("Returning " + result.rows.size() + " tables");
-    return result;
-  }
-
-  /**
-   * Executes the GetTypeInfo HiveServer2 operation and returns Impala supported types.
-   */
-  public static TResultSet getTypeInfo() {
-    TResultSet result = createEmptyResultSet(GET_TYPEINFO_MD);
-    result.rows = GET_TYPEINFO_RESULTS;
-    return result;
-  }
-
-  /**
-   * Executes the GetTableTypes HiveServer2 operation.
-   */
-  public static TResultSet getTableTypes() {
-    TResultSet result = createEmptyResultSet(GET_TABLE_TYPES_MD);
-    result.rows = GET_TABLE_TYPES_RESULTS;
-    return result;
-  }
-
-  /**
-   * Create a function result row in the JDBC format.
-   */
-  private static TResultRow createFunctionResultRow(Function fn) {
-    TResultRow row = new TResultRow();
-    row.colVals = Lists.newArrayList();
-    row.colVals.add(NULL_COL_VAL); // FUNCTION_CAT
-    row.colVals.add(createTColumnValue(fn.dbName())); // FUNCTION_SCHEM
-    row.colVals.add(createTColumnValue(fn.functionName())); // FUNCTION_NAME
-    row.colVals.add(EMPTY_COL_VAL); // REMARKS
-    // FUNCTION_TYPE
-    row.colVals.add(createTColumnValue(DatabaseMetaData.functionNoTable));
-    row.colVals.add(createTColumnValue(fn.signatureString())); // SPECIFIC_NAME
-    return row;
-  }
-
-  /**
-   * Executes the GetFunctions HiveServer2 operation and returns TResultSet.
-   * Returns the list of functions that fit the search patterns.
-   * catalogName, schemaName and functionName are JDBC search patterns.
-   * @throws ImpalaException
-   */
-  public static TResultSet getFunctions(Frontend fe,
-      String catalogName, String schemaName, String functionName,
-      User user) throws ImpalaException {
-    TResultSet result = createEmptyResultSet(GET_FUNCTIONS_MD);
-
-    // Impala's built-in functions do not have a catalog name or schema name.
-    if (!isEmptyPattern(catalogName) || !isEmptyPattern(schemaName)) {
-      return result;
-    }
-
-    DbsMetadata dbsMetadata = getDbsMetadata(fe, catalogName,
-        PatternMatcher.createJdbcPatternMatcher(schemaName),
-        PatternMatcher.MATCHER_MATCH_NONE,
-        PatternMatcher.MATCHER_MATCH_NONE,
-        PatternMatcher.createJdbcPatternMatcher(functionName), user);
-    for (List<Function> fns: dbsMetadata.functions) {
-      for (Function fn: fns) {
-        result.rows.add(createFunctionResultRow(fn));
-      }
-    }
-
-    return result;
-  }
-
-  /**
-   * Fills the GET_TYPEINFO_RESULTS with supported primitive types.
-   */
-  private static void createGetTypeInfoResults() {
-    for (PrimitiveType ptype: PrimitiveType.values()) {
-      if (ptype.equals(PrimitiveType.INVALID_TYPE) ||
-          ptype.equals(PrimitiveType.DATE) ||
-          ptype.equals(PrimitiveType.DATETIME) ||
-          ptype.equals(PrimitiveType.DECIMAL) ||
-          ptype.equals(PrimitiveType.CHAR) ||
-          ptype.equals(PrimitiveType.VARCHAR)) {
-        continue;
-      }
-      Type type = ScalarType.createType(ptype);
-      TResultRow row = new TResultRow();
-      row.colVals = Lists.newArrayList();
-      row.colVals.add(createTColumnValue(ptype.name())); // TYPE_NAME
-      row.colVals.add(createTColumnValue(type.getJavaSqlType()));  // DATA_TYPE
-      row.colVals.add(createTColumnValue(type.getPrecision()));  // PRECISION
-      row.colVals.add(NULL_COL_VAL); // LITERAL_PREFIX
-      row.colVals.add(NULL_COL_VAL); // LITERAL_SUFFIX
-      row.colVals.add(NULL_COL_VAL); // CREATE_PARAMS
-      row.colVals.add(createTColumnValue(DatabaseMetaData.typeNullable));  // NULLABLE
-      row.colVals.add(createTColumnValue(type.isStringType())); // CASE_SENSITIVE
-      row.colVals.add(createTColumnValue(DatabaseMetaData.typeSearchable));  // SEARCHABLE
-      row.colVals.add(createTColumnValue(!type.isNumericType())); // UNSIGNED_ATTRIBUTE
-      row.colVals.add(createTColumnValue(false));  // FIXED_PREC_SCALE
-      row.colVals.add(createTColumnValue(false));  // AUTO_INCREMENT
-      row.colVals.add(NULL_COL_VAL); // LOCAL_TYPE_NAME
-      row.colVals.add(createTColumnValue(0));  // MINIMUM_SCALE
-      row.colVals.add(createTColumnValue(0));  // MAXIMUM_SCALE
-      row.colVals.add(NULL_COL_VAL); // SQL_DATA_TYPE
-      row.colVals.add(NULL_COL_VAL); // SQL_DATETIME_SUB
-      row.colVals.add(createTColumnValue(type.getNumPrecRadix()));  // NUM_PREC_RADIX
-      GET_TYPEINFO_RESULTS.add(row);
-    }
-  }
-
-  /**
-   * Fills the GET_TABLE_TYPES_RESULTS with "TABLE".
-   */
-  private static void createGetTableTypesResults() {
-    TResultRow row = new TResultRow();
-    row.colVals = Lists.newArrayList();
-    row.colVals.add(createTColumnValue("TABLE"));
-    GET_TABLE_TYPES_RESULTS.add(row);
-  }
-
-  /**
-   * Returns a TResultSet with the specified schema. The
-   * result set will be empty.
-   */
-  private static TResultSet createEmptyResultSet(TResultSetMetadata metadata) {
-    TResultSet result = new TResultSet();
-    result.rows = Lists.newArrayList();
-    result.schema = metadata;
-    return result;
-  }
-
-  // Helper methods to create TColumnValue
-  public static TColumnValue createTColumnValue(String val) {
-    TColumnValue colVal = new TColumnValue();
-    if (val != null) {
-      colVal.setString_val(val);
-    }
-    return colVal;
-  }
-
-  public static TColumnValue createTColumnValue(Integer val) {
-    TColumnValue colVal = new TColumnValue();
-    if (val != null) {
-      colVal.setInt_val(val.intValue());
-    }
-    return colVal;
-  }
-
-  public static TColumnValue createTColumnValue(Boolean val) {
-    TColumnValue colVal = new TColumnValue();
-    if (val != null) {
-      colVal.setBool_val(val);
-    }
-    return colVal;
-  }
-
-  /**
-   * Returns true if the JDBC search pattern is empty: either null, empty string or "%".
-   */
-  public static boolean isEmptyPattern(final String pattern) {
-    return (pattern == null) || pattern.isEmpty() ||
-           (pattern.length() == 1 && pattern.equals("%"));
-  }
-}
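
For context, a minimal client-side sketch of how the GetSchemas/GetTables/GetColumns operations implemented above are usually reached through the standard JDBC metadata API. It is illustrative only: the jdbc:hive2 URL, the port, and the assumption that a suitable driver is on the classpath are not part of this patch.

    import java.sql.Connection;
    import java.sql.DatabaseMetaData;
    import java.sql.DriverManager;
    import java.sql.ResultSet;

    public class MetadataClientSketch {
      public static void main(String[] args) throws Exception {
        // Hypothetical connection string; adjust host/port/driver for the actual setup.
        try (Connection conn = DriverManager.getConnection("jdbc:hive2://localhost:21050/")) {
          DatabaseMetaData md = conn.getMetaData();
          // null and "%" behave as empty patterns (match everything), mirroring
          // isEmptyPattern() in the server-side code above.
          try (ResultSet tables = md.getTables(null, "functional%", "%", new String[] {"TABLE"})) {
            while (tables.next()) {
              System.out.println(
                  tables.getString("TABLE_SCHEM") + "." + tables.getString("TABLE_NAME"));
            }
          }
        }
      }
    }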

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/util/AvroSchemaConverter.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/util/AvroSchemaConverter.java b/fe/src/main/java/com/cloudera/impala/util/AvroSchemaConverter.java
deleted file mode 100644
index 0feb1c1..0000000
--- a/fe/src/main/java/com/cloudera/impala/util/AvroSchemaConverter.java
+++ /dev/null
@@ -1,209 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-package com.cloudera.impala.util;
-
-import java.util.Arrays;
-import java.util.List;
-
-import org.apache.avro.Schema;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.codehaus.jackson.JsonNode;
-import org.codehaus.jackson.node.IntNode;
-import org.codehaus.jackson.node.JsonNodeFactory;
-
-import com.cloudera.impala.analysis.ColumnDef;
-import com.cloudera.impala.catalog.ArrayType;
-import com.cloudera.impala.catalog.Column;
-import com.cloudera.impala.catalog.MapType;
-import com.cloudera.impala.catalog.ScalarType;
-import com.cloudera.impala.catalog.StructField;
-import com.cloudera.impala.catalog.StructType;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.catalog.Type;
-import com.google.common.collect.Lists;
-
-/**
- * Utility class to generate an Impala-compatible Avro Schema from other schemas, e.g.,
- * an Impala table, a list of Impala columns, a list of Hive field schemas, etc.
- *
- * Error behavior: These functions throw an UnsupportedOperationException when failing
- * to generate an Impala-compatible Avro schema, e.g., because of an unknown type or a
- * type not supported by Impala.
- *
- * Behavior for TIMESTAMP:
- * A TIMESTAMP column definition maps to an Avro STRING and is created as a STRING column,
- * because Avro has no binary TIMESTAMP representation. As a result, no Avro table may
- * have a TIMESTAMP column.
- */
-public class AvroSchemaConverter {
-  // Arbitrarily chosen schema name and record prefix. Note that
-  // record names must be unique within an Avro schema.
-  private static final String DEFAULT_SCHEMA_NAME = "baseRecord";
-  private static final String RECORD_NAME_PREFIX = "record_";
-
-  // Constants for Avro logical types, in particular, for DECIMAL.
-  private static final String AVRO_LOGICAL_TYPE = "logicalType";
-  private static final String PRECISION_PROP_NAME = "precision";
-  private static final String SCALE_PROP_NAME = "scale";
-  private static final String AVRO_DECIMAL_TYPE = "decimal";
-
-  // Used to generate unique record names as required by Avro.
-  private int recordCounter_ = 0;
-
-  public static Schema convertColumns(
-      List<Column> columns, String schemaName) {
-    AvroSchemaConverter converter = new AvroSchemaConverter();
-    return converter.convertColumnsImpl(columns, schemaName);
-  }
-
-  public static Schema convertColumnDefs(
-      List<ColumnDef> colDefs, String schemaName) {
-    AvroSchemaConverter converter = new AvroSchemaConverter();
-    return converter.convertColumnDefsImpl(colDefs, schemaName);
-  }
-
-  public static Schema convertFieldSchemas(
-      List<FieldSchema> fieldSchemas, String schemaName) {
-    AvroSchemaConverter converter = new AvroSchemaConverter();
-    return converter.convertFieldSchemasImpl(fieldSchemas, schemaName);
-  }
-
-  public static Schema convertTable(Table impalaTable) {
-    return convertColumns(impalaTable.getColumns(), impalaTable.getFullName());
-  }
-
-  private Schema convertColumnsImpl(List<Column> columns, String schemaName) {
-    List<Schema.Field> avroFields = Lists.newArrayList();
-    for (Column column: columns) {
-      final Schema.Field avroField = new Schema.Field(column.getName(),
-          createAvroSchema(column.getType()), column.getComment(), null);
-      avroFields.add(avroField);
-    }
-    return createAvroRecord(avroFields, schemaName);
-  }
-
-  private Schema convertColumnDefsImpl(List<ColumnDef> colDefs, String schemaName) {
-    List<Schema.Field> avroFields = Lists.newArrayList();
-    for (ColumnDef colDef: colDefs) {
-      final Schema.Field avroField = new Schema.Field(colDef.getColName(),
-          createAvroSchema(colDef.getType()), colDef.getComment(), null);
-      avroFields.add(avroField);
-    }
-    return createAvroRecord(avroFields, schemaName);
-  }
-
-  private Schema convertFieldSchemasImpl(
-      List<FieldSchema> fieldSchemas, String schemaName) {
-    List<Schema.Field> avroFields = Lists.newArrayList();
-    JsonNode nullDefault = JsonNodeFactory.instance.nullNode();
-    for (FieldSchema fs: fieldSchemas) {
-      Type impalaType = Type.parseColumnType(fs.getType());
-      if (impalaType == null) {
-        throw new UnsupportedOperationException(
-            fs.getType() + " is not a supported Impala type");
-      }
-      final Schema.Field avroField = new Schema.Field(fs.getName(),
-          createAvroSchema(impalaType), fs.getComment(), nullDefault);
-      avroFields.add(avroField);
-    }
-    return createAvroRecord(avroFields, schemaName);
-  }
-
-  private Schema createAvroRecord(List<Schema.Field> avroFields, String schemaName) {
-    // Name is a required property for an Avro Record.
-    if (schemaName == null || schemaName.isEmpty()) schemaName = DEFAULT_SCHEMA_NAME;
-    Schema schema = Schema.createRecord(schemaName, null, null, false);
-    schema.setFields(avroFields);
-    return schema;
-  }
-
-  private Schema createAvroSchema(Type impalaType) {
-    Schema schema = null;
-    if (impalaType.isScalarType()) {
-      schema = createScalarSchema((ScalarType) impalaType);
-    } else if (impalaType.isArrayType()) {
-      schema = createArraySchema((ArrayType) impalaType);
-    } else if (impalaType.isMapType()) {
-      schema = createMapSchema((MapType) impalaType);
-    } else if (impalaType.isStructType()) {
-      schema = createRecordSchema((StructType) impalaType);
-    } else {
-      throw new UnsupportedOperationException(
-          impalaType.toSql() + " cannot be converted to an Avro type");
-    }
-    // Make the Avro schema nullable.
-    Schema nullSchema = Schema.create(Schema.Type.NULL);
-    return Schema.createUnion(Arrays.asList(nullSchema, schema));
-  }
-
-  private Schema createScalarSchema(ScalarType impalaScalarType) {
-    switch (impalaScalarType.getPrimitiveType()) {
-      case STRING: return Schema.create(Schema.Type.STRING);
-      case CHAR: return Schema.create(Schema.Type.STRING);
-      case VARCHAR: return Schema.create(Schema.Type.STRING);
-      case TINYINT: return Schema.create(Schema.Type.INT);
-      case SMALLINT: return Schema.create(Schema.Type.INT);
-      case INT: return Schema.create(Schema.Type.INT);
-      case BIGINT: return Schema.create(Schema.Type.LONG);
-      case BOOLEAN: return Schema.create(Schema.Type.BOOLEAN);
-      case FLOAT: return Schema.create(Schema.Type.FLOAT);
-      case DOUBLE: return Schema.create(Schema.Type.DOUBLE);
-      case TIMESTAMP: return Schema.create(Schema.Type.STRING);
-      case DECIMAL: return createDecimalSchema(impalaScalarType);
-      default:
-        throw new UnsupportedOperationException(
-            impalaScalarType.toSql() + " cannot be converted to an Avro type");
-    }
-  }
-
-  private Schema createDecimalSchema(ScalarType impalaDecimalType) {
-    Schema decimalSchema = Schema.create(Schema.Type.BYTES);
-    decimalSchema.addProp(AVRO_LOGICAL_TYPE, AVRO_DECIMAL_TYPE);
-    // precision and scale must be integer values
-    decimalSchema.addProp(PRECISION_PROP_NAME,
-        new IntNode(impalaDecimalType.decimalPrecision()));
-    decimalSchema.addProp(SCALE_PROP_NAME,
-        new IntNode(impalaDecimalType.decimalScale()));
-    return decimalSchema;
-  }
-
-  private Schema createArraySchema(ArrayType impalaArrayType) {
-    Schema elementSchema = createAvroSchema(impalaArrayType.getItemType());
-    return Schema.createArray(elementSchema);
-  }
-
-  private Schema createMapSchema(MapType impalaMapType) {
-    // Map keys are always STRING according to the Avro spec.
-    Schema valueSchema = createAvroSchema(impalaMapType.getValueType());
-    return Schema.createMap(valueSchema);
-  }
-
-  private Schema createRecordSchema(StructType impalaStructType) {
-    List<Schema.Field> schemaFields = Lists.newArrayList();
-    for (StructField structField : impalaStructType.getFields()) {
-      Schema.Field avroField = new Schema.Field(structField.getName(),
-          createAvroSchema(structField.getType()), structField.getComment(), null);
-      schemaFields.add(avroField);
-    }
-    // All Avro records in a table must have the name property.
-    Schema structSchema = Schema.createRecord(
-        RECORD_NAME_PREFIX + recordCounter_, null, null, false);
-    ++recordCounter_;
-    structSchema.setFields(schemaFields);
-    return structSchema;
-  }
-}
\ No newline at end of file
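
A small usage sketch of the AvroSchemaConverter removed above, assuming the old com.cloudera.impala packages are still on the classpath; the field names and the schema name are made up for illustration. It prints the nullable unions and the DECIMAL logical-type annotation described in the class comment.

    import java.util.Arrays;

    import org.apache.avro.Schema;
    import org.apache.hadoop.hive.metastore.api.FieldSchema;

    import com.cloudera.impala.util.AvroSchemaConverter;

    public class AvroSchemaConverterSketch {
      public static void main(String[] args) {
        Schema schema = AvroSchemaConverter.convertFieldSchemas(
            Arrays.asList(
                new FieldSchema("id", "bigint", "row id"),
                new FieldSchema("price", "decimal(10,2)", "list price"),
                new FieldSchema("tags", "array<string>", null)),
            "demo.sketch_table");
        // Every field becomes a ["null", <type>] union; DECIMAL maps to BYTES with
        // "logicalType": "decimal" plus precision/scale; TIMESTAMP would map to STRING.
        System.out.println(schema.toString(true));
      }
    }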


[11/61] [partial] incubator-impala git commit: IMPALA-3786: Replace "cloudera" with "apache" (part 1)

Posted by kw...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/util/MembershipSnapshot.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/util/MembershipSnapshot.java b/fe/src/main/java/com/cloudera/impala/util/MembershipSnapshot.java
deleted file mode 100644
index 57574fe..0000000
--- a/fe/src/main/java/com/cloudera/impala/util/MembershipSnapshot.java
+++ /dev/null
@@ -1,84 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.util;
-
-import java.util.Set;
-import java.util.concurrent.atomic.AtomicReference;
-
-import com.cloudera.impala.thrift.TNetworkAddress;
-import com.cloudera.impala.thrift.TUpdateMembershipRequest;
-import com.google.common.collect.Sets;
-
-/**
- * Singleton class that represents a snapshot of the Impalad cluster membership.  Host
- * membership is determined by both IP address and hostname (to mimic the backend's
- * SimpleScheduler).  A new snapshot is created whenever the cluster membership changes
- * so that clients don't need to hold a lock while examining a snapshot.
- */
-public class MembershipSnapshot {
-
-  // The latest instance of the MembershipSnapshot.
-  private static AtomicReference<MembershipSnapshot> cluster_ =
-      new AtomicReference<MembershipSnapshot>(new MembershipSnapshot());
-
-  // The set of hosts that are members of the cluster given by hostname.
-  private final Set<String> hostnames_;
-
-  // The set of hosts that are members of the cluster given by IP address.
-  private final Set<String> ipAddresses_;
-
-  // The number of nodes of the cluster.  Normally, this will be equal to
-  // hostnames_.size(), except in the test minicluster where there are multiple
-  // impalad's running on a single host.
-  private final int numNodes_;
-
-  // Used only to construct the initial MembershipSnapshot.  Before we get the first
-  // snapshot, assume one node (the localhost) to mimic SimpleScheduler.
-  private MembershipSnapshot() {
-    hostnames_ = Sets.newHashSet();
-    ipAddresses_ = Sets.newHashSet();
-    numNodes_ = 1;
-  }
-
-  // Construct a new snapshot based on the TUpdateMembershipRequest.
-  private MembershipSnapshot(TUpdateMembershipRequest request) {
-    hostnames_ = request.getHostnames();
-    ipAddresses_ = request.getIp_addresses();
-    numNodes_ = request.getNum_nodes();
-  }
-
-  // Determine whether a host, given either by IP address or hostname, is a member of
-  // this snapshot.  Returns true if it is, false otherwise.
-  public boolean contains(TNetworkAddress address) {
-    String host = address.getHostname();
-    return ipAddresses_.contains(host) || hostnames_.contains(host);
-  }
-
-  // The number of nodes in this snapshot.
-  public int numNodes() { return numNodes_; }
-
-  // Atomically update the singleton snapshot instance.  After the update completes,
-  // all calls to getCluster() will return the new snapshot.
-  public static void update(TUpdateMembershipRequest request) {
-    cluster_.set(new MembershipSnapshot(request));
-  }
-
-  // Return the current singleton snapshot instance.
-  public static MembershipSnapshot getCluster() { return cluster_.get(); }
-
-}
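
A hypothetical sketch of how the MembershipSnapshot above is driven. The thrift setter names and the two-argument TNetworkAddress constructor are assumed to be the standard thrift-generated accessors matching the getters used in the class, and the host/IP values are placeholders.

    import com.cloudera.impala.thrift.TNetworkAddress;
    import com.cloudera.impala.thrift.TUpdateMembershipRequest;
    import com.cloudera.impala.util.MembershipSnapshot;
    import com.google.common.collect.Sets;

    public class MembershipSnapshotSketch {
      public static void main(String[] args) {
        TUpdateMembershipRequest request = new TUpdateMembershipRequest();
        request.setHostnames(Sets.newHashSet("impalad-1.example.com"));
        request.setIp_addresses(Sets.newHashSet("10.0.0.1"));
        request.setNum_nodes(1);
        // Atomically swap in the new snapshot; readers never take a lock.
        MembershipSnapshot.update(request);
        MembershipSnapshot cluster = MembershipSnapshot.getCluster();
        System.out.println(cluster.contains(new TNetworkAddress("10.0.0.1", 22000)));
        System.out.println(cluster.numNodes());
      }
    }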

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/util/MetaStoreUtil.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/util/MetaStoreUtil.java b/fe/src/main/java/com/cloudera/impala/util/MetaStoreUtil.java
deleted file mode 100644
index 540c749..0000000
--- a/fe/src/main/java/com/cloudera/impala/util/MetaStoreUtil.java
+++ /dev/null
@@ -1,172 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.util;
-
-import java.util.List;
-import java.util.Map;
-
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.IMetaStoreClient;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.log4j.Logger;
-import org.apache.thrift.TException;
-
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.common.AnalysisException;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-/**
- * Utility methods for interacting with the Hive Metastore.
- */
-public class MetaStoreUtil {
-  private static final Logger LOG = Logger.getLogger(MetaStoreUtil.class);
-
-  // Maximum comment length, e.g., for columns, that can be stored in the HMS.
-  // This number is a lower bound of the constraint set in the HMS DB schema,
-  // because the constraint varies among different backing databases, e.g.,
-  // for Postgres it is 4000, but for most other databases it is 256.
-  public static final int CREATE_MAX_COMMENT_LENGTH = 256;
-
-  // Maximum length of the string representation of a type that the HMS can store.
-  public static final int MAX_TYPE_NAME_LENGTH = 4000;
-
-  // The longest strings Hive accepts for [serde] property keys.
-  public static final int MAX_PROPERTY_KEY_LENGTH = 256;
-
-  // The longest strings Hive accepts for [serde] property values.
-  public static final int MAX_PROPERTY_VALUE_LENGTH = 4000;
-
-  // The default maximum number of partitions to fetch from the Hive metastore in one
-  // RPC.
-  private static final short DEFAULT_MAX_PARTITIONS_PER_RPC = 1000;
-
-  // The maximum number of partitions to fetch from the metastore in one RPC.
-  // Read from the 'hive.metastore.batch.retrieve.table.partition.max' Hive configuration
-  // and defaults to DEFAULT_MAX_PARTITIONS_PER_RPC if the value is not present in the
-  // Hive configuration.
-  private static short maxPartitionsPerRpc_ = DEFAULT_MAX_PARTITIONS_PER_RPC;
-
-  static {
-    // Get the value from the Hive configuration, if present.
-    HiveConf hiveConf = new HiveConf(HdfsTable.class);
-    String strValue = hiveConf.get(
-        HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_TABLE_PARTITION_MAX.toString());
-    if (strValue != null) {
-      try {
-        maxPartitionsPerRpc_ = Short.parseShort(strValue);
-      } catch (NumberFormatException e) {
-        LOG.error("Error parsing max partition batch size from HiveConfig: ", e);
-      }
-    }
-    if (maxPartitionsPerRpc_ <= 0) {
-      LOG.error(String.format("Invalid value for max partition batch size: %d. Using " +
-          "default: %d", maxPartitionsPerRpc_, DEFAULT_MAX_PARTITIONS_PER_RPC));
-      maxPartitionsPerRpc_ = DEFAULT_MAX_PARTITIONS_PER_RPC;
-    }
-  }
-
-  /**
-   * Fetches all partitions for a table in batches, with each batch containing at most
-   * maxPartitionsPerRpc_ partitions. Returns a List containing all fetched Partitions.
-   * Will throw a MetaException if existing partitions are dropped while a fetch is in
-   * progress. To help protect against this, the operation can be retried if there is
-   * a MetaException by setting the "numRetries" parameter.
-   * Failures due to thrift exceptions (TExceptions) are not retried because they
-   * generally mean the connection is broken or has timed out. The HiveClient supports
-   * configuring retries at the connection level so they can be enabled independently.
-   */
-  public static List<org.apache.hadoop.hive.metastore.api.Partition> fetchAllPartitions(
-      IMetaStoreClient client, String dbName, String tblName, int numRetries)
-      throws MetaException, TException {
-    Preconditions.checkArgument(numRetries >= 0);
-    int retryAttempt = 0;
-    while (true) {
-      try {
-        // First, get all partition names that currently exist.
-        List<String> partNames = client.listPartitionNames(dbName, tblName, (short) -1);
-        return MetaStoreUtil.fetchPartitionsByName(client, partNames, dbName, tblName);
-      } catch (MetaException e) {
-        // Only retry for MetaExceptions, since TExceptions could indicate a broken
-        // connection which we can't recover from by retrying.
-        if (retryAttempt < numRetries) {
-          LOG.error(String.format("Error fetching partitions for table: %s.%s. " +
-              "Retry attempt: %d/%d", dbName, tblName, retryAttempt, numRetries), e);
-          ++retryAttempt;
-          // TODO: Sleep for a bit?
-        } else {
-          throw e;
-        }
-      }
-    }
-  }
-
-  /**
-   * Given a List of partition names, fetches the matching Partitions from the HMS
-   * in batches. Each batch will contain at most maxPartitionsPerRpc_ partitions.
-   * Returns a List containing all fetched Partitions.
-   * Will throw a MetaException if any partitions in 'partNames' do not exist.
-   */
-  public static List<Partition> fetchPartitionsByName(
-      IMetaStoreClient client, List<String> partNames, String dbName, String tblName)
-      throws MetaException, TException {
-    LOG.trace(String.format("Fetching %d partitions for: %s.%s using partition " +
-        "batch size: %d", partNames.size(), dbName, tblName, maxPartitionsPerRpc_));
-
-    List<org.apache.hadoop.hive.metastore.api.Partition> fetchedPartitions =
-        Lists.newArrayList();
-    // Fetch the partitions in batches.
-    for (int i = 0; i < partNames.size(); i += maxPartitionsPerRpc_) {
-      // Get a subset of partition names to fetch.
-      List<String> partsToFetch =
-          partNames.subList(i, Math.min(i + maxPartitionsPerRpc_, partNames.size()));
-      // Fetch these partitions from the metastore.
-      fetchedPartitions.addAll(
-          client.getPartitionsByNames(dbName, tblName, partsToFetch));
-    }
-    return fetchedPartitions;
-  }
-
-  /**
-   * Checks that a given 'property' is short enough for HMS to handle. If not, throws an
-   * 'AnalysisException' with 'name' as its prefix.
-   */
-  public static void checkShortProperty(String name, String property, int length)
-      throws AnalysisException {
-    if (property.length() > length) {
-      throw new AnalysisException(
-          name + " length must be <= " + length + ": " + property.length());
-    }
-  }
-
-  /**
-   * Checks that each key and value in a property map is short enough for HMS to handle.
-   * If not, an 'AnalysisException' is thrown with 'mapName' as its prefix.
-   */
-  public static void checkShortPropertyMap(
-      String mapName, Map<String, String> propertyMap) throws AnalysisException {
-    if (null != propertyMap) {
-      for (Map.Entry<String, String> property : propertyMap.entrySet()) {
-        checkShortProperty(mapName + " key", property.getKey(), MAX_PROPERTY_KEY_LENGTH);
-        checkShortProperty(
-            mapName + " value", property.getValue(), MAX_PROPERTY_VALUE_LENGTH);
-      }
-    }
-  }
-}
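
An illustrative sketch of driving the MetaStoreUtil helpers above; the HiveMetaStoreClient construction and the functional.alltypes table name are assumptions for the example, not something this patch prescribes.

    import java.util.List;

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.Partition;

    import com.cloudera.impala.util.MetaStoreUtil;

    public class MetaStoreUtilSketch {
      public static void main(String[] args) throws Exception {
        HiveMetaStoreClient client = new HiveMetaStoreClient(new HiveConf());
        // Fetches all partitions in batches of maxPartitionsPerRpc_, retrying once
        // on a MetaException (e.g. a partition dropped while the fetch is running).
        List<Partition> parts =
            MetaStoreUtil.fetchAllPartitions(client, "functional", "alltypes", 1);
        System.out.println("Fetched " + parts.size() + " partitions");
        client.close();
      }
    }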

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/util/NativeLibUtil.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/util/NativeLibUtil.java b/fe/src/main/java/com/cloudera/impala/util/NativeLibUtil.java
deleted file mode 100644
index 08c929b..0000000
--- a/fe/src/main/java/com/cloudera/impala/util/NativeLibUtil.java
+++ /dev/null
@@ -1,44 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.util;
-
-import java.io.File;
-
-public class NativeLibUtil {
-  /**
-   * Attempts to load the given library from all paths in java.library.path.
-   * Throws a RuntimeException if the library could not be loaded from
-   * any location.
-   */
-  public static void loadLibrary(String libFileName) {
-    boolean found = false;
-    String javaLibPath = System.getProperty("java.library.path");
-    for (String path: javaLibPath.split(":")) {
-      File libFile = new File(path + File.separator + libFileName);
-      if (libFile.exists()) {
-        System.load(libFile.getPath());
-        found = true;
-        break;
-      }
-    }
-    if (!found) {
-      throw new RuntimeException("Failed to load " + libFileName + " from any " +
-          "location in java.library.path (" + javaLibPath + ").");
-    }
-  }
-}
\ No newline at end of file
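
A minimal sketch of the NativeLibUtil entry point above; the library name is the one NativeLogger (next file) loads, used here purely as an example.

    import com.cloudera.impala.util.NativeLibUtil;

    public class NativeLibUtilSketch {
      public static void main(String[] args) {
        // Searches every entry of java.library.path and throws a RuntimeException
        // if the file is not found in any of them.
        NativeLibUtil.loadLibrary("libloggingsupport.so");
      }
    }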

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/util/NativeLogger.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/util/NativeLogger.java b/fe/src/main/java/com/cloudera/impala/util/NativeLogger.java
deleted file mode 100644
index 9956ca4..0000000
--- a/fe/src/main/java/com/cloudera/impala/util/NativeLogger.java
+++ /dev/null
@@ -1,51 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.util;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Class that manages loading and calling the native logging library to forward
- * log4j log messages to be logged by glog.
- */
-public class NativeLogger {
-  private final static Logger LOG = LoggerFactory.getLogger(NativeLogger.class);
-  private static boolean loaded_ = false;
-
-  // Writes a log message to glog
-  private native static void Log(int severity, String msg, String filename, int line);
-
-  public static void LogToGlog(int severity, String msg, String filename, int line) {
-    try {
-      Log(severity, msg, filename, line);
-    } catch (UnsatisfiedLinkError e) {
-      loadLibrary();
-      Log(severity, msg, filename, line);
-    }
-  }
-
-  /**
-   * Loads the native logging support library.
-   */
-  private static synchronized void loadLibrary() {
-    if (loaded_) return;
-    NativeLibUtil.loadLibrary("libloggingsupport.so");
-    loaded_ = true;
-  }
-}
\ No newline at end of file
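
A hypothetical sketch of forwarding a message to glog through the NativeLogger above; the severity value 0 (glog INFO) and the file/line arguments are illustrative assumptions.

    import com.cloudera.impala.util.NativeLogger;

    public class NativeLoggerSketch {
      public static void main(String[] args) {
        // The first call triggers loadLibrary() via the UnsatisfiedLinkError fallback.
        NativeLogger.LogToGlog(0, "hello from the frontend", "NativeLoggerSketch.java", 7);
      }
    }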

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/util/PatternMatcher.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/util/PatternMatcher.java b/fe/src/main/java/com/cloudera/impala/util/PatternMatcher.java
deleted file mode 100644
index bc85d18..0000000
--- a/fe/src/main/java/com/cloudera/impala/util/PatternMatcher.java
+++ /dev/null
@@ -1,108 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.util;
-
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.List;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import com.google.common.collect.Lists;
-
-/**
- * Utility class to handle pattern-matching for different types of patterns
- * (e.g. Hive SHOW patterns, JDBC patterns).
- * It maps those patterns onto the java regex pattern objects.
- */
-public class PatternMatcher {
-  // Patterns to match against. A string is considered to match if it matches
-  // any of the patterns.
-  private List<Pattern> patterns_;
-
-  // Returns true if patterns_ is null or the candidate matches any pattern.
-  // Returns false if patterns_ is empty or the candidate matches none of them.
-  public boolean matches(String candidate) {
-    if (patterns_ == null) return true;
-    if (patterns_.isEmpty()) return false;
-    for (Pattern pattern: patterns_) {
-      if (pattern.matcher(candidate).matches()) return true;
-    }
-    return false;
-  }
-
-  // Immutable pattern matcher that matches all
-  private final static class MatchAllPatternMatcher extends PatternMatcher {
-    MatchAllPatternMatcher() {}
-    public boolean matches(String candidate) { return true; }
-  }
-
-  // Immutable pattern matcher that matches none
-  private final static class MatchNonePatternMatcher extends PatternMatcher {
-    MatchNonePatternMatcher() {}
-    public boolean matches(String candidate) { return false; }
-  }
-
-  public static final PatternMatcher MATCHER_MATCH_ALL = new MatchAllPatternMatcher();
-  public static final PatternMatcher MATCHER_MATCH_NONE = new MatchNonePatternMatcher();
-
-  /**
-   * Creates a pattern matcher for hive patterns.
-   * The only metacharacters are '*' which matches any string of characters, and '|'
-   * which denotes choice.
-   * If hivePattern is null, all strings are considered to match. If it is the
-   * empty string, no strings match.
-   */
-  public static PatternMatcher createHivePatternMatcher(String hivePattern) {
-    if (hivePattern == null) return MATCHER_MATCH_ALL;
-    if (hivePattern.isEmpty()) return MATCHER_MATCH_NONE;
-    PatternMatcher result = new PatternMatcher();
-    result.patterns_ = Lists.newArrayList();
-    // Hive ignores pretty much all metacharacters, so we have to escape them.
-    final String metaCharacters = "+?.^()]\\/{}";
-    final Pattern regex = Pattern.compile("([" + Pattern.quote(metaCharacters) + "])");
-
-    for (String pattern: Arrays.asList(hivePattern.split("\\|"))) {
-      Matcher matcher = regex.matcher(pattern);
-      pattern = matcher.replaceAll("\\\\$1").replace("*", ".*");
-      result.patterns_.add(Pattern.compile(pattern));
-    }
-    return result;
-  }
-
-  /**
-   * Creates a matcher object for JDBC match strings.
-   */
-  public static PatternMatcher createJdbcPatternMatcher(String pattern) {
-    if (pattern == null || pattern.isEmpty()) {
-      return MATCHER_MATCH_ALL;
-    }
-    String wildcardPattern = ".*";
-    String result = pattern
-        .replaceAll("([^\\\\])%", "$1" + wildcardPattern)
-        .replaceAll("\\\\%", "%")
-        .replaceAll("^%", wildcardPattern)
-        .replaceAll("([^\\\\])_", "$1.")
-        .replaceAll("\\\\_", "_")
-        .replaceAll("^_", ".");
-    PatternMatcher matcher = new PatternMatcher();
-    matcher.patterns_ = Lists.newArrayList();
-    matcher.patterns_.add(Pattern.compile(result));
-    return matcher;
-  }
-}
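
A usage sketch contrasting the two PatternMatcher factory methods above; the table-name patterns are made up for illustration.

    import com.cloudera.impala.util.PatternMatcher;

    public class PatternMatcherSketch {
      public static void main(String[] args) {
        // Hive SHOW-style pattern: '*' is the wildcard and '|' separates alternatives.
        PatternMatcher hive = PatternMatcher.createHivePatternMatcher("functional*|tpch");
        System.out.println(hive.matches("functional_parquet")); // true
        System.out.println(hive.matches("tpcds"));              // false

        // JDBC-style pattern: '%' matches any string, '_' matches a single character.
        PatternMatcher jdbc = PatternMatcher.createJdbcPatternMatcher("alltypes%");
        System.out.println(jdbc.matches("alltypessmall"));      // true
      }
    }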

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/util/RequestPoolService.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/util/RequestPoolService.java b/fe/src/main/java/com/cloudera/impala/util/RequestPoolService.java
deleted file mode 100644
index c1c9bd7..0000000
--- a/fe/src/main/java/com/cloudera/impala/util/RequestPoolService.java
+++ /dev/null
@@ -1,451 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.util;
-
-import java.io.File;
-import java.io.IOException;
-import java.net.MalformedURLException;
-import java.net.URL;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicReference;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.yarn.api.records.QueueACL;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.AllocationConfiguration;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.AllocationFileLoaderService;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedulerConfiguration;
-import org.apache.thrift.TException;
-import org.apache.thrift.TSerializer;
-import org.apache.thrift.protocol.TBinaryProtocol;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.authorization.User;
-import com.cloudera.impala.common.ByteUnits;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.common.JniUtil;
-import com.cloudera.impala.thrift.TErrorCode;
-import com.cloudera.impala.thrift.TPoolConfigParams;
-import com.cloudera.impala.thrift.TPoolConfig;
-import com.cloudera.impala.thrift.TResolveRequestPoolParams;
-import com.cloudera.impala.thrift.TResolveRequestPoolResult;
-import com.cloudera.impala.thrift.TStatus;
-import com.cloudera.impala.util.FileWatchService.FileChangeListener;
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Strings;
-import com.google.common.collect.Lists;
-
-/**
- * Admission control utility class that provides user to request pool mapping, ACL
- * enforcement, and pool configuration values. Pools are configured via a fair scheduler
- * allocation file (fair-scheduler.xml) and Llama configuration (llama-site.xml). This
- * class wraps a number of Hadoop classes to provide the user to pool mapping,
- * authorization, and accessing memory resource limits, all of which are specified in
- * the fair scheduler allocation file. The other pool limits are specified in the
- * Llama configuration, and those properties are accessed via the standard
- * {@link Configuration} API.
- *
- * Both the allocation configuration and Llama configuration files are watched for
- * changes and reloaded when necessary. The allocation file is watched/loaded using the
- * Yarn {@link AllocationFileLoaderService} and the Llama configuration uses a subclass of
- * the {@link FileWatchService}. There are two different mechanisms because there is
- * different parsing/configuration code for the allocation file and the Llama
- * configuration (which is a regular Hadoop conf file so it can use the
- * {@link Configuration} class). start() and stop() will start/stop watching and reloading
- * both of these files.
- *
- * A single instance is created by the backend and lasts the duration of the process.
- */
-public class RequestPoolService {
-  final static Logger LOG = LoggerFactory.getLogger(RequestPoolService.class);
-
-  private final static TBinaryProtocol.Factory protocolFactory_ =
-      new TBinaryProtocol.Factory();
-  // Used to ensure start() has been called before any other methods can be used.
-  private final AtomicBoolean running_;
-
-  // Key for the default maximum number of running queries ("placed reservations")
-  // property. The per-pool key name is this key with the pool name appended, e.g.
-  // "{key}.{pool}". This is a llama-site.xml configuration.
-  final static String LLAMA_MAX_PLACED_RESERVATIONS_KEY =
-      "llama.am.throttling.maximum.placed.reservations";
-
-  // Default value for the maximum.placed.reservations property. Note that this value
-  // differs from the current Llama default of 10000.
-  final static int LLAMA_MAX_PLACED_RESERVATIONS_DEFAULT = -1;
-
-  // Key for the default maximum number of queued requests ("queued reservations")
-  // property. The per-pool key name is this key with the pool name appended, e.g.
-  // "{key}.{pool}". This is a llama-site.xml configuration.
-  final static String LLAMA_MAX_QUEUED_RESERVATIONS_KEY =
-      "llama.am.throttling.maximum.queued.reservations";
-
-  // Default value for the maximum.queued.reservations property. Note that this value
-  // differs from the current Llama default of 0 which disables queuing.
-  final static int LLAMA_MAX_QUEUED_RESERVATIONS_DEFAULT = 200;
-
-  // Key for the pool queue timeout (milliseconds). This is specified in the
-  // llama-site.xml but is Impala-specific; Llama does not use it.
-  final static String QUEUE_TIMEOUT_KEY = "impala.admission-control.pool-queue-timeout-ms";
-
-  // Default value of the pool queue timeout (ms).
-  final static int QUEUE_TIMEOUT_MS_DEFAULT = 60 * 1000;
-
-  // Key for the pool default query options. Query options are specified as a
-  // comma delimited string of 'key=value' pairs, e.g. 'key1=val1,key2=val2'.
-  // This is specified in the llama-site.xml but is Impala-specific and Llama does not
-  // use this.
-  final static String QUERY_OPTIONS_KEY = "impala.admission-control.pool-default-query-options";
-
-  // String format for a per-pool configuration key. First parameter is the key for the
-  // default, e.g. LLAMA_MAX_PLACED_RESERVATIONS_KEY, and the second parameter is the
-  // pool name.
-  final static String LLAMA_PER_POOL_CONFIG_KEY_FORMAT = "%s.%s";
-
-  // Watches for changes to the fair scheduler allocation file.
-  @VisibleForTesting
-  final AllocationFileLoaderService allocLoader_;
-
-  // Provides access to the fair scheduler allocation file. An AtomicReference because it
-  // is reset when the allocation configuration file changes while other threads access it.
-  private final AtomicReference<AllocationConfiguration> allocationConf_;
-
-  // Watches the Llama configuration file for changes.
-  @VisibleForTesting
-  final FileWatchService llamaConfWatcher_;
-
-  // Used by this class to access to the configs provided by the Llama configuration.
-  // This is replaced when the Llama configuration file changes.
-  private volatile Configuration llamaConf_;
-
-  // URL of the Llama configuration file.
-  private final URL llamaConfUrl_;
-
-  /**
-   * Updates the Llama configuration when the file changes. The file is llamaConfUrl_
-   * and it will exist when this is created (or RequestPoolService will not start). If
-   * the file is later removed, warnings will be written to the log but the previous
-   * configuration will still be accessible.
-   */
-  private final class LlamaConfWatcher implements FileChangeListener {
-    public void onFileChange() {
-      // If llamaConfUrl_ is null the watcher should not have been created.
-      Preconditions.checkNotNull(llamaConfUrl_);
-      LOG.info("Loading Llama configuration: " + llamaConfUrl_.getFile());
-      Configuration conf = new Configuration();
-      conf.addResource(llamaConfUrl_);
-      llamaConf_ = conf;
-    }
-  }
-
-  /**
-   * Creates a RequestPoolService instance with a configuration containing the specified
-   * fair-scheduler.xml and llama-site.xml.
-   *
-   * @param fsAllocationPath path to the fair scheduler allocation file.
-   * @param llamaSitePath path to the Llama configuration file.
-   */
-  public RequestPoolService(final String fsAllocationPath, final String llamaSitePath) {
-    Preconditions.checkNotNull(fsAllocationPath);
-    running_ = new AtomicBoolean(false);
-    allocationConf_ = new AtomicReference<AllocationConfiguration>();
-    URL fsAllocationURL = getURL(fsAllocationPath);
-    if (fsAllocationURL == null) {
-      throw new IllegalArgumentException(
-          "Unable to find allocation configuration file: " + fsAllocationPath);
-    }
-    Configuration allocConf = new Configuration(false);
-    allocConf.set(FairSchedulerConfiguration.ALLOCATION_FILE, fsAllocationURL.getPath());
-    allocLoader_ = new AllocationFileLoaderService();
-    allocLoader_.init(allocConf);
-
-    if (!Strings.isNullOrEmpty(llamaSitePath)) {
-      llamaConfUrl_ = getURL(llamaSitePath);
-      if (llamaConfUrl_ == null) {
-        throw new IllegalArgumentException(
-            "Unable to find Llama configuration file: " + llamaSitePath);
-      }
-      llamaConf_ = new Configuration(false);
-      llamaConf_.addResource(llamaConfUrl_);
-      llamaConfWatcher_ = new FileWatchService(new File(llamaConfUrl_.getPath()),
-          new LlamaConfWatcher());
-    } else {
-      llamaConfWatcher_ = null;
-      llamaConfUrl_ = null;
-    }
-  }
-
-  /**
-   * Returns a {@link URL} for the file if it exists, null otherwise.
-   */
-  @VisibleForTesting
-  static URL getURL(String path) {
-    Preconditions.checkNotNull(path);
-    File file = new File(path);
-    file = file.getAbsoluteFile();
-    if (!file.exists()) {
-      LOG.error("Unable to find specified file: " + path);
-      return null;
-    }
-    try {
-      return file.toURI().toURL();
-    } catch (MalformedURLException ex) {
-      LOG.error("Unable to construct URL for file: " + path, ex);
-      return null;
-    }
-  }
-
-  /**
-   * Starts the RequestPoolService instance. It does the initial loading of the
-   * configuration and starts the automatic reloading.
-   */
-  public void start() {
-    Preconditions.checkState(!running_.get());
-    allocLoader_.setReloadListener(new AllocationFileLoaderService.Listener() {
-      @Override
-      public void onReload(AllocationConfiguration info) {
-        allocationConf_.set(info);
-      }
-    });
-    allocLoader_.start();
-    try {
-      allocLoader_.reloadAllocations();
-    } catch (Exception ex) {
-      try {
-        stopInternal();
-      } catch (Exception stopEx) {
-        LOG.error("Unable to stop AllocationFileLoaderService after failed start.",
-            stopEx);
-      }
-      throw new RuntimeException(ex);
-    }
-    if (llamaConfWatcher_ != null) llamaConfWatcher_.start();
-    running_.set(true);
-  }
-
-  /**
-   * Stops the RequestPoolService instance. Only used by tests.
-   */
-  public void stop() {
-    Preconditions.checkState(running_.get());
-    stopInternal();
-  }
-
-  /**
-   * Stops the RequestPoolService instance without checking the running state. Only
-   * called by stop() (which is only used in tests) or by start() if a failure occurs.
-   * Should not be called more than once.
-   */
-  private void stopInternal() {
-    running_.set(false);
-    if (llamaConfWatcher_ != null) llamaConfWatcher_.stop();
-    allocLoader_.stop();
-  }
-
-  /**
-   * Resolves a user and pool to the pool specified by the allocation placement policy
-   * and checks if the user is authorized to submit requests.
-   *
-   * @param thriftResolvePoolParams Serialized {@link TResolveRequestPoolParams}
-   * @return serialized {@link TResolveRequestPoolResult}
-   */
-  public byte[] resolveRequestPool(byte[] thriftResolvePoolParams)
-      throws ImpalaException {
-    TResolveRequestPoolParams resolvePoolParams = new TResolveRequestPoolParams();
-    JniUtil.deserializeThrift(protocolFactory_, resolvePoolParams,
-        thriftResolvePoolParams);
-    TResolveRequestPoolResult result = resolveRequestPool(resolvePoolParams);
-    LOG.info("resolveRequestPool(pool={}, user={}): resolved_pool={}, has_access={}",
-        new Object[] { resolvePoolParams.getRequested_pool(), resolvePoolParams.getUser(),
-                       result.resolved_pool, result.has_access });
-    try {
-      return new TSerializer(protocolFactory_).serialize(result);
-    } catch (TException e) {
-      throw new InternalException(e.getMessage());
-    }
-  }
-
-  @VisibleForTesting
-  TResolveRequestPoolResult resolveRequestPool(
-      TResolveRequestPoolParams resolvePoolParams) throws InternalException {
-    String requestedPool = resolvePoolParams.getRequested_pool();
-    String user = resolvePoolParams.getUser();
-    TResolveRequestPoolResult result = new TResolveRequestPoolResult();
-    String errorMessage = null;
-    String pool = null;
-    try {
-      pool = assignToPool(requestedPool, user);
-    } catch (IOException ex) {
-      errorMessage = ex.getMessage();
-      if (errorMessage.startsWith("No groups found for user")) {
-        // The error thrown when using the 'primaryGroup' or 'secondaryGroup' rules when
-        // the user does not exist is not helpful.
-        errorMessage = String.format(
-            "Failed to resolve user '%s' to a pool while evaluating the " +
-            "'primaryGroup' or 'secondaryGroup' queue placement rules because no " +
-            "groups were found for the user. This is likely because the user does not " +
-            "exist on the local operating system.", resolvePoolParams.getUser());
-      }
-      LOG.warn(String.format("Error assigning to pool. requested='%s', user='%s', msg=%s",
-          requestedPool, user, errorMessage), ex);
-    }
-    if (pool == null) {
-      if (errorMessage == null) {
-        // This occurs when assignToPool returns null (not an error), i.e. if the pool
-        // cannot be resolved according to the policy.
-        result.setStatus(new TStatus(TErrorCode.OK, Lists.<String>newArrayList()));
-      } else {
-        // If Yarn throws an exception, return an error status.
-        result.setStatus(
-            new TStatus(TErrorCode.INTERNAL_ERROR, Lists.newArrayList(errorMessage)));
-      }
-    } else {
-      result.setResolved_pool(pool);
-      result.setHas_access(hasAccess(pool, user));
-      result.setStatus(new TStatus(TErrorCode.OK, Lists.<String>newArrayList()));
-    }
-    return result;
-  }
-
-  /**
-   * Gets the pool configuration values for the specified pool.
-   *
-   * @param thriftPoolConfigParams Serialized {@link TPoolConfigParams}
-   * @return serialized {@link TPoolConfig}
-   */
-  public byte[] getPoolConfig(byte[] thriftPoolConfigParams) throws ImpalaException {
-    Preconditions.checkState(running_.get());
-    TPoolConfigParams poolConfigParams = new TPoolConfigParams();
-    JniUtil.deserializeThrift(protocolFactory_, poolConfigParams,
-        thriftPoolConfigParams);
-    TPoolConfig result = getPoolConfig(poolConfigParams.getPool());
-    try {
-      return new TSerializer(protocolFactory_).serialize(result);
-    } catch (TException e) {
-      throw new InternalException(e.getMessage());
-    }
-  }
-
-  @VisibleForTesting
-  TPoolConfig getPoolConfig(String pool) {
-    TPoolConfig result = new TPoolConfig();
-    long maxMemoryMb = allocationConf_.get().getMaxResources(pool).getMemory();
-    result.setMax_mem_resources(
-        maxMemoryMb == Integer.MAX_VALUE ? -1 : (long) maxMemoryMb * ByteUnits.MEGABYTE);
-    if (llamaConf_ == null) {
-      result.setMax_requests(LLAMA_MAX_PLACED_RESERVATIONS_DEFAULT);
-      result.setMax_queued(LLAMA_MAX_QUEUED_RESERVATIONS_DEFAULT);
-      result.setDefault_query_options("");
-    } else {
-      // Capture the current llamaConf_ in case it changes while we're using it.
-      Configuration currentLlamaConf = llamaConf_;
-      result.setMax_requests(getLlamaPoolConfigValue(currentLlamaConf, pool,
-          LLAMA_MAX_PLACED_RESERVATIONS_KEY,
-          LLAMA_MAX_PLACED_RESERVATIONS_DEFAULT));
-      result.setMax_queued(getLlamaPoolConfigValue(currentLlamaConf, pool,
-          LLAMA_MAX_QUEUED_RESERVATIONS_KEY,
-          LLAMA_MAX_QUEUED_RESERVATIONS_DEFAULT));
-
-      // Only return positive values. Admission control has a default from gflags.
-      int queueTimeoutMs = getLlamaPoolConfigValue(currentLlamaConf, pool,
-          QUEUE_TIMEOUT_KEY, -1);
-      if (queueTimeoutMs > 0) result.setQueue_timeout_ms(queueTimeoutMs);
-      result.setDefault_query_options(getLlamaPoolConfigValue(currentLlamaConf, pool,
-          QUERY_OPTIONS_KEY, ""));
-    }
-    LOG.info("getPoolConfig(pool={}): max_mem_resources={}, max_requests={}, " +
-        "max_queued={},  queue_timeout_ms={}, default_query_options={}",
-        new Object[] { pool, result.max_mem_resources, result.max_requests,
-            result.max_queued, result.queue_timeout_ms, result.default_query_options });
-    return result;
-  }
-
-  /**
-   * Looks up the per-pool integer config from the llama Configuration. First checks for
-   * a per-pool value, then a default set in the config, and lastly falls back to the
-   * specified 'defaultValue'.
-   *
-   * @param conf The Configuration to use, provided so the caller can ensure the same
-   *        Configuration is used to look up multiple properties.
-   */
-  private int getLlamaPoolConfigValue(Configuration conf, String pool, String key,
-      int defaultValue) {
-    return conf.getInt(String.format(LLAMA_PER_POOL_CONFIG_KEY_FORMAT, key, pool),
-        conf.getInt(key, defaultValue));
-  }
-
-  /**
-   * Looks up the per-pool String config from the llama Configuration. See above.
-   */
-  private String getLlamaPoolConfigValue(Configuration conf, String pool, String key,
-      String defaultValue) {
-    return conf.get(String.format(LLAMA_PER_POOL_CONFIG_KEY_FORMAT, key, pool),
-        conf.get(key, defaultValue));
-  }
-
-  /**
-   * Resolves the actual pool to use via the allocation placement policy. The policy may
-   * change the requested pool.
-   *
-   * @param requestedPool The requested pool. Must not be null; an empty string indicates
-   * that the policy should return the default pool for this user.
-   * @param user The user, must not be null or empty.
-   * @return the actual pool to use, null if a pool could not be resolved.
-   */
-  @VisibleForTesting
-  String assignToPool(String requestedPool, String user)
-      throws InternalException, IOException {
-    Preconditions.checkState(running_.get());
-    Preconditions.checkNotNull(requestedPool);
-    Preconditions.checkArgument(!Strings.isNullOrEmpty(user));
-    // Convert the user name to a short name (e.g. 'user1@domain' to 'user1') because
-    // assignAppToQueue() will check group membership which should always be done on
-    // the short name of the principal.
-    String shortName = new User(user).getShortName();
-    return allocationConf_.get().getPlacementPolicy().assignAppToQueue(
-        requestedPool.isEmpty() ? YarnConfiguration.DEFAULT_QUEUE_NAME : requestedPool,
-        shortName);
-  }
-
-  /**
-   * Indicates if a user has access to the pool.
-   *
-   * @param pool the pool to check if the user has access to. NOTE: it should always be
-   * called with a pool returned by the {@link #assignToPool(String, String)} method.
-   * @param user the user to check if it has access to the pool.
-   * @return True if the user has access to the pool.
-   */
-  @VisibleForTesting
-  boolean hasAccess(String pool, String user) throws InternalException {
-    Preconditions.checkState(running_.get());
-    Preconditions.checkArgument(!Strings.isNullOrEmpty(pool));
-    Preconditions.checkArgument(!Strings.isNullOrEmpty(user));
-    // Convert the user name to a short name (e.g. 'user1@domain' to 'user1') because
-    // the UserGroupInformation will check group membership which should always be done
-    // on the short name of the principal.
-    String shortName;
-    User requestingUser = new User(user);
-    shortName = requestingUser.getShortName();
-    UserGroupInformation ugi = UserGroupInformation.createRemoteUser(shortName);
-    return allocationConf_.get().hasAccess(pool, QueueACL.SUBMIT_APPLICATIONS, ugi);
-  }
-}
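
For reference, a minimal self-contained sketch of the lookup precedence that getLlamaPoolConfigValue() above implements: a per-pool value wins, then a global value from the configuration, then the caller-supplied default. It uses java.util.Properties instead of the Hadoop Configuration, and the per-pool key format and property names are purely illustrative assumptions, not the constants from the deleted class.

import java.util.Properties;

public class PoolConfigLookupSketch {
  // Hypothetical per-pool key format ("<key>.<pool>"); the real format string is
  // the LLAMA_PER_POOL_CONFIG_KEY_FORMAT constant in the deleted class.
  private static String perPoolKey(String key, String pool) {
    return key + "." + pool;
  }

  // Per-pool value first, then the global key, then the caller's default.
  static int getPoolConfigValue(Properties conf, String pool, String key,
      int defaultValue) {
    String global = conf.getProperty(key, String.valueOf(defaultValue));
    return Integer.parseInt(conf.getProperty(perPoolKey(key, pool), global));
  }

  public static void main(String[] args) {
    Properties conf = new Properties();
    conf.setProperty("example.max.requests", "200");
    conf.setProperty("example.max.requests.poolA", "50");
    System.out.println(getPoolConfigValue(conf, "poolA", "example.max.requests", 20)); // 50
    System.out.println(getPoolConfigValue(conf, "poolB", "example.max.requests", 20)); // 200
    System.out.println(getPoolConfigValue(conf, "poolB", "example.max.queued", 20));   // 20
  }
}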

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/util/SentryPolicyService.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/util/SentryPolicyService.java b/fe/src/main/java/com/cloudera/impala/util/SentryPolicyService.java
deleted file mode 100644
index 4f39b3c..0000000
--- a/fe/src/main/java/com/cloudera/impala/util/SentryPolicyService.java
+++ /dev/null
@@ -1,466 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.util;
-
-import java.util.List;
-
-import org.apache.sentry.SentryUserException;
-import org.apache.sentry.provider.db.SentryAccessDeniedException;
-import org.apache.sentry.provider.db.SentryAlreadyExistsException;
-import org.apache.sentry.provider.db.service.thrift.SentryPolicyServiceClient;
-import org.apache.sentry.provider.db.service.thrift.TSentryGrantOption;
-import org.apache.sentry.provider.db.service.thrift.TSentryPrivilege;
-import org.apache.sentry.provider.db.service.thrift.TSentryRole;
-import org.apache.sentry.service.thrift.SentryServiceClientFactory;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.analysis.PrivilegeSpec;
-import com.cloudera.impala.authorization.SentryConfig;
-import com.cloudera.impala.authorization.User;
-import com.cloudera.impala.catalog.AuthorizationException;
-import com.cloudera.impala.catalog.RolePrivilege;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.thrift.TPrivilege;
-import com.cloudera.impala.thrift.TPrivilegeLevel;
-import com.cloudera.impala.thrift.TPrivilegeScope;
-import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-/**
- *  Wrapper around the SentryService APIs that are used by Impala and Impala tests.
- */
-public class SentryPolicyService {
-  private final static Logger LOG = LoggerFactory.getLogger(SentryPolicyService.class);
-  private final String ACCESS_DENIED_ERROR_MSG =
-      "User '%s' does not have privileges to execute: %s";
-  private final SentryConfig config_;
-
-  /**
-   * Wrapper around a SentryPolicyServiceClient.
-   * TODO: When SENTRY-296 is resolved we can more easily cache connections instead of
-   * opening a new connection for each request.
-   */
-  class SentryServiceClient {
-    private final SentryPolicyServiceClient client_;
-
-    /**
-     * Creates and opens a new Sentry Service thrift client.
-     */
-    public SentryServiceClient() throws InternalException {
-      client_ = createClient();
-    }
-
-    /**
-     * Get the underlying SentryPolicyServiceClient.
-     */
-    public SentryPolicyServiceClient get() {
-      return client_;
-    }
-
-    /**
-     * Returns this client back to the connection pool. Can be called multiple times.
-     */
-    public void close() {
-      client_.close();
-    }
-
-    /**
-     * Creates a new client to the SentryService.
-     */
-    private SentryPolicyServiceClient createClient() throws InternalException {
-      SentryPolicyServiceClient client;
-      try {
-        client = SentryServiceClientFactory.create(config_.getConfig());
-      } catch (Exception e) {
-        throw new InternalException("Error creating Sentry Service client: ", e);
-      }
-      return client;
-    }
-  }
-
-  public SentryPolicyService(SentryConfig config) {
-    config_ = config;
-  }
-
-  /**
-   * Drops a role.
-   *
-   * @param requestingUser - The requesting user.
-   * @param roleName - The role to drop.
-   * @param ifExists - If true, no error is thrown if the role does not exist.
-   * @throws ImpalaException - On any error dropping the role.
-   */
-  public void dropRole(User requestingUser, String roleName, boolean ifExists)
-      throws ImpalaException {
-    LOG.trace(String.format("Dropping role: %s on behalf of: %s", roleName,
-        requestingUser.getName()));
-    SentryServiceClient client = new SentryServiceClient();
-    try {
-      if (ifExists) {
-        client.get().dropRoleIfExists(requestingUser.getShortName(), roleName);
-      } else {
-        client.get().dropRole(requestingUser.getShortName(), roleName);
-      }
-    } catch (SentryAccessDeniedException e) {
-      throw new AuthorizationException(String.format(ACCESS_DENIED_ERROR_MSG,
-          requestingUser.getName(), "DROP_ROLE"));
-    } catch (SentryUserException e) {
-      throw new InternalException("Error dropping role: ", e);
-    } finally {
-      client.close();
-    }
-  }
-
-  /**
-   * Creates a new role.
-   *
-   * @param requestingUser - The requesting user.
-   * @param roleName - The role to create.
-   * @param ifNotExists - If true, no error is thrown if the role already exists.
-   * @throws ImpalaException - On any error creating the role.
-   */
-  public void createRole(User requestingUser, String roleName, boolean ifNotExists)
-      throws ImpalaException {
-    LOG.trace(String.format("Creating role: %s on behalf of: %s", roleName,
-        requestingUser.getName()));
-    SentryServiceClient client = new SentryServiceClient();
-    try {
-      client.get().createRole(requestingUser.getShortName(), roleName);
-    } catch (SentryAccessDeniedException e) {
-      throw new AuthorizationException(String.format(ACCESS_DENIED_ERROR_MSG,
-          requestingUser.getName(), "CREATE_ROLE"));
-    } catch (SentryAlreadyExistsException e) {
-      if (ifNotExists) return;
-      throw new InternalException("Error creating role: ", e);
-    } catch (SentryUserException e) {
-      throw new InternalException("Error creating role: ", e);
-    } finally {
-      client.close();
-    }
-  }
-
-  /**
-   * Grants a role to a group.
-   *
-   * @param requestingUser - The requesting user.
-   * @param roleName - The role to grant to a group. Role must already exist.
-   * @param groupName - The group to grant the role to.
-   * @throws ImpalaException - On any error.
-   */
-  public void grantRoleToGroup(User requestingUser, String roleName, String groupName)
-      throws ImpalaException {
-    LOG.trace(String.format("Granting role '%s' to group '%s' on behalf of: %s",
-        roleName, groupName, requestingUser.getName()));
-    SentryServiceClient client = new SentryServiceClient();
-    try {
-      client.get().grantRoleToGroup(requestingUser.getShortName(), groupName, roleName);
-    } catch (SentryAccessDeniedException e) {
-      throw new AuthorizationException(String.format(ACCESS_DENIED_ERROR_MSG,
-          requestingUser.getName(), "GRANT_ROLE"));
-    } catch (SentryUserException e) {
-      throw new InternalException(
-          "Error making 'grantRoleToGroup' RPC to Sentry Service: ", e);
-    } finally {
-      client.close();
-    }
-  }
-
-  /**
-   * Removes a role from a group.
-   *
-   * @param requestingUser - The requesting user.
-   * @param roleName - The role name to remove.
-   * @param groupName - The group to remove the role from.
-   * @throws InternalException - On any error.
-   */
-  public void revokeRoleFromGroup(User requestingUser, String roleName, String groupName)
-      throws ImpalaException {
-    LOG.trace(String.format("Revoking role '%s' from group '%s' on behalf of: %s",
-        roleName, groupName, requestingUser.getName()));
-    SentryServiceClient client = new SentryServiceClient();
-    try {
-      client.get().revokeRoleFromGroup(requestingUser.getShortName(),
-          groupName, roleName);
-    } catch (SentryAccessDeniedException e) {
-      throw new AuthorizationException(String.format(ACCESS_DENIED_ERROR_MSG,
-          requestingUser.getName(), "REVOKE_ROLE"));
-    } catch (SentryUserException e) {
-      throw new InternalException(
-          "Error making 'revokeRoleFromGroup' RPC to Sentry Service: ", e);
-    } finally {
-      client.close();
-    }
-  }
-
-  /**
-   * Grants a privilege to an existing role.
-   */
-  public void grantRolePrivilege(User requestingUser, String roleName,
-      TPrivilege privilege) throws ImpalaException {
-    grantRolePrivileges(requestingUser, roleName, Lists.newArrayList(privilege));
-  }
-
-  /**
-   * Grants privileges to an existing role.
-   *
-   * @param requestingUser - The requesting user.
-   * @param roleName - The role to grant privileges to (case insensitive).
-   * @param privileges - The privileges to grant.
-   * @throws ImpalaException - On any error
-   */
-  public void grantRolePrivileges(User requestingUser, String roleName,
-      List<TPrivilege> privileges) throws ImpalaException {
-    Preconditions.checkState(!privileges.isEmpty());
-    TPrivilege privilege = privileges.get(0);
-    TPrivilegeScope scope = privilege.getScope();
-    LOG.trace(String.format("Granting role '%s' '%s' privilege on '%s' on behalf of: %s",
-        roleName, privilege.getPrivilege_level().toString(), scope.toString(),
-        requestingUser.getName()));
-    // Verify that all privileges have the same scope.
-    for (int i = 1; i < privileges.size(); ++i) {
-      Preconditions.checkState(privileges.get(i).getScope() == scope, "All the " +
-          "privileges must have the same scope.");
-    }
-    Preconditions.checkState(scope == TPrivilegeScope.COLUMN || privileges.size() == 1,
-        "Cannot grant multiple " + scope + " privileges with a single RPC to the " +
-        "Sentry Service.");
-    SentryServiceClient client = new SentryServiceClient();
-    try {
-      switch (scope) {
-        case SERVER:
-          client.get().grantServerPrivilege(requestingUser.getShortName(), roleName,
-              privilege.getServer_name(), privilege.getPrivilege_level().toString(),
-              privilege.isHas_grant_opt());
-          break;
-        case DATABASE:
-          client.get().grantDatabasePrivilege(requestingUser.getShortName(), roleName,
-              privilege.getServer_name(), privilege.getDb_name(),
-              privilege.getPrivilege_level().toString(),
-              privilege.isHas_grant_opt());
-          break;
-        case TABLE:
-          client.get().grantTablePrivilege(requestingUser.getShortName(), roleName,
-              privilege.getServer_name(), privilege.getDb_name(),
-              privilege.getTable_name(), privilege.getPrivilege_level().toString(),
-              privilege.isHas_grant_opt());
-          break;
-        case COLUMN:
-          client.get().grantColumnsPrivileges(requestingUser.getShortName(), roleName,
-              privilege.getServer_name(), privilege.getDb_name(),
-              privilege.getTable_name(), getColumnNames(privileges),
-              privilege.getPrivilege_level().toString(), privilege.isHas_grant_opt());
-          break;
-        case URI:
-          client.get().grantURIPrivilege(requestingUser.getShortName(),
-              roleName, privilege.getServer_name(), privilege.getUri(),
-              privilege.isHas_grant_opt());
-          break;
-      }
-    } catch (SentryAccessDeniedException e) {
-      throw new AuthorizationException(String.format(ACCESS_DENIED_ERROR_MSG,
-          requestingUser.getName(), "GRANT_PRIVILEGE"));
-    } catch (SentryUserException e) {
-      throw new InternalException(
-          "Error making 'grantPrivilege*' RPC to Sentry Service: ", e);
-    } finally {
-      client.close();
-    }
-  }
-
-  /**
-   * Revokes a privilege from an existing role.
-   */
-  public void revokeRolePrivilege(User requestingUser, String roleName,
-      TPrivilege privilege) throws ImpalaException {
-    revokeRolePrivileges(requestingUser, roleName, Lists.newArrayList(privilege));
-  }
-
-  /**
-   * Revokes privileges from an existing role.
-   *
-   * @param requestingUser - The requesting user.
-   * @param roleName - The role to revoke privileges from (case insensitive).
-   * @param privileges - The privileges to revoke.
-   * @throws ImpalaException - On any error
-   */
-  public void revokeRolePrivileges(User requestingUser, String roleName,
-      List<TPrivilege> privileges) throws ImpalaException {
-    Preconditions.checkState(!privileges.isEmpty());
-    TPrivilege privilege = privileges.get(0);
-    TPrivilegeScope scope = privilege.getScope();
-    LOG.trace(String.format("Revoking from role '%s' '%s' privilege on '%s' on " +
-        "behalf of: %s", roleName, privilege.getPrivilege_level().toString(),
-        scope.toString(), requestingUser.getName()));
-    // Verify that all privileges have the same scope.
-    for (int i = 1; i < privileges.size(); ++i) {
-      Preconditions.checkState(privileges.get(i).getScope() == scope, "All the " +
-          "privileges must have the same scope.");
-    }
-    Preconditions.checkState(scope == TPrivilegeScope.COLUMN || privileges.size() == 1,
-        "Cannot revoke multiple " + scope + " privileges with a single RPC to the " +
-        "Sentry Service.");
-    SentryServiceClient client = new SentryServiceClient();
-    try {
-      switch (scope) {
-        case SERVER:
-          client.get().revokeServerPrivilege(requestingUser.getShortName(), roleName,
-              privilege.getServer_name(), privilege.getPrivilege_level().toString());
-          break;
-        case DATABASE:
-          client.get().revokeDatabasePrivilege(requestingUser.getShortName(), roleName,
-              privilege.getServer_name(), privilege.getDb_name(),
-              privilege.getPrivilege_level().toString(), null);
-          break;
-        case TABLE:
-          client.get().revokeTablePrivilege(requestingUser.getShortName(), roleName,
-              privilege.getServer_name(), privilege.getDb_name(),
-              privilege.getTable_name(), privilege.getPrivilege_level().toString(),
-              null);
-          break;
-        case COLUMN:
-          client.get().revokeColumnsPrivilege(requestingUser.getShortName(), roleName,
-              privilege.getServer_name(), privilege.getDb_name(),
-              privilege.getTable_name(), getColumnNames(privileges),
-              privilege.getPrivilege_level().toString(), null);
-          break;
-        case URI:
-          client.get().revokeURIPrivilege(requestingUser.getShortName(),
-              roleName, privilege.getServer_name(), privilege.getUri(),
-              null);
-          break;
-      }
-    } catch (SentryAccessDeniedException e) {
-      throw new AuthorizationException(String.format(ACCESS_DENIED_ERROR_MSG,
-          requestingUser.getName(), "REVOKE_PRIVILEGE"));
-    } catch (SentryUserException e) {
-      throw new InternalException(
-          "Error making 'revokePrivilege*' RPC to Sentry Service: ", e);
-    } finally {
-      client.close();
-    }
-  }
-
-  /**
-   * Returns the column names referenced in a list of column-level privileges.
-   * Verifies that all column-level privileges refer to the same table.
-   */
-  private List<String> getColumnNames(List<TPrivilege> privileges) {
-    List<String> columnNames = Lists.newArrayList();
-    String tablePath = PrivilegeSpec.getTablePath(privileges.get(0));
-    columnNames.add(privileges.get(0).getColumn_name());
-    // Collect all column names and verify that they belong to the same table.
-    for (int i = 1; i < privileges.size(); ++i) {
-      TPrivilege privilege = privileges.get(i);
-      Preconditions.checkState(tablePath.equals(PrivilegeSpec.getTablePath(privilege))
-          && privilege.getScope() == TPrivilegeScope.COLUMN);
-      columnNames.add(privileges.get(i).getColumn_name());
-    }
-    return columnNames;
-  }
-
-  /**
-   * Lists all roles granted to all groups a user belongs to.
-   */
-  public List<TSentryRole> listUserRoles(User requestingUser)
-      throws ImpalaException {
-    SentryServiceClient client = new SentryServiceClient();
-    try {
-      return Lists.newArrayList(client.get().listUserRoles(
-          requestingUser.getShortName()));
-    } catch (SentryAccessDeniedException e) {
-      throw new AuthorizationException(String.format(ACCESS_DENIED_ERROR_MSG,
-          requestingUser.getName(), "LIST_USER_ROLES"));
-    } catch (SentryUserException e) {
-      throw new InternalException(
-          "Error making 'listUserRoles' RPC to Sentry Service: ", e);
-    } finally {
-      client.close();
-    }
-  }
-
-  /**
-   * Lists all roles.
-   */
-  public List<TSentryRole> listAllRoles(User requestingUser) throws ImpalaException {
-    SentryServiceClient client = new SentryServiceClient();
-    try {
-      return Lists.newArrayList(client.get().listRoles(requestingUser.getShortName()));
-    } catch (SentryAccessDeniedException e) {
-      throw new AuthorizationException(String.format(ACCESS_DENIED_ERROR_MSG,
-          requestingUser.getName(), "LIST_ROLES"));
-    } catch (SentryUserException e) {
-      throw new InternalException("Error making 'listRoles' RPC to Sentry Service: ", e);
-    } finally {
-      client.close();
-    }
-  }
-
-  /**
-   * Lists all privileges granted to a role.
-   */
-  public List<TSentryPrivilege> listRolePrivileges(User requestingUser, String roleName)
-      throws ImpalaException {
-    SentryServiceClient client = new SentryServiceClient();
-    try {
-      return Lists.newArrayList(client.get().listAllPrivilegesByRoleName(
-          requestingUser.getShortName(), roleName));
-    } catch (SentryAccessDeniedException e) {
-      throw new AuthorizationException(String.format(ACCESS_DENIED_ERROR_MSG,
-          requestingUser.getName(), "LIST_ROLE_PRIVILEGES"));
-    } catch (SentryUserException e) {
-      throw new InternalException("Error making 'listAllPrivilegesByRoleName' RPC to " +
-          "Sentry Service: ", e);
-    } finally {
-      client.close();
-    }
-  }
-
-  /**
-   * Utility function that converts a TSentryPrivilege to an Impala TPrivilege object.
-   */
-  public static TPrivilege sentryPrivilegeToTPrivilege(TSentryPrivilege sentryPriv) {
-    TPrivilege privilege = new TPrivilege();
-    privilege.setServer_name(sentryPriv.getServerName());
-    if (sentryPriv.isSetDbName()) privilege.setDb_name(sentryPriv.getDbName());
-    if (sentryPriv.isSetTableName()) privilege.setTable_name(sentryPriv.getTableName());
-    if (sentryPriv.isSetColumnName()) {
-      privilege.setColumn_name(sentryPriv.getColumnName());
-    }
-    if (sentryPriv.isSetURI()) privilege.setUri(sentryPriv.getURI());
-    privilege.setScope(Enum.valueOf(TPrivilegeScope.class,
-        sentryPriv.getPrivilegeScope().toUpperCase()));
-    if (sentryPriv.getAction().equals("*")) {
-      privilege.setPrivilege_level(TPrivilegeLevel.ALL);
-    } else {
-      privilege.setPrivilege_level(Enum.valueOf(TPrivilegeLevel.class,
-          sentryPriv.getAction().toUpperCase()));
-    }
-    privilege.setPrivilege_name(RolePrivilege.buildRolePrivilegeName(privilege));
-    privilege.setCreate_time_ms(sentryPriv.getCreateTime());
-    if (sentryPriv.isSetGrantOption() &&
-        sentryPriv.getGrantOption() == TSentryGrantOption.TRUE) {
-      privilege.setHas_grant_opt(true);
-    } else {
-      privilege.setHas_grant_opt(false);
-    }
-    return privilege;
-  }
-}
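
The batching rule that grantRolePrivileges() and revokeRolePrivileges() above enforce with Preconditions (every privilege in one call must share a scope, and only COLUMN-scope privileges may be batched into a single RPC) can be sketched in isolation. The enum and exception below are simplified stand-ins, not the actual Thrift types:

import java.util.Arrays;
import java.util.List;

public class PrivilegeBatchCheckSketch {
  // Simplified stand-in for TPrivilegeScope.
  enum Scope { SERVER, DATABASE, TABLE, COLUMN, URI }

  // Mirrors the checks: a uniform scope across the batch, and batches of more
  // than one privilege are only allowed at COLUMN scope.
  static void checkBatch(List<Scope> scopes) {
    if (scopes.isEmpty()) throw new IllegalStateException("Empty privilege batch.");
    Scope scope = scopes.get(0);
    for (int i = 1; i < scopes.size(); ++i) {
      if (scopes.get(i) != scope) {
        throw new IllegalStateException("All the privileges must have the same scope.");
      }
    }
    if (scope != Scope.COLUMN && scopes.size() > 1) {
      throw new IllegalStateException(
          "Cannot grant multiple " + scope + " privileges with a single RPC.");
    }
  }

  public static void main(String[] args) {
    checkBatch(Arrays.asList(Scope.COLUMN, Scope.COLUMN)); // OK: columns batch into one RPC.
    checkBatch(Arrays.asList(Scope.TABLE));                // OK: a single privilege.
    try {
      checkBatch(Arrays.asList(Scope.TABLE, Scope.TABLE)); // Rejected.
    } catch (IllegalStateException e) {
      System.out.println(e.getMessage());
    }
  }
}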

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/util/SentryProxy.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/util/SentryProxy.java b/fe/src/main/java/com/cloudera/impala/util/SentryProxy.java
deleted file mode 100644
index 76e4931..0000000
--- a/fe/src/main/java/com/cloudera/impala/util/SentryProxy.java
+++ /dev/null
@@ -1,348 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.util;
-
-import java.util.List;
-import java.util.Set;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.log4j.Logger;
-import org.apache.sentry.provider.db.service.thrift.TSentryGroup;
-import org.apache.sentry.provider.db.service.thrift.TSentryPrivilege;
-import org.apache.sentry.provider.db.service.thrift.TSentryRole;
-
-import com.cloudera.impala.authorization.SentryConfig;
-import com.cloudera.impala.authorization.User;
-import com.cloudera.impala.catalog.AuthorizationException;
-import com.cloudera.impala.catalog.CatalogException;
-import com.cloudera.impala.catalog.CatalogServiceCatalog;
-import com.cloudera.impala.catalog.Role;
-import com.cloudera.impala.catalog.RolePrivilege;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.common.ImpalaRuntimeException;
-import com.cloudera.impala.thrift.TPrivilege;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Strings;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Sets;
-
-/**
- * Thread safe class that acts as a link between the Sentry Service and the Catalog
- * to ensure both places are updated consistently. More specifically, this class
- * synchronizes updates to the Sentry Service and the Impala catalog to ensure
- * they are applied atomically (in Impala's view) and only if reading/writing the
- * policy via the Sentry Service succeeds. Note that there may be external updates
- * to the Sentry Service that cannot be protected against.
- * It also periodically refreshes the authorization policy metadata and updates the
- * catalog with any changes. Because any catalog updates need to be synchronized with
- * updates from GRANT/REVOKE statements, it makes sense for this class to
- * synchronize all modifications.
- */
-public class SentryProxy {
-  private static final Logger LOG = Logger.getLogger(SentryProxy.class);
-
-  // Used to periodically poll the Sentry Service and update the catalog with any
-  // changes.
-  private final ScheduledExecutorService policyReader_ =
-      Executors.newScheduledThreadPool(1);
-
-  // The Catalog this SentryProxy is associated with.
-  private final CatalogServiceCatalog catalog_;
-
-  // The interface to access the Sentry Policy Service to read policy metadata.
-  private final SentryPolicyService sentryPolicyService_;
-
-  // This is the user that the Catalog Service is running as. For kerberized clusters,
-  // this is set to the Kerberos principal of Catalog. This user should always be a
-  // Sentry Service admin => have full rights to read/update the Sentry Service.
-  private final User processUser_;
-
-  public SentryProxy(SentryConfig sentryConfig, CatalogServiceCatalog catalog,
-      String kerberosPrincipal) {
-    Preconditions.checkNotNull(catalog);
-    Preconditions.checkNotNull(sentryConfig);
-    catalog_ = catalog;
-    if (Strings.isNullOrEmpty(kerberosPrincipal)) {
-      processUser_ = new User(System.getProperty("user.name"));
-    } else {
-      processUser_ = new User(kerberosPrincipal);
-    }
-    sentryPolicyService_ = new SentryPolicyService(sentryConfig);
-    // Sentry Service is enabled.
-    // TODO: Make this configurable
-    policyReader_.scheduleAtFixedRate(new PolicyReader(), 0, 60,
-        TimeUnit.SECONDS);
-  }
-
-  /**
-   * Refreshes the authorization policy metadata by querying the Sentry Policy Service.
-   * There is currently no way to get a snapshot of the policy from the Sentry Service,
-   * so it is possible that Impala will end up in a state that is not consistent with a
-   * state the Sentry Service has ever been in. For example, consider the case where a
-   * refresh is running and all privileges for Role A have been processed. Before moving
-   * to Role B, the user revokes a privilege from Role A and grants it to Role B.
-   * Impala will temporarily (until the next refresh) think the privilege is granted to
-   * Role A AND to Role B.
-   * TODO: Think more about consistency as well as how to recover from errors that leave
-   * the policy in a potentially inconsistent state (an RPC fails part-way through a
-   * refresh). We should also consider applying this entire update to the catalog
-   * atomically.
-   */
-  private class PolicyReader implements Runnable {
-    public void run() {
-      synchronized (SentryProxy.this) {
-        // Assume all roles should be removed. Then query the Policy Service and remove
-        // roles from this set that actually exist.
-        Set<String> rolesToRemove = catalog_.getAuthPolicy().getAllRoleNames();
-        try {
-          // Read the full policy, adding new/modified roles to "updatedRoles".
-          for (TSentryRole sentryRole:
-              sentryPolicyService_.listAllRoles(processUser_)) {
-            // This role exists and should not be removed, delete it from the
-            // rolesToRemove set.
-            rolesToRemove.remove(sentryRole.getRoleName().toLowerCase());
-
-            Set<String> grantGroups = Sets.newHashSet();
-            for (TSentryGroup group: sentryRole.getGroups()) {
-              grantGroups.add(group.getGroupName());
-            }
-            Role existingRole =
-                catalog_.getAuthPolicy().getRole(sentryRole.getRoleName());
-            Role role;
-            // These roles are the same, use the current role.
-            if (existingRole != null &&
-                existingRole.getGrantGroups().equals(grantGroups)) {
-              role = existingRole;
-            } else {
-              role = catalog_.addRole(sentryRole.getRoleName(), grantGroups);
-            }
-
-            // Assume all privileges should be removed. Privileges that still exist are
-            // deleted from this set and we are left with the set of privileges that need
-            // to be removed.
-            Set<String> privilegesToRemove = role.getPrivilegeNames();
-
-            // Check all the privileges that are part of this role.
-            for (TSentryPrivilege sentryPriv:
-                sentryPolicyService_.listRolePrivileges(processUser_, role.getName())) {
-              TPrivilege thriftPriv =
-                  SentryPolicyService.sentryPrivilegeToTPrivilege(sentryPriv);
-              thriftPriv.setRole_id(role.getId());
-              privilegesToRemove.remove(thriftPriv.getPrivilege_name().toLowerCase());
-
-              RolePrivilege existingPriv =
-                  role.getPrivilege(thriftPriv.getPrivilege_name());
-              // We already know about this privilege (privileges cannot be modified).
-              if (existingPriv != null &&
-                  existingPriv.getCreateTimeMs() == sentryPriv.getCreateTime()) {
-                continue;
-              }
-              catalog_.addRolePrivilege(role.getName(), thriftPriv);
-            }
-
-            // Remove the privileges that no longer exist.
-            for (String privilegeName: privilegesToRemove) {
-              TPrivilege privilege = new TPrivilege();
-              privilege.setPrivilege_name(privilegeName);
-              catalog_.removeRolePrivilege(role.getName(), privilege);
-            }
-          }
-        } catch (Exception e) {
-          LOG.error("Error refreshing Sentry policy: ", e);
-          return;
-        }
-
-        // Remove all the roles, incrementing the catalog version to indicate
-        // a change.
-        for (String roleName: rolesToRemove) {
-          catalog_.removeRole(roleName);
-        }
-      }
-    }
-  }
-
-  /**
-   * Checks whether this user is an admin on the Sentry Service. Throws an
-   * AuthorizationException if the user does not have admin privileges or if there are
-   * any issues communicating with the Sentry Service.
-   * @param requestingUser - The requesting user.
-   */
-  public void checkUserSentryAdmin(User requestingUser)
-      throws AuthorizationException {
-    // Check if the user has access by issuing a read-only RPC.
-    // TODO: This is not an elegant way to verify whether the user has privileges to
-    // access Sentry. This should be modified in the future when Sentry has
-    // a more robust mechanism to perform these checks.
-    try {
-      sentryPolicyService_.listAllRoles(requestingUser);
-    } catch (ImpalaException e) {
-      throw new AuthorizationException(String.format("User '%s' does not have " +
-          "privileges to access the requested policy metadata or Sentry Service is " +
-          "unavailable.", requestingUser.getName()));
-    }
-  }
-
-  /**
-   * Creates a new role using the Sentry Service and updates the Impala catalog.
-   * If the RPC to the Sentry Service fails the Impala catalog will not
-   * be modified. Returns the new Role.
-   * Throws exception if there was any error updating the Sentry Service or
-   * if a role with the same name already exists in the catalog. This includes
-   * the case where a role was added externally (e.g. via Hive). If the role was added
-   * externally, Impala will load it during the next refresh of the policy.
-   * TODO: Consider adding the role to the policy if we find it was created
-   * externally.
-   */
-  public synchronized Role createRole(User user, String roleName)
-      throws ImpalaException {
-    Role role = null;
-    if (catalog_.getAuthPolicy().getRole(roleName) != null) {
-      throw new CatalogException("Role already exists: " + roleName);
-    }
-    sentryPolicyService_.createRole(user, roleName, false);
-    // Initially the role has no grant groups (empty set).
-    role = catalog_.addRole(roleName, Sets.<String>newHashSet());
-    return role;
-  }
-
-  /**
-   * Drops the given role using the Sentry Service and updates the Impala catalog.
-   * If the RPC to the Sentry Service fails the Impala catalog will not
-   * be modified. Returns the removed Role or null if the role did not exist in the
-   * Catalog.
-   * Throws exception if there was any error updating the Sentry Service.
-   */
-  public synchronized Role dropRole(User user, String roleName) throws ImpalaException {
-    sentryPolicyService_.dropRole(user, roleName, false);
-    return catalog_.removeRole(roleName);
-  }
-
-  /**
-   * Adds a role grant group using the Sentry Service and updates the Impala
-   * catalog. If the RPC to the Sentry Service fails the Impala catalog will not
-   * be modified. Returns the updated Role.
-   * Throws exception if there was any error updating the Sentry Service or if the Impala
-   * catalog does not contain the given role name.
-   */
-  public synchronized Role grantRoleGroup(User user, String roleName, String groupName)
-      throws ImpalaException {
-    sentryPolicyService_.grantRoleToGroup(user, roleName, groupName);
-    return catalog_.addRoleGrantGroup(roleName, groupName);
-  }
-
-  /**
-   * Removes the role grant group using the Sentry Service and updates the Impala
-   * catalog. If the RPC to the Sentry Service fails the Impala catalog will not
-   * be modified. Returns the updated Role.
-   * Throws exception if there was any error updating the Sentry Service or if the Impala
-   * catalog does not contain the given role name.
-   */
-  public synchronized Role revokeRoleGroup(User user, String roleName, String groupName)
-      throws ImpalaException {
-    sentryPolicyService_.revokeRoleFromGroup(user, roleName, groupName);
-    return catalog_.removeRoleGrantGroup(roleName, groupName);
-  }
-
-  /**
-   * Grants privileges to a role in the Sentry Service and updates the Impala
-   * catalog. If the RPC to the Sentry Service fails, the Impala catalog will not
-   * be modified. Returns the granted privileges.
-   * Throws exception if there was any error updating the Sentry Service or if the Impala
-   * catalog does not contain the given role name.
-   */
-  public synchronized List<RolePrivilege> grantRolePrivileges(User user,
-      String roleName, List<TPrivilege> privileges) throws ImpalaException {
-    sentryPolicyService_.grantRolePrivileges(user, roleName, privileges);
-    // Update the catalog
-    List<RolePrivilege> rolePrivileges = Lists.newArrayList();
-    for (TPrivilege privilege: privileges) {
-      rolePrivileges.add(catalog_.addRolePrivilege(roleName, privilege));
-    }
-    return rolePrivileges;
-  }
-
-  /**
-   * Revokes privileges from a role in the Sentry Service and updates the Impala
-   * catalog. If the RPC to the Sentry Service fails the Impala catalog will not be
-   * modified. Returns the removed privileges. Throws an exception if there was any error
-   * updating the Sentry Service or if the Impala catalog does not contain the given role
-   * name.
-   */
-  public synchronized List<RolePrivilege> revokeRolePrivileges(User user,
-      String roleName, List<TPrivilege> privileges, boolean hasGrantOption)
-      throws ImpalaException {
-    List<RolePrivilege> rolePrivileges = Lists.newArrayList();
-    if (!hasGrantOption) {
-      sentryPolicyService_.revokeRolePrivileges(user, roleName, privileges);
-      // Update the catalog
-      for (TPrivilege privilege: privileges) {
-        RolePrivilege rolePriv = catalog_.removeRolePrivilege(roleName, privilege);
-        if (rolePriv == null) {
-          rolePriv = RolePrivilege.fromThrift(privilege);
-          rolePriv.setCatalogVersion(catalog_.getCatalogVersion());
-        }
-        rolePrivileges.add(rolePriv);
-      }
-    } else {
-      // If the REVOKE GRANT OPTION has been specified, the privileges should not be
-      // removed, they should just be updated to clear the GRANT OPTION flag. Sentry
-      // does not yet provide an "alter privilege" API so we need to revoke the
-      // privileges and re-grant them.
-      sentryPolicyService_.revokeRolePrivileges(user, roleName, privileges);
-      List<TPrivilege> updatedPrivileges = Lists.newArrayList();
-      for (TPrivilege privilege: privileges) {
-        RolePrivilege existingPriv = catalog_.getRolePrivilege(roleName, privilege);
-        if (existingPriv == null) {
-          RolePrivilege rolePriv = RolePrivilege.fromThrift(privilege);
-          rolePriv.setCatalogVersion(catalog_.getCatalogVersion());
-          rolePrivileges.add(rolePriv);
-          continue;
-        }
-        TPrivilege updatedPriv = existingPriv.toThrift();
-        updatedPriv.setHas_grant_opt(false);
-        updatedPrivileges.add(updatedPriv);
-      }
-      // Re-grant the updated privileges.
-      sentryPolicyService_.grantRolePrivileges(user, roleName, updatedPrivileges);
-      // Update the catalog
-      for (TPrivilege updatedPriv: updatedPrivileges) {
-        rolePrivileges.add(catalog_.addRolePrivilege(roleName, updatedPriv));
-      }
-    }
-    return rolePrivileges;
-  }
-
-  /**
-   * Performs a synchronous refresh of all authorization policy metadata and updates
-   * the Catalog with any changes. Throws an ImpalaRuntimeException if there are any
-   * errors executing the refresh job.
-   */
-  public void refresh() throws ImpalaRuntimeException {
-    try {
-      policyReader_.submit(new PolicyReader()).get();
-    } catch (Exception e) {
-      // We shouldn't make it here. It means an exception leaked from the
-      // AuthorizationPolicyReader.
-      throw new ImpalaRuntimeException("Error refreshing authorization policy, " +
-          "current policy state may be inconsistent. Running 'invalidate metadata' " +
-          "may resolve this problem: ", e);
-    }
-  }
-}
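
The PolicyReader refresh above follows a mark-and-sweep pattern: assume every locally known role (and, per role, every privilege) is stale, un-mark each name the Sentry Service still reports, and remove whatever stays marked. A stand-alone sketch of that pattern with plain sets in place of the catalog and the Sentry RPCs:

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class MarkAndSweepSketch {
  // Returns the names known locally that the remote service no longer reports.
  static Set<String> staleNames(Set<String> localNames, Set<String> remoteNames) {
    Set<String> toRemove = new HashSet<>(localNames);  // assume everything is stale
    for (String name : remoteNames) {
      toRemove.remove(name.toLowerCase());             // un-mark names that still exist
    }
    return toRemove;
  }

  public static void main(String[] args) {
    Set<String> catalogRoles = new HashSet<>(Arrays.asList("analyst", "etl", "old_role"));
    Set<String> sentryRoles = new HashSet<>(Arrays.asList("ANALYST", "ETL"));
    // Only "old_role" stays marked, so it is the one that would be dropped.
    System.out.println(staleNames(catalogRoles, sentryRoles));
  }
}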

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/util/StatsHelper.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/util/StatsHelper.java b/fe/src/main/java/com/cloudera/impala/util/StatsHelper.java
deleted file mode 100644
index 9956c79..0000000
--- a/fe/src/main/java/com/cloudera/impala/util/StatsHelper.java
+++ /dev/null
@@ -1,60 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.util;
-
-/**
- *  This is a utility class to incrementally calculate average, variance
- *  and standard deviation. It's based on an algorithm devised by Knuth.
- *
- *  Please keep in mind that there might be edge cases where the algorithm below
- *  might produce a loss of precision.
- *
- *  See below link for more detail:
- *
- *  http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Incremental_algorithm
- */
-public class StatsHelper<T extends Number> {
-
-  private long count_ = 0;
-
-  // Current mean
-  private double mean_ = 0.0d;
-
-  // Sum of the square differences from the mean
-  private double m2_ = 0.0d;
-
-  public void addSample(T val) {
-    ++count_;
-    mean_ += (val.doubleValue() - mean_) / count_;
-    m2_ += Math.pow(val.doubleValue() - mean_, 2);
-  }
-
-  public long count() { return count_; }
-
-  public double mean() {
-    return count_ > 0 ? mean_ : 0.0;
-  }
-
-  public double variance() {
-    return count_ > 1 ? m2_ / (count_ - 1) : 0.0d;
-  }
-
-  public double stddev() {
-    return Math.sqrt(variance());
-  }
-}
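
For comparison, a minimal stand-alone sketch of the textbook Welford update, which keeps the distance to the pre-update mean and accumulates the squared-difference sum as delta * (x - newMean); the deleted class squares the distance to the already-updated mean instead:

public class WelfordSketch {
  private long count_ = 0;
  private double mean_ = 0.0;
  private double m2_ = 0.0;   // running sum of squared differences from the mean

  public void addSample(double val) {
    ++count_;
    double delta = val - mean_;      // distance to the old mean
    mean_ += delta / count_;
    m2_ += delta * (val - mean_);    // old delta times distance to the new mean
  }

  public long count() { return count_; }
  public double mean() { return count_ > 0 ? mean_ : 0.0; }
  public double variance() { return count_ > 1 ? m2_ / (count_ - 1) : 0.0; }
  public double stddev() { return Math.sqrt(variance()); }

  public static void main(String[] args) {
    WelfordSketch s = new WelfordSketch();
    for (double x : new double[] {2, 4, 4, 4, 5, 5, 7, 9}) s.addSample(x);
    System.out.println(s.mean());      // 5.0
    System.out.println(s.variance());  // about 4.571 (sample variance, n - 1)
    System.out.println(s.stddev());    // about 2.138
  }
}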

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/util/TAccessLevelUtil.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/util/TAccessLevelUtil.java b/fe/src/main/java/com/cloudera/impala/util/TAccessLevelUtil.java
deleted file mode 100644
index 2e58efa..0000000
--- a/fe/src/main/java/com/cloudera/impala/util/TAccessLevelUtil.java
+++ /dev/null
@@ -1,35 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.util;
-
-import com.cloudera.impala.thrift.TAccessLevel;
-
-/**
- * Utility functions for working with TAccessLevel types.
- */
-public class TAccessLevelUtil {
-  public static boolean impliesWriteAccess(TAccessLevel level) {
-    return level == TAccessLevel.READ_WRITE ||
-           level == TAccessLevel.WRITE_ONLY;
-  }
-
-  public static boolean impliesReadAccess(TAccessLevel level) {
-    return level == TAccessLevel.READ_WRITE ||
-           level == TAccessLevel.READ_ONLY;
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/util/TColumnValueUtil.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/util/TColumnValueUtil.java b/fe/src/main/java/com/cloudera/impala/util/TColumnValueUtil.java
deleted file mode 100644
index 23d6eb6..0000000
--- a/fe/src/main/java/com/cloudera/impala/util/TColumnValueUtil.java
+++ /dev/null
@@ -1,51 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.util;
-
-import com.cloudera.impala.thrift.TColumnValue;
-
-/**
- * Utility functions for working with TColumnValue objects.
- */
-public class TColumnValueUtil {
-  /**
-   * Extract numeric value from TColumnValue.
-   */
-  public static double getNumericVal(TColumnValue val) {
-    if (val.isSetByte_val()) {
-      return (double) val.byte_val;
-    } else if (val.isSetShort_val()) {
-      return (double) val.short_val;
-    } else if (val.isSetInt_val()) {
-      return (double) val.int_val;
-    } else if (val.isSetLong_val()) {
-      return (double) val.long_val;
-    } else if (val.isSetDouble_val()) {
-      return (double) val.double_val;
-    } else if (val.isSetString_val()) {
-      // we always return decimals as strings, even with as_ascii=false
-      // in Expr::GetValue()
-      try {
-        return Double.valueOf(val.string_val);
-      } catch (NumberFormatException e) {
-        return 0;
-      }
-    }
-    return 0;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/util/TResultRowBuilder.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/util/TResultRowBuilder.java b/fe/src/main/java/com/cloudera/impala/util/TResultRowBuilder.java
deleted file mode 100644
index ae70eb4..0000000
--- a/fe/src/main/java/com/cloudera/impala/util/TResultRowBuilder.java
+++ /dev/null
@@ -1,64 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.util;
-
-import com.cloudera.impala.common.PrintUtils;
-import com.cloudera.impala.thrift.TColumnValue;
-import com.cloudera.impala.thrift.TResultRow;
-
-/**
- * Utility class for building TResultRows.
- */
-public class TResultRowBuilder {
-  private final TResultRow row_ = new TResultRow();
-
-  public TResultRowBuilder add(long val) {
-    TColumnValue colVal = new TColumnValue();
-    colVal.setLong_val(val);
-    row_.addToColVals(colVal);
-    return this;
-  }
-
-  public TResultRowBuilder add(double val) {
-    TColumnValue colVal = new TColumnValue();
-    colVal.setDouble_val(val);
-    row_.addToColVals(colVal);
-    return this;
-  }
-
-  public TResultRowBuilder add(String val) {
-    TColumnValue colVal = new TColumnValue();
-    colVal.setString_val(val);
-    row_.addToColVals(colVal);
-    return this;
-  }
-
-  public TResultRowBuilder addBytes(long val) {
-    TColumnValue colVal = new TColumnValue();
-    colVal.setString_val(PrintUtils.printBytes(val));
-    row_.addToColVals(colVal);
-    return this;
-  }
-
-  public TResultRowBuilder reset() {
-    row_.clear();
-    return this;
-  }
-
-  public TResultRow get() { return row_; }
-}



[08/61] [partial] incubator-impala git commit: IMPALA-3786: Replace "cloudera" with "apache" (part 1)

Posted by kw...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/AnalyticInfo.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AnalyticInfo.java b/fe/src/main/java/org/apache/impala/analysis/AnalyticInfo.java
new file mode 100644
index 0000000..d0d1a85
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/AnalyticInfo.java
@@ -0,0 +1,199 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.cloudera.impala.catalog.Type;
+import com.google.common.base.Objects;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+
+/**
+ * Encapsulates the analytic functions found in a single select block plus
+ * the corresponding analytic result tuple and its substitution map.
+ */
+public class AnalyticInfo extends AggregateInfoBase {
+  private final static Logger LOG = LoggerFactory.getLogger(AnalyticInfo.class);
+
+  // All unique analytic exprs of a select block. Used to populate
+  // super.aggregateExprs_ based on AnalyticExpr.getFnCall() for each analytic expr
+  // in this list.
+  private final ArrayList<Expr> analyticExprs_;
+
+  // Intersection of the partition exprs of all the analytic functions.
+  private final List<Expr> commonPartitionExprs_;
+
+  // map from analyticExprs_ to their corresponding analytic tuple slotrefs
+  private final ExprSubstitutionMap analyticTupleSmap_;
+
+  private AnalyticInfo(ArrayList<Expr> analyticExprs) {
+    super(new ArrayList<Expr>(), new ArrayList<FunctionCallExpr>());
+    analyticExprs_ = Expr.cloneList(analyticExprs);
+    // Extract the analytic function calls for each analytic expr.
+    for (Expr analyticExpr: analyticExprs) {
+      aggregateExprs_.add(((AnalyticExpr) analyticExpr).getFnCall());
+    }
+    analyticTupleSmap_ = new ExprSubstitutionMap();
+    commonPartitionExprs_ = computeCommonPartitionExprs();
+  }
+
+  /**
+   * C'tor for cloning.
+   */
+  private AnalyticInfo(AnalyticInfo other) {
+    super(other);
+    analyticExprs_ =
+        (other.analyticExprs_ != null) ? Expr.cloneList(other.analyticExprs_) : null;
+    analyticTupleSmap_ = other.analyticTupleSmap_.clone();
+    commonPartitionExprs_ = Expr.cloneList(other.commonPartitionExprs_);
+  }
+
+  public ArrayList<Expr> getAnalyticExprs() { return analyticExprs_; }
+  public ExprSubstitutionMap getSmap() { return analyticTupleSmap_; }
+  public List<Expr> getCommonPartitionExprs() { return commonPartitionExprs_; }
+
+  /**
+   * Creates complete AnalyticInfo for analyticExprs, including tuple descriptors and
+   * smaps.
+   */
+  static public AnalyticInfo create(
+      ArrayList<Expr> analyticExprs, Analyzer analyzer) {
+    Preconditions.checkState(analyticExprs != null && !analyticExprs.isEmpty());
+    Expr.removeDuplicates(analyticExprs);
+    AnalyticInfo result = new AnalyticInfo(analyticExprs);
+    result.createTupleDescs(analyzer);
+
+    // The tuple descriptors are logical. Their slots are remapped to physical tuples
+    // during plan generation.
+    result.outputTupleDesc_.setIsMaterialized(false);
+    result.intermediateTupleDesc_.setIsMaterialized(false);
+
+    // Populate analyticTupleSmap_
+    Preconditions.checkState(analyticExprs.size() ==
+        result.outputTupleDesc_.getSlots().size());
+    for (int i = 0; i < analyticExprs.size(); ++i) {
+      result.analyticTupleSmap_.put(result.analyticExprs_.get(i),
+          new SlotRef(result.outputTupleDesc_.getSlots().get(i)));
+      result.outputTupleDesc_.getSlots().get(i).setSourceExpr(
+          result.analyticExprs_.get(i));
+    }
+    LOG.trace("analytictuple=" + result.outputTupleDesc_.debugString());
+    LOG.trace("analytictuplesmap=" + result.analyticTupleSmap_.debugString());
+    LOG.trace("analytic info:\n" + result.debugString());
+    return result;
+  }
+
+  /**
+   * Returns the intersection of the partition exprs of all the
+   * analytic functions.
+   */
+  private List<Expr> computeCommonPartitionExprs() {
+    List<Expr> result = Lists.newArrayList();
+    for (Expr analyticExpr: analyticExprs_) {
+      Preconditions.checkState(analyticExpr.isAnalyzed_);
+      List<Expr> partitionExprs = ((AnalyticExpr) analyticExpr).getPartitionExprs();
+      if (partitionExprs == null) continue;
+      if (result.isEmpty()) {
+        result.addAll(partitionExprs);
+      } else {
+        result.retainAll(partitionExprs);
+        if (result.isEmpty()) break;
+      }
+    }
+    return result;
+  }
+
+  /**
+   * Append ids of all slots that are being referenced in the process
+   * of performing the analytic computation described by this AnalyticInfo.
+   */
+  public void getRefdSlots(List<SlotId> ids) {
+    Preconditions.checkState(intermediateTupleDesc_ != null);
+    Expr.getIds(analyticExprs_, null, ids);
+    // The backend assumes that the entire intermediateTupleDesc is materialized
+    for (SlotDescriptor slotDesc: intermediateTupleDesc_.getSlots()) {
+      ids.add(slotDesc.getId());
+    }
+  }
+
+  @Override
+  public void materializeRequiredSlots(Analyzer analyzer, ExprSubstitutionMap smap) {
+    materializedSlots_.clear();
+    List<Expr> exprs = Lists.newArrayList();
+    for (int i = 0; i < analyticExprs_.size(); ++i) {
+      SlotDescriptor outputSlotDesc = outputTupleDesc_.getSlots().get(i);
+      if (!outputSlotDesc.isMaterialized()) continue;
+      intermediateTupleDesc_.getSlots().get(i).setIsMaterialized(true);
+      exprs.add(analyticExprs_.get(i));
+      materializedSlots_.add(i);
+    }
+    List<Expr> resolvedExprs = Expr.substituteList(exprs, smap, analyzer, false);
+    analyzer.materializeSlots(resolvedExprs);
+  }
+
+  /**
+   * Validates internal state: Checks that the number of materialized slots of the
+   * analytic tuple corresponds to the number of materialized analytic functions. Also
+   * checks that the return types of the analytic exprs correspond to the slots in the
+   * analytic tuple.
+   */
+  public void checkConsistency() {
+    ArrayList<SlotDescriptor> slots = intermediateTupleDesc_.getSlots();
+
+    // Check materialized slots.
+    int numMaterializedSlots = 0;
+    for (SlotDescriptor slotDesc: slots) {
+      if (slotDesc.isMaterialized()) ++numMaterializedSlots;
+    }
+    Preconditions.checkState(numMaterializedSlots ==
+        materializedSlots_.size());
+
+    // Check that analytic expr return types match the slot descriptors.
+    int slotIdx = 0;
+    for (int i = 0; i < analyticExprs_.size(); ++i) {
+      Expr analyticExpr = analyticExprs_.get(i);
+      Type slotType = slots.get(slotIdx).getType();
+      Preconditions.checkState(analyticExpr.getType().equals(slotType),
+          String.format("Analytic expr %s returns type %s but its analytic tuple " +
+              "slot has type %s", analyticExpr.toSql(),
+              analyticExpr.getType().toString(), slotType.toString()));
+      ++slotIdx;
+    }
+  }
+
+  @Override
+  public String debugString() {
+    StringBuilder out = new StringBuilder(super.debugString());
+    out.append(Objects.toStringHelper(this)
+        .add("analytic_exprs", Expr.debugString(analyticExprs_))
+        .add("smap", analyticTupleSmap_.debugString())
+        .toString());
+    return out.toString();
+  }
+
+  @Override
+  protected String tupleDebugName() { return "analytic-tuple"; }
+
+  @Override
+  public AnalyticInfo clone() { return new AnalyticInfo(this); }
+}
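
For illustration, here is a minimal standalone sketch (not part of this patch; the class
and method names are invented) of the intersection logic that
computeCommonPartitionExprs() applies to the PARTITION BY lists of the analytic
functions: seed the result with the first list, then keep only what every later list
also contains.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class CommonPartitionDemo {
  static List<String> commonPartitions(List<List<String>> partitionLists) {
    List<String> result = new ArrayList<>();
    for (List<String> partitionExprs : partitionLists) {
      if (partitionExprs == null) continue;
      if (result.isEmpty()) {
        result.addAll(partitionExprs);
      } else {
        result.retainAll(partitionExprs);
        if (result.isEmpty()) break;  // nothing in common, stop early
      }
    }
    return result;
  }

  public static void main(String[] args) {
    // One function partitioned by (a, b), another by (b, c): only "b" is shared.
    System.out.println(commonPartitions(Arrays.asList(
        Arrays.asList("a", "b"), Arrays.asList("b", "c"))));  // prints [b]
  }
}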

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/AnalyticWindow.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AnalyticWindow.java b/fe/src/main/java/org/apache/impala/analysis/AnalyticWindow.java
new file mode 100644
index 0000000..68558da
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/AnalyticWindow.java
@@ -0,0 +1,417 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import java.math.BigDecimal;
+
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.common.InternalException;
+import com.cloudera.impala.service.FeSupport;
+import com.cloudera.impala.thrift.TAnalyticWindow;
+import com.cloudera.impala.thrift.TAnalyticWindowBoundary;
+import com.cloudera.impala.thrift.TAnalyticWindowBoundaryType;
+import com.cloudera.impala.thrift.TAnalyticWindowType;
+import com.cloudera.impala.thrift.TColumnValue;
+import com.cloudera.impala.util.TColumnValueUtil;
+import com.google.common.base.Preconditions;
+
+
+/**
+ * Windowing clause of an analytic expr.
+ * Both left and right boundaries are always non-null after analyze().
+ */
+public class AnalyticWindow {
+  // default window used when an analytic expr was given an order by but no window
+  public static final AnalyticWindow DEFAULT_WINDOW = new AnalyticWindow(Type.RANGE,
+      new Boundary(BoundaryType.UNBOUNDED_PRECEDING, null),
+      new Boundary(BoundaryType.CURRENT_ROW, null));
+
+  enum Type {
+    ROWS("ROWS"),
+    RANGE("RANGE");
+
+    private final String description_;
+
+    private Type(String d) {
+      description_ = d;
+    }
+
+    @Override
+    public String toString() { return description_; }
+    public TAnalyticWindowType toThrift() {
+      return this == ROWS ? TAnalyticWindowType.ROWS : TAnalyticWindowType.RANGE;
+    }
+  }
+
+  enum BoundaryType {
+    UNBOUNDED_PRECEDING("UNBOUNDED PRECEDING"),
+    UNBOUNDED_FOLLOWING("UNBOUNDED FOLLOWING"),
+    CURRENT_ROW("CURRENT ROW"),
+    PRECEDING("PRECEDING"),
+    FOLLOWING("FOLLOWING");
+
+    private final String description_;
+
+    private BoundaryType(String d) {
+      description_ = d;
+    }
+
+    @Override
+    public String toString() { return description_; }
+    public TAnalyticWindowBoundaryType toThrift() {
+      Preconditions.checkState(!isAbsolutePos());
+      if (this == CURRENT_ROW) {
+        return TAnalyticWindowBoundaryType.CURRENT_ROW;
+      } else if (this == PRECEDING) {
+        return TAnalyticWindowBoundaryType.PRECEDING;
+      } else if (this == FOLLOWING) {
+        return TAnalyticWindowBoundaryType.FOLLOWING;
+      }
+      return null;
+    }
+
+    public boolean isAbsolutePos() {
+      return this == UNBOUNDED_PRECEDING || this == UNBOUNDED_FOLLOWING;
+    }
+
+    public boolean isOffset() {
+      return this == PRECEDING || this == FOLLOWING;
+    }
+
+    public boolean isPreceding() {
+      return this == UNBOUNDED_PRECEDING || this == PRECEDING;
+    }
+
+    public boolean isFollowing() {
+      return this == UNBOUNDED_FOLLOWING || this == FOLLOWING;
+    }
+
+    public BoundaryType converse() {
+      switch (this) {
+        case UNBOUNDED_PRECEDING: return UNBOUNDED_FOLLOWING;
+        case UNBOUNDED_FOLLOWING: return UNBOUNDED_PRECEDING;
+        case PRECEDING: return FOLLOWING;
+        case FOLLOWING: return PRECEDING;
+        default: return CURRENT_ROW;
+      }
+    }
+  }
+
+  public static class Boundary {
+    private final BoundaryType type_;
+
+    // Offset expr. Only set for PRECEDING/FOLLOWING. Needed for toSql().
+    private final Expr expr_;
+
+    // The offset value. Set during analysis after evaluating expr_. Integral valued
+    // for ROWS windows.
+    private BigDecimal offsetValue_;
+
+    public BoundaryType getType() { return type_; }
+    public Expr getExpr() { return expr_; }
+    public BigDecimal getOffsetValue() { return offsetValue_; }
+
+    public Boundary(BoundaryType type, Expr e) {
+      this(type, e, null);
+    }
+
+    // c'tor used by clone()
+    private Boundary(BoundaryType type, Expr e, BigDecimal offsetValue) {
+      Preconditions.checkState(
+        (type.isOffset() && e != null)
+        || (!type.isOffset() && e == null));
+      type_ = type;
+      expr_ = e;
+      offsetValue_ = offsetValue;
+    }
+
+    public String toSql() {
+      StringBuilder sb = new StringBuilder();
+      if (expr_ != null) sb.append(expr_.toSql()).append(" ");
+      sb.append(type_.toString());
+      return sb.toString();
+    }
+
+    public TAnalyticWindowBoundary toThrift(Type windowType) {
+      TAnalyticWindowBoundary result = new TAnalyticWindowBoundary(type_.toThrift());
+      if (type_.isOffset() && windowType == Type.ROWS) {
+        result.setRows_offset_value(offsetValue_.longValue());
+      }
+      // TODO: range windows need range_offset_predicate
+      return result;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+      if (obj == null) return false;
+      if (obj.getClass() != this.getClass()) return false;
+      Boundary o = (Boundary)obj;
+      boolean exprEqual = (expr_ == null) == (o.expr_ == null);
+      if (exprEqual && expr_ != null) exprEqual = expr_.equals(o.expr_);
+      return type_ == o.type_ && exprEqual;
+    }
+
+    public Boundary converse() {
+      Boundary result = new Boundary(type_.converse(),
+          (expr_ != null) ? expr_.clone() : null);
+      result.offsetValue_ = offsetValue_;
+      return result;
+    }
+
+    @Override
+    public Boundary clone() {
+      return new Boundary(type_, expr_ != null ? expr_.clone() : null, offsetValue_);
+    }
+
+    public void analyze(Analyzer analyzer) throws AnalysisException {
+      if (expr_ != null) expr_.analyze(analyzer);
+    }
+  }
+
+  private final Type type_;
+  private final Boundary leftBoundary_;
+  private Boundary rightBoundary_;  // may be null before analyze()
+  private String toSqlString_;  // cached after analysis
+
+  public Type getType() { return type_; }
+  public Boundary getLeftBoundary() { return leftBoundary_; }
+  public Boundary getRightBoundary() { return rightBoundary_; }
+  public Boundary setRightBoundary(Boundary b) { return rightBoundary_ = b; }
+
+  public AnalyticWindow(Type type, Boundary b) {
+    type_ = type;
+    Preconditions.checkNotNull(b);
+    leftBoundary_ = b;
+    rightBoundary_ = null;
+  }
+
+  public AnalyticWindow(Type type, Boundary l, Boundary r) {
+    type_ = type;
+    Preconditions.checkNotNull(l);
+    leftBoundary_ = l;
+    Preconditions.checkNotNull(r);
+    rightBoundary_ = r;
+  }
+
+  /**
+   * Clone c'tor
+   */
+  private AnalyticWindow(AnalyticWindow other) {
+    type_ = other.type_;
+    Preconditions.checkNotNull(other.leftBoundary_);
+    leftBoundary_ = other.leftBoundary_.clone();
+    if (other.rightBoundary_ != null) {
+      rightBoundary_ = other.rightBoundary_.clone();
+    }
+    toSqlString_ = other.toSqlString_;  // safe to share
+  }
+
+  public AnalyticWindow reverse() {
+    Boundary newRightBoundary = leftBoundary_.converse();
+    Boundary newLeftBoundary = null;
+    if (rightBoundary_ == null) {
+      newLeftBoundary = new Boundary(leftBoundary_.getType(), null);
+    } else {
+      newLeftBoundary = rightBoundary_.converse();
+    }
+    return new AnalyticWindow(type_, newLeftBoundary, newRightBoundary);
+  }
+
+  public String toSql() {
+    if (toSqlString_ != null) return toSqlString_;
+    StringBuilder sb = new StringBuilder();
+    sb.append(type_.toString()).append(" ");
+    if (rightBoundary_ == null) {
+      sb.append(leftBoundary_.toSql());
+    } else {
+      sb.append("BETWEEN ").append(leftBoundary_.toSql()).append(" AND ");
+      sb.append(rightBoundary_.toSql());
+    }
+    return sb.toString();
+  }
+
+  public TAnalyticWindow toThrift() {
+    TAnalyticWindow result = new TAnalyticWindow(type_.toThrift());
+    if (leftBoundary_.getType() != BoundaryType.UNBOUNDED_PRECEDING) {
+      result.setWindow_start(leftBoundary_.toThrift(type_));
+    }
+    Preconditions.checkNotNull(rightBoundary_);
+    if (rightBoundary_.getType() != BoundaryType.UNBOUNDED_FOLLOWING) {
+      result.setWindow_end(rightBoundary_.toThrift(type_));
+    }
+    return result;
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (obj == null) return false;
+    if (obj.getClass() != this.getClass()) return false;
+    AnalyticWindow o = (AnalyticWindow)obj;
+    boolean rightBoundaryEqual =
+        (rightBoundary_ == null) == (o.rightBoundary_ == null);
+    if (rightBoundaryEqual && rightBoundary_ != null) {
+      rightBoundaryEqual = rightBoundary_.equals(o.rightBoundary_);
+    }
+    return type_ == o.type_
+        && leftBoundary_.equals(o.leftBoundary_)
+        && rightBoundaryEqual;
+  }
+
+  @Override
+  public AnalyticWindow clone() { return new AnalyticWindow(this); }
+
+  /**
+   * Semantic analysis for expr of a PRECEDING/FOLLOWING clause.
+   */
+  private void checkOffsetExpr(Analyzer analyzer, Boundary boundary)
+      throws AnalysisException {
+    Preconditions.checkState(boundary.getType().isOffset());
+    Expr e = boundary.getExpr();
+    Preconditions.checkNotNull(e);
+    boolean isPos = true;
+    Double val = null;
+    if (e.isConstant() && e.getType().isNumericType()) {
+      try {
+        val = TColumnValueUtil.getNumericVal(
+            FeSupport.EvalConstExpr(e, analyzer.getQueryCtx()));
+        if (val <= 0) isPos = false;
+      } catch (InternalException exc) {
+        throw new AnalysisException(
+            "Couldn't evaluate PRECEDING/FOLLOWING expression: " + exc.getMessage());
+      }
+    }
+
+    if (type_ == Type.ROWS) {
+      if (!e.isConstant() || !e.getType().isIntegerType() || !isPos) {
+        throw new AnalysisException(
+            "For ROWS window, the value of a PRECEDING/FOLLOWING offset must be a "
+              + "constant positive integer: " + boundary.toSql());
+      }
+      Preconditions.checkNotNull(val);
+      boundary.offsetValue_ = new BigDecimal(val.longValue());
+    } else {
+      if (!e.isConstant() || !e.getType().isNumericType() || !isPos) {
+        throw new AnalysisException(
+            "For RANGE window, the value of a PRECEDING/FOLLOWING offset must be a "
+              + "constant positive number: " + boundary.toSql());
+      }
+      boundary.offsetValue_ = new BigDecimal(val);
+    }
+  }
+
+  /**
+   * Check that b1 <= b2.
+   */
+  private void checkOffsetBoundaries(Analyzer analyzer, Boundary b1, Boundary b2)
+      throws AnalysisException {
+    Preconditions.checkState(b1.getType().isOffset());
+    Preconditions.checkState(b2.getType().isOffset());
+    Expr e1 = b1.getExpr();
+    Preconditions.checkState(
+        e1 != null && e1.isConstant() && e1.getType().isNumericType());
+    Expr e2 = b2.getExpr();
+    Preconditions.checkState(
+        e2 != null && e2.isConstant() && e2.getType().isNumericType());
+
+    try {
+      TColumnValue val1 = FeSupport.EvalConstExpr(e1, analyzer.getQueryCtx());
+      TColumnValue val2 = FeSupport.EvalConstExpr(e2, analyzer.getQueryCtx());
+      double left = TColumnValueUtil.getNumericVal(val1);
+      double right = TColumnValueUtil.getNumericVal(val2);
+      if (left > right) {
+        throw new AnalysisException(
+            "Offset boundaries are in the wrong order: " + toSql());
+      }
+    } catch (InternalException exc) {
+      throw new AnalysisException(
+          "Couldn't evaluate PRECEDING/FOLLOWING expression: " + exc.getMessage());
+    }
+
+  }
+
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    leftBoundary_.analyze(analyzer);
+    if (rightBoundary_ != null) rightBoundary_.analyze(analyzer);
+
+    if (leftBoundary_.getType() == BoundaryType.UNBOUNDED_FOLLOWING) {
+      throw new AnalysisException(
+          leftBoundary_.getType().toString() + " is only allowed for upper bound of "
+            + "BETWEEN");
+    }
+    if (rightBoundary_ != null
+        && rightBoundary_.getType() == BoundaryType.UNBOUNDED_PRECEDING) {
+      throw new AnalysisException(
+          rightBoundary_.getType().toString() + " is only allowed for lower bound of "
+            + "BETWEEN");
+    }
+
+    // TODO: Remove when RANGE windows with offset boundaries are supported.
+    if (type_ == Type.RANGE) {
+      if (leftBoundary_.type_.isOffset()
+          || (rightBoundary_ != null && rightBoundary_.type_.isOffset())
+          || (leftBoundary_.type_ == BoundaryType.CURRENT_ROW
+              && (rightBoundary_ == null
+                  || rightBoundary_.type_ == BoundaryType.CURRENT_ROW))) {
+        throw new AnalysisException(
+            "RANGE is only supported with both the lower and upper bounds UNBOUNDED or"
+            + " one UNBOUNDED and the other CURRENT ROW.");
+      }
+    }
+
+    if (rightBoundary_ == null && leftBoundary_.getType() == BoundaryType.FOLLOWING) {
+      throw new AnalysisException(
+          leftBoundary_.getType().toString() + " requires a BETWEEN clause");
+    }
+
+    if (leftBoundary_.getType().isOffset()) checkOffsetExpr(analyzer, leftBoundary_);
+    if (rightBoundary_ == null) {
+      // set right boundary to implied value, but make sure to cache toSql string
+      // beforehand
+      toSqlString_ = toSql();
+      rightBoundary_ = new Boundary(BoundaryType.CURRENT_ROW, null);
+      return;
+    }
+    if (rightBoundary_.getType().isOffset()) checkOffsetExpr(analyzer, rightBoundary_);
+
+    if (leftBoundary_.getType() == BoundaryType.FOLLOWING) {
+      if (rightBoundary_.getType() != BoundaryType.FOLLOWING
+          && rightBoundary_.getType() != BoundaryType.UNBOUNDED_FOLLOWING) {
+        throw new AnalysisException(
+            "A lower window bound of " + BoundaryType.FOLLOWING.toString()
+              + " requires that the upper bound also be "
+              + BoundaryType.FOLLOWING.toString());
+      }
+      if (rightBoundary_.getType() != BoundaryType.UNBOUNDED_FOLLOWING) {
+        checkOffsetBoundaries(analyzer, leftBoundary_, rightBoundary_);
+      }
+    }
+
+    if (rightBoundary_.getType() == BoundaryType.PRECEDING) {
+      if (leftBoundary_.getType() != BoundaryType.PRECEDING
+          && leftBoundary_.getType() != BoundaryType.UNBOUNDED_PRECEDING) {
+        throw new AnalysisException(
+            "An upper window bound of " + BoundaryType.PRECEDING.toString()
+              + " requires that the lower bound also be "
+              + BoundaryType.PRECEDING.toString());
+      }
+      if (leftBoundary_.getType() != BoundaryType.UNBOUNDED_PRECEDING) {
+        checkOffsetBoundaries(analyzer, rightBoundary_, leftBoundary_);
+      }
+    }
+  }
+}
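
To make the offset checks above concrete, here is a small standalone sketch (not Impala
code; the names are invented) of the ordering rule enforced by checkOffsetBoundaries():
mapping PRECEDING offsets to negative positions and FOLLOWING offsets to positive ones,
the lower bound of a BETWEEN window must not lie past the upper bound.

public class WindowBoundsDemo {
  // Position of a bound relative to the current row: negative for PRECEDING,
  // positive for FOLLOWING.
  static double position(double offset, boolean preceding) {
    return preceding ? -offset : offset;
  }

  static boolean boundsOrdered(double lowerPos, double upperPos) {
    return lowerPos <= upperPos;
  }

  public static void main(String[] args) {
    // ROWS BETWEEN 5 PRECEDING AND 2 PRECEDING: -5 <= -2, accepted.
    System.out.println(boundsOrdered(position(5, true), position(2, true)));    // true
    // ROWS BETWEEN 3 FOLLOWING AND 1 FOLLOWING: 3 > 1, rejected during analysis.
    System.out.println(boundsOrdered(position(3, false), position(1, false)));  // false
  }
}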


[38/61] [partial] incubator-impala git commit: IMPALA-3786: Replace "cloudera" with "apache" (part 1)

Posted by kw...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/QueryStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/QueryStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/QueryStmt.java
deleted file mode 100644
index 791305b..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/QueryStmt.java
+++ /dev/null
@@ -1,458 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.ListIterator;
-import java.util.Set;
-
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.TreeNode;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Predicates;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Sets;
-
-/**
- * Abstract base class for any statement that returns results
- * via a list of result expressions, for example a
- * SelectStmt or UnionStmt. Also maintains a map of expression substitutions
- * for replacing expressions from ORDER BY or GROUP BY clauses with
- * their corresponding result expressions.
- * Used for sharing members/methods and some of the analysis code, in particular the
- * analysis of the ORDER BY and LIMIT clauses.
- *
- */
-public abstract class QueryStmt extends StatementBase {
-  /////////////////////////////////////////
-  // BEGIN: Members that need to be reset()
-
-  protected WithClause withClause_;
-
-  protected ArrayList<OrderByElement> orderByElements_;
-  protected LimitElement limitElement_;
-
-  // For a select statement:
-  // original list of exprs in select clause (star-expanded, ordinals and
-  // aliases substituted, agg output substituted)
-  // For a union statement:
-  // list of slotrefs into the tuple materialized by the union.
-  protected ArrayList<Expr> resultExprs_ = Lists.newArrayList();
-
-  // For a select statement: select list exprs resolved to base tbl refs
-  // For a union statement: same as resultExprs
-  protected ArrayList<Expr> baseTblResultExprs_ = Lists.newArrayList();
-
-  /**
-   * Map of expression substitutions for replacing aliases
-   * in "order by" or "group by" clauses with their corresponding result expr.
-   */
-  protected final ExprSubstitutionMap aliasSmap_;
-
-  /**
-   * Select list item alias does not have to be unique.
-   * This list contains all the non-unique aliases. For example,
-   *   select int_col a, string_col a from alltypessmall;
-   * Both columns are using the same alias "a".
-   */
-  protected final ArrayList<Expr> ambiguousAliasList_;
-
-  protected SortInfo sortInfo_;
-
-  // evaluateOrderBy_ is true if there is an order by clause that must be evaluated.
-  // False for nested query stmts with an order-by clause without offset/limit.
-  // sortInfo_ is still generated and used in analysis to ensure that the order-by clause
-  // is well-formed.
-  protected boolean evaluateOrderBy_;
-
-  /////////////////////////////////////////
-  // END: Members that need to be reset()
-
-  QueryStmt(ArrayList<OrderByElement> orderByElements, LimitElement limitElement) {
-    orderByElements_ = orderByElements;
-    sortInfo_ = null;
-    limitElement_ = limitElement == null ? new LimitElement(null, null) : limitElement;
-    aliasSmap_ = new ExprSubstitutionMap();
-    ambiguousAliasList_ = Lists.newArrayList();
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    if (isAnalyzed()) return;
-    super.analyze(analyzer);
-    analyzeLimit(analyzer);
-    if (hasWithClause()) withClause_.analyze(analyzer);
-  }
-
-  /**
-   * Returns a list containing all the materialized tuple ids that this stmt is
-   * correlated with (i.e., those tuple ids from outer query blocks that TableRefs
-   * inside this stmt are rooted at).
-   *
-   * Throws if this stmt contains an illegal mix of un/correlated table refs.
-   * A statement is illegal if it contains a TableRef correlated with a parent query
-   * block as well as a table ref with an absolute path (e.g. a BaseTableRef). Such a
-   * statement would generate a Subplan containing a base table scan (very expensive),
-   * and should therefore be avoided.
-   *
-   * In other words, the following cases are legal:
-   * (1) only uncorrelated table refs
-   * (2) only correlated table refs
-   * (3) a mix of correlated table refs and table refs rooted at those refs
-   *     (the statement is 'self-contained' with respect to correlation)
-   */
-  public List<TupleId> getCorrelatedTupleIds(Analyzer analyzer)
-      throws AnalysisException {
-    // Correlated tuple ids of this stmt.
-    List<TupleId> correlatedTupleIds = Lists.newArrayList();
-    // First correlated and absolute table refs. Used for error detection/reporting.
-    // We pick the first ones for simplicity. Choosing arbitrary ones is equally valid.
-    TableRef correlatedRef = null;
-    TableRef absoluteRef = null;
-    // Materialized tuple ids of the table refs checked so far.
-    Set<TupleId> tblRefIds = Sets.newHashSet();
-
-    List<TableRef> tblRefs = Lists.newArrayList();
-    collectTableRefs(tblRefs);
-    for (TableRef tblRef: tblRefs) {
-      if (absoluteRef == null && !tblRef.isRelative()) absoluteRef = tblRef;
-      if (tblRef.isCorrelated()) {
-        // Check if the correlated table ref is rooted at a tuple descriptor from within
-        // this query stmt. If so, the correlation is contained within this stmt
-        // and the table ref does not conflict with absolute refs.
-        CollectionTableRef t = (CollectionTableRef) tblRef;
-        Preconditions.checkState(t.getResolvedPath().isRootedAtTuple());
-        // This check relies on tblRefs being in depth-first order.
-        if (!tblRefIds.contains(t.getResolvedPath().getRootDesc().getId())) {
-          if (correlatedRef == null) correlatedRef = tblRef;
-          correlatedTupleIds.add(t.getResolvedPath().getRootDesc().getId());
-        }
-      }
-      if (correlatedRef != null && absoluteRef != null) {
-        throw new AnalysisException(String.format(
-            "Nested query is illegal because it contains a table reference '%s' " +
-            "correlated with an outer block as well as an uncorrelated one '%s':\n%s",
-            correlatedRef.tableRefToSql(), absoluteRef.tableRefToSql(), toSql()));
-      }
-      tblRefIds.add(tblRef.getId());
-    }
-    return correlatedTupleIds;
-  }
-
-  private void analyzeLimit(Analyzer analyzer) throws AnalysisException {
-    if (limitElement_.getOffsetExpr() != null && !hasOrderByClause()) {
-      throw new AnalysisException("OFFSET requires an ORDER BY clause: " +
-          limitElement_.toSql().trim());
-    }
-    limitElement_.analyze(analyzer);
-  }
-
-  /**
-   * Creates sortInfo by resolving aliases and ordinals in the orderingExprs.
-   * If the query stmt is an inline view/union operand, then an order-by with an
-   * offset but no limit is not allowed, since that requires a sort and
-   * merging-exchange, and subsequent query execution would occur on a single machine.
-   * Sets evaluateOrderBy_ to false for ignored order-by w/o limit/offset in nested
-   * queries.
-   */
-  protected void createSortInfo(Analyzer analyzer) throws AnalysisException {
-    // not computing order by
-    if (orderByElements_ == null) {
-      evaluateOrderBy_ = false;
-      return;
-    }
-
-    ArrayList<Expr> orderingExprs = Lists.newArrayList();
-    ArrayList<Boolean> isAscOrder = Lists.newArrayList();
-    ArrayList<Boolean> nullsFirstParams = Lists.newArrayList();
-
-    // extract exprs
-    for (OrderByElement orderByElement: orderByElements_) {
-      if (orderByElement.getExpr().contains(Predicates.instanceOf(Subquery.class))) {
-        throw new AnalysisException(
-            "Subqueries are not supported in the ORDER BY clause.");
-      }
-      // create copies, we don't want to modify the original parse node, in case
-      // we need to print it
-      orderingExprs.add(orderByElement.getExpr().clone());
-      isAscOrder.add(Boolean.valueOf(orderByElement.isAsc()));
-      nullsFirstParams.add(orderByElement.getNullsFirstParam());
-    }
-    substituteOrdinalsAliases(orderingExprs, "ORDER BY", analyzer);
-
-    if (!analyzer.isRootAnalyzer() && hasOffset() && !hasLimit()) {
-      throw new AnalysisException("Order-by with offset without limit not supported" +
-        " in nested queries.");
-    }
-
-    sortInfo_ = new SortInfo(orderingExprs, isAscOrder, nullsFirstParams);
-    // order by w/o limit and offset in inline views, union operands and insert statements
-    // are ignored.
-    if (!hasLimit() && !hasOffset() && !analyzer.isRootAnalyzer()) {
-      evaluateOrderBy_ = false;
-      // Return a warning that the order by was ignored.
-      StringBuilder strBuilder = new StringBuilder();
-      strBuilder.append("Ignoring ORDER BY clause without LIMIT or OFFSET: ");
-      strBuilder.append("ORDER BY ");
-      strBuilder.append(orderByElements_.get(0).toSql());
-      for (int i = 1; i < orderByElements_.size(); ++i) {
-        strBuilder.append(", ").append(orderByElements_.get(i).toSql());
-      }
-      strBuilder.append(".\nAn ORDER BY appearing in a view, subquery, union operand, ");
-      strBuilder.append("or an insert/ctas statement has no effect on the query result ");
-      strBuilder.append("unless a LIMIT and/or OFFSET is used in conjunction ");
-      strBuilder.append("with the ORDER BY.");
-      analyzer.addWarning(strBuilder.toString());
-    } else {
-      evaluateOrderBy_ = true;
-    }
-  }
-
-  /**
-   * Create a tuple descriptor for the single tuple that is materialized, sorted and
-   * output by the exec node implementing the sort. Done by materializing slot refs in
-   * the order-by and result expressions. Those SlotRefs in the ordering and result exprs
-   * are substituted with SlotRefs into the new tuple. This simplifies sorting logic for
-   * total (no limit) sorts.
-   * Done after analyzeAggregation() since ordering and result exprs may refer to
-   * the outputs of aggregation. Invoked for UnionStmt as well.
-   * TODO: We could do something more sophisticated than simply copying input
-   * slotrefs - e.g. compute some order-by expressions.
-   */
-  protected void createSortTupleInfo(Analyzer analyzer) throws AnalysisException {
-    Preconditions.checkState(evaluateOrderBy_);
-
-    for (Expr orderingExpr: sortInfo_.getOrderingExprs()) {
-      if (orderingExpr.getType().isComplexType()) {
-        throw new AnalysisException(String.format("ORDER BY expression '%s' with " +
-            "complex type '%s' is not supported.", orderingExpr.toSql(),
-            orderingExpr.getType().toSql()));
-      }
-    }
-
-    // sourceSlots contains the slots from the input row to materialize.
-    Set<SlotRef> sourceSlots = Sets.newHashSet();
-    TreeNode.collect(resultExprs_, Predicates.instanceOf(SlotRef.class), sourceSlots);
-    TreeNode.collect(sortInfo_.getOrderingExprs(), Predicates.instanceOf(SlotRef.class),
-        sourceSlots);
-
-    TupleDescriptor sortTupleDesc = analyzer.getDescTbl().createTupleDescriptor("sort");
-    List<Expr> sortTupleExprs = Lists.newArrayList();
-    sortTupleDesc.setIsMaterialized(true);
-    // substOrderBy is the mapping from slot refs in the input row to slot refs in the
-    // materialized sort tuple.
-    ExprSubstitutionMap substOrderBy = new ExprSubstitutionMap();
-    for (SlotRef origSlotRef: sourceSlots) {
-      SlotDescriptor origSlotDesc = origSlotRef.getDesc();
-      SlotDescriptor materializedDesc =
-          analyzer.copySlotDescriptor(origSlotDesc, sortTupleDesc);
-      SlotRef cloneRef = new SlotRef(materializedDesc);
-      substOrderBy.put(origSlotRef, cloneRef);
-      if (hasLimit()) {
-        analyzer.registerValueTransfer(origSlotRef.getSlotId(), cloneRef.getSlotId());
-      } else {
-        analyzer.createAuxEquivPredicate(cloneRef, origSlotRef);
-      }
-      sortTupleExprs.add(origSlotRef);
-    }
-
-    resultExprs_ = Expr.substituteList(resultExprs_, substOrderBy, analyzer, false);
-    sortInfo_.substituteOrderingExprs(substOrderBy, analyzer);
-    sortInfo_.setMaterializedTupleInfo(sortTupleDesc, sortTupleExprs);
-  }
-
-  /**
-   * Return the first expr in exprs that is a non-unique alias. Return null if none of
-   * exprs is an ambiguous alias.
-   */
-  protected Expr getFirstAmbiguousAlias(List<Expr> exprs) {
-    for (Expr exp: exprs) {
-      if (ambiguousAliasList_.contains(exp)) return exp;
-    }
-    return null;
-  }
-
-  /**
-   * Substitute exprs of the form "<number>"  with the corresponding
-   * expressions and any alias references in aliasSmap_.
-   * Modifies exprs list in-place.
-   */
-  protected void substituteOrdinalsAliases(List<Expr> exprs, String errorPrefix,
-      Analyzer analyzer) throws AnalysisException {
-    Expr ambiguousAlias = getFirstAmbiguousAlias(exprs);
-    if (ambiguousAlias != null) {
-      throw new AnalysisException("Column '" + ambiguousAlias.toSql() +
-          "' in " + errorPrefix + " clause is ambiguous");
-    }
-
-    ListIterator<Expr> i = exprs.listIterator();
-    while (i.hasNext()) {
-      Expr expr = i.next();
-      // We can substitute either by ordinal or by alias.
-      // If we substitute by ordinal, we should not replace any aliases, since
-      // the new expression was copied from the select clause context, where
-      // alias substitution is not performed in the same way.
-      Expr substituteExpr = trySubstituteOrdinal(expr, errorPrefix, analyzer);
-      if (substituteExpr == null) {
-        substituteExpr = expr.trySubstitute(aliasSmap_, analyzer, false);
-      }
-      i.set(substituteExpr);
-    }
-  }
-
-  // Attempt to replace an expression of form "<number>" with the corresponding
-  // select list items.  Return null if not an ordinal expression.
-  private Expr trySubstituteOrdinal(Expr expr, String errorPrefix,
-      Analyzer analyzer) throws AnalysisException {
-    if (!(expr instanceof NumericLiteral)) return null;
-    expr.analyze(analyzer);
-    if (!expr.getType().isIntegerType()) return null;
-    long pos = ((NumericLiteral) expr).getLongValue();
-    if (pos < 1) {
-      throw new AnalysisException(
-          errorPrefix + ": ordinal must be >= 1: " + expr.toSql());
-    }
-    if (pos > resultExprs_.size()) {
-      throw new AnalysisException(
-          errorPrefix + ": ordinal exceeds number of items in select list: "
-          + expr.toSql());
-    }
-
-    // Create copy to protect against accidentally shared state.
-    return resultExprs_.get((int) pos - 1).clone();
-  }
-
-  /**
-   * UnionStmt and SelectStmt have different implementations.
-   */
-  public abstract ArrayList<String> getColLabels();
-
-  /**
-   * Returns the materialized tuple ids of the output of this stmt.
-   * Used in case this stmt is part of an @InlineViewRef,
-   * since we need to know the materialized tuple ids of a TableRef.
-   * This call must be idempotent because it may be called more than once for Union stmt.
-   * TODO: The name of this function has become outdated due to analytics
-   * producing logical (non-materialized) tuples. Re-think and clean up.
-   */
-  public abstract void getMaterializedTupleIds(ArrayList<TupleId> tupleIdList);
-
-  /**
-   * Returns all physical (non-inline-view) TableRefs of this statement and the nested
-   * statements of inline views. The returned TableRefs are in depth-first order.
-   */
-  public abstract void collectTableRefs(List<TableRef> tblRefs);
-
-  public void setWithClause(WithClause withClause) { this.withClause_ = withClause; }
-  public boolean hasWithClause() { return withClause_ != null; }
-  public WithClause getWithClause() { return withClause_; }
-  public boolean hasOrderByClause() { return orderByElements_ != null; }
-  public boolean hasLimit() { return limitElement_.getLimitExpr() != null; }
-  public long getLimit() { return limitElement_.getLimit(); }
-  public boolean hasOffset() { return limitElement_.getOffsetExpr() != null; }
-  public long getOffset() { return limitElement_.getOffset(); }
-  public SortInfo getSortInfo() { return sortInfo_; }
-  public boolean evaluateOrderBy() { return evaluateOrderBy_; }
-  public ArrayList<Expr> getResultExprs() { return resultExprs_; }
-  public ArrayList<Expr> getBaseTblResultExprs() { return baseTblResultExprs_; }
-  public void setLimit(long limit) throws AnalysisException {
-    Preconditions.checkState(limit >= 0);
-    long newLimit = hasLimit() ? Math.min(limit, getLimit()) : limit;
-    limitElement_ = new LimitElement(new NumericLiteral(Long.toString(newLimit),
-        Type.BIGINT), null);
-  }
-
-  /**
-   * Mark all slots that need to be materialized for the execution of this stmt.
-   * This excludes slots referenced in resultExprs (it depends on the consumer of
-   * the output of the stmt whether they'll be accessed) and single-table predicates
-   * (the PlanNode that materializes that tuple can decide whether evaluating those
-   * predicates requires slot materialization).
-   * This is called prior to plan tree generation and allows tuple-materializing
-   * PlanNodes to compute their tuple's mem layout.
-   */
-  public abstract void materializeRequiredSlots(Analyzer analyzer);
-
-  /**
-   * Mark slots referenced in exprs as materialized.
-   */
-  protected void materializeSlots(Analyzer analyzer, List<Expr> exprs) {
-    List<SlotId> slotIds = Lists.newArrayList();
-    for (Expr e: exprs) {
-      e.getIds(null, slotIds);
-    }
-    analyzer.getDescTbl().markSlotsMaterialized(slotIds);
-  }
-
-  /**
-   * Substitutes the result expressions with smap. Preserves the original types of
-   * those expressions during the substitution.
-   */
-  public void substituteResultExprs(ExprSubstitutionMap smap, Analyzer analyzer) {
-    resultExprs_ = Expr.substituteList(resultExprs_, smap, analyzer, true);
-  }
-
-  public ArrayList<OrderByElement> cloneOrderByElements() {
-    if (orderByElements_ == null) return null;
-    ArrayList<OrderByElement> result =
-        Lists.newArrayListWithCapacity(orderByElements_.size());
-    for (OrderByElement o: orderByElements_) result.add(o.clone());
-    return result;
-  }
-
-  public WithClause cloneWithClause() {
-    return withClause_ != null ? withClause_.clone() : null;
-  }
-
-  /**
-   * C'tor for cloning.
-   */
-  protected QueryStmt(QueryStmt other) {
-    super(other);
-    withClause_ = other.cloneWithClause();
-    orderByElements_ = other.cloneOrderByElements();
-    limitElement_ = other.limitElement_.clone();
-    resultExprs_ = Expr.cloneList(other.resultExprs_);
-    baseTblResultExprs_ = Expr.cloneList(other.baseTblResultExprs_);
-    aliasSmap_ = other.aliasSmap_.clone();
-    ambiguousAliasList_ = Expr.cloneList(other.ambiguousAliasList_);
-    sortInfo_ = (other.sortInfo_ != null) ? other.sortInfo_.clone() : null;
-    analyzer_ = other.analyzer_;
-    evaluateOrderBy_ = other.evaluateOrderBy_;
-  }
-
-  @Override
-  public void reset() {
-    super.reset();
-    if (orderByElements_ != null) {
-      for (OrderByElement o: orderByElements_) o.getExpr().reset();
-    }
-    limitElement_.reset();
-    resultExprs_.clear();
-    baseTblResultExprs_.clear();
-    aliasSmap_.clear();
-    ambiguousAliasList_.clear();
-    sortInfo_ = null;
-    evaluateOrderBy_ = false;
-  }
-
-  @Override
-  public abstract QueryStmt clone();
-}
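
As a small standalone sketch of the ordinal substitution described in
substituteOrdinalsAliases()/trySubstituteOrdinal() above (not Impala code; the names
below are invented), an integer literal in an ORDER BY or GROUP BY clause is replaced
by the select-list expression at that 1-based position:

import java.util.Arrays;
import java.util.List;

public class OrdinalSubstitutionDemo {
  static String substituteOrdinal(List<String> selectListExprs, int ordinal) {
    if (ordinal < 1) {
      throw new IllegalArgumentException("ORDER BY: ordinal must be >= 1: " + ordinal);
    }
    if (ordinal > selectListExprs.size()) {
      throw new IllegalArgumentException(
          "ORDER BY: ordinal exceeds number of items in select list: " + ordinal);
    }
    return selectListExprs.get(ordinal - 1);
  }

  public static void main(String[] args) {
    List<String> selectList = Arrays.asList("id", "upper(name)", "price * qty");
    // SELECT id, upper(name), price * qty FROM t ORDER BY 2
    System.out.println(substituteOrdinal(selectList, 2));  // prints upper(name)
  }
}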

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/ResetMetadataStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/ResetMetadataStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/ResetMetadataStmt.java
deleted file mode 100644
index ac6c390..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/ResetMetadataStmt.java
+++ /dev/null
@@ -1,110 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.authorization.PrivilegeRequest;
-import com.cloudera.impala.authorization.PrivilegeRequestBuilder;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TResetMetadataRequest;
-import com.cloudera.impala.thrift.TTableName;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Strings;
-
-/**
- * Representation of a REFRESH/INVALIDATE METADATA statement.
- */
-public class ResetMetadataStmt extends StatementBase {
-  // Updated during analysis. Null if invalidating the entire catalog.
-  private TableName tableName_;
-
-  // true if it is a REFRESH statement.
-  private final boolean isRefresh_;
-
-  // not null when refreshing a single partition
-  private final PartitionSpec partitionSpec_;
-
-  public ResetMetadataStmt(TableName name, boolean isRefresh,
-      PartitionSpec partitionSpec) {
-    Preconditions.checkArgument(!isRefresh || name != null);
-    Preconditions.checkArgument(isRefresh || partitionSpec == null);
-    this.tableName_ = name;
-    this.isRefresh_ = isRefresh;
-    this.partitionSpec_ = partitionSpec;
-    if (partitionSpec_ != null) partitionSpec_.setTableName(tableName_);
-  }
-
-  public TableName getTableName() { return tableName_; }
-  public boolean isRefresh() { return isRefresh_; }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    if (tableName_ != null) {
-      String dbName = analyzer.getTargetDbName(tableName_);
-      tableName_ = new TableName(dbName, tableName_.getTbl());
-
-      if (isRefresh_) {
-        // Verify the user has privileges to access this table. Will throw if the parent
-        // database does not exist. Don't call getTable() to avoid loading the table
-        // metadata if it is not yet in this impalad's catalog cache.
-        if (!analyzer.dbContainsTable(dbName, tableName_.getTbl(), Privilege.ANY)) {
-          // Only throw an exception when the table does not exist for refresh statements
-          // since 'invalidate metadata' should add/remove tables created/dropped
-          // externally to Impala.
-          throw new AnalysisException(Analyzer.TBL_DOES_NOT_EXIST_ERROR_MSG + tableName_);
-        }
-        if (partitionSpec_ != null) {
-          partitionSpec_.setPrivilegeRequirement(Privilege.ANY);
-          partitionSpec_.analyze(analyzer);
-        }
-      } else {
-        // Verify the user has privileges to access this table.
-        analyzer.registerPrivReq(new PrivilegeRequestBuilder()
-            .onTable(dbName, tableName_.getTbl()).any().toRequest());
-      }
-    } else {
-      analyzer.registerPrivReq(new PrivilegeRequest(Privilege.ALL));
-    }
-  }
-
-  @Override
-  public String toSql() {
-    StringBuilder result = new StringBuilder();
-    if (isRefresh_) {
-      result.append("REFRESH");
-    } else {
-      result.append("INVALIDATE METADATA");
-    }
-
-    if (tableName_ != null) result.append(" ").append(tableName_);
-    if (partitionSpec_ != null) result.append(" " + partitionSpec_.toSql());
-    return result.toString();
-  }
-
-  public TResetMetadataRequest toThrift() {
-    TResetMetadataRequest params = new TResetMetadataRequest();
-    params.setIs_refresh(isRefresh_);
-    if (tableName_ != null) {
-      params.setTable_name(new TTableName(tableName_.getDb(), tableName_.getTbl()));
-    }
-    if (partitionSpec_ != null) {
-      params.setPartition_spec(partitionSpec_.toThrift());
-    }
-    return params;
-  }
-}
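
For reference, a minimal standalone sketch (not Impala code; the names are invented) of
the SQL text this statement represents: REFRESH always names a table and may name a
partition, while INVALIDATE METADATA may target a single table or, with no table, the
entire catalog.

public class ResetMetadataSqlDemo {
  static String toSql(boolean isRefresh, String tableName, String partitionSpec) {
    StringBuilder sql =
        new StringBuilder(isRefresh ? "REFRESH" : "INVALIDATE METADATA");
    if (tableName != null) sql.append(" ").append(tableName);
    if (partitionSpec != null) sql.append(" ").append(partitionSpec);
    return sql.toString();
  }

  public static void main(String[] args) {
    // REFRESH functional.alltypes PARTITION (year=2016)
    System.out.println(toSql(true, "functional.alltypes", "PARTITION (year=2016)"));
    // INVALIDATE METADATA
    System.out.println(toSql(false, null, null));
  }
}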

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/SelectList.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/SelectList.java b/fe/src/main/java/com/cloudera/impala/analysis/SelectList.java
deleted file mode 100644
index 429d488..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/SelectList.java
+++ /dev/null
@@ -1,94 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.List;
-
-import com.google.common.collect.Lists;
-
-/**
- * Select list items plus optional distinct clause and optional plan hints.
- */
-public class SelectList {
-  private List<String> planHints_;
-  private boolean isDistinct_;
-
-  /////////////////////////////////////////
-  // BEGIN: Members that need to be reset()
-
-  private final List<SelectListItem> items_;
-
-  // END: Members that need to be reset()
-  /////////////////////////////////////////
-
-  public SelectList(List<SelectListItem> items) {
-    isDistinct_ = false;
-    items_ = items;
-  }
-
-  public SelectList() {
-    isDistinct_ = false;
-    items_ = Lists.newArrayList();
-  }
-
-  public SelectList(List<SelectListItem> items, boolean isDistinct,
-      List<String> planHints) {
-    isDistinct_ = isDistinct;
-    items_ = items;
-    planHints_ = planHints;
-  }
-
-  /**
-   * C'tor for cloning.
-   */
-  public SelectList(SelectList other) {
-    planHints_ =
-        (other.planHints_ != null) ? Lists.newArrayList(other.planHints_) : null;
-    items_ = Lists.newArrayList();
-    for (SelectListItem item: other.items_) {
-      items_.add(item.clone());
-    }
-    isDistinct_ = other.isDistinct_;
-  }
-
-  public List<SelectListItem> getItems() { return items_; }
-  public void setPlanHints(List<String> planHints) { planHints_ = planHints; }
-  public List<String> getPlanHints() { return planHints_; }
-  public boolean isDistinct() { return isDistinct_; }
-  public void setIsDistinct(boolean value) { isDistinct_ = value; }
-  public boolean hasPlanHints() { return planHints_ != null; }
-
-  public void analyzePlanHints(Analyzer analyzer) {
-    if (planHints_ == null) return;
-    for (String hint: planHints_) {
-      if (!hint.equalsIgnoreCase("straight_join")) {
-        analyzer.addWarning("PLAN hint not recognized: " + hint);
-        continue;
-      }
-      analyzer.setIsStraightJoin();
-    }
-  }
-
-  @Override
-  public SelectList clone() { return new SelectList(this); }
-
-  public void reset() {
-    for (SelectListItem item: items_) {
-      if (!item.isStar()) item.getExpr().reset();
-    }
-  }
-}
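
A small standalone sketch (not Impala code; the names are invented) of the hint
handling in analyzePlanHints() above: only "straight_join" is recognized,
case-insensitively, and any other hint just produces a warning.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class PlanHintDemo {
  static boolean applyHints(List<String> hints, List<String> warnings) {
    boolean straightJoin = false;
    for (String hint : hints) {
      if (!hint.equalsIgnoreCase("straight_join")) {
        warnings.add("PLAN hint not recognized: " + hint);
        continue;
      }
      straightJoin = true;
    }
    return straightJoin;
  }

  public static void main(String[] args) {
    List<String> warnings = new ArrayList<>();
    System.out.println(applyHints(Arrays.asList("STRAIGHT_JOIN"), warnings));  // true
    System.out.println(applyHints(Arrays.asList("foo"), warnings));            // false
    System.out.println(warnings);  // [PLAN hint not recognized: foo]
  }
}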

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/SelectListItem.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/SelectListItem.java b/fe/src/main/java/com/cloudera/impala/analysis/SelectListItem.java
deleted file mode 100644
index 96831f3..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/SelectListItem.java
+++ /dev/null
@@ -1,129 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.List;
-
-import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
-
-class SelectListItem {
-  private final Expr expr_;
-  private String alias_;
-
-  // for "[path.]*" (excludes trailing '*')
-  private final List<String> rawPath_;
-  private final boolean isStar_;
-
-  public SelectListItem(Expr expr, String alias) {
-    super();
-    Preconditions.checkNotNull(expr);
-    expr_ = expr;
-    alias_ = alias;
-    isStar_ = false;
-    rawPath_ = null;
-  }
-
-  // select list item corresponding to path_to_struct.*
-  static public SelectListItem createStarItem(List<String> rawPath) {
-    return new SelectListItem(rawPath);
-  }
-
-  private SelectListItem(List<String> path) {
-    super();
-    expr_ = null;
-    isStar_ = true;
-    rawPath_ = path;
-  }
-
-  public Expr getExpr() { return expr_; }
-  public boolean isStar() { return isStar_; }
-  public String getAlias() { return alias_; }
-  public void setAlias(String alias) { alias_ = alias; }
-  public List<String> getRawPath() { return rawPath_; }
-
-  @Override
-  public String toString() {
-    if (!isStar_) {
-      Preconditions.checkNotNull(expr_);
-      return expr_.toSql() + ((alias_ != null) ? " " + alias_ : "");
-    } else if (rawPath_ != null) {
-      Preconditions.checkState(isStar_);
-      return Joiner.on(".").join(rawPath_) + ".*";
-    } else {
-      return "*";
-    }
-  }
-
-  public String toSql() {
-    if (!isStar_) {
-      Preconditions.checkNotNull(expr_);
-      // Enclose aliases in quotes if Hive cannot parse them without quotes.
-      // This is needed for view compatibility between Impala and Hive.
-      String aliasSql = null;
-      if (alias_ != null) aliasSql = ToSqlUtils.getIdentSql(alias_);
-      return expr_.toSql() + ((aliasSql != null) ? " " + aliasSql : "");
-    } else if (rawPath_ != null) {
-      Preconditions.checkState(isStar_);
-      StringBuilder result = new StringBuilder();
-      for (String p: rawPath_) {
-        if (result.length() > 0) result.append(".");
-        result.append(ToSqlUtils.getIdentSql(p.toLowerCase()));
-      }
-      result.append(".*");
-      return result.toString();
-    } else {
-      return "*";
-    }
-  }
-
-  /**
-   * Returns a column label for this select list item.
-   * If an alias was given, then the column label is the lower case alias.
-   * If expr is a SlotRef then directly use its lower case column name.
-   * Otherwise, the label is the lower case toSql() of expr or a Hive auto-generated
-   * column name (depending on useHiveColLabels).
-   * Hive's auto-generated column labels have a "_c" prefix and a select-list pos suffix,
-   * e.g., "_c0", "_c1", "_c2", etc.
-   *
-   * Using auto-generated columns that are consistent with Hive is important
-   * for view compatibility between Impala and Hive.
-   */
-  public String toColumnLabel(int selectListPos, boolean useHiveColLabels) {
-    if (alias_ != null) return alias_.toLowerCase();
-    if (expr_ instanceof SlotRef) {
-      SlotRef slotRef = (SlotRef) expr_;
-      return Joiner.on(".").join(slotRef.getResolvedPath().getRawPath());
-    }
-    // Optionally return auto-generated column label.
-    if (useHiveColLabels) return "_c" + selectListPos;
-    // Abbreviate the toSql() for analytic exprs.
-    if (expr_ instanceof AnalyticExpr) {
-      AnalyticExpr expr = (AnalyticExpr) expr_;
-      return expr.getFnCall().toSql() + " OVER(...)";
-    }
-    return expr_.toSql().toLowerCase();
-  }
-
-  @Override
-  public SelectListItem clone() {
-    if (isStar_) return createStarItem(rawPath_);
-    return new SelectListItem(expr_.clone(), alias_);
-  }
-
-}
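
A standalone sketch (not Impala code; the names are invented) of the labelling rules
documented in toColumnLabel() above: prefer the lower-cased alias, then a plain column
name, then either a Hive-style "_c<pos>" label or the lower-cased SQL text of the
expression.

public class ColumnLabelDemo {
  static String columnLabel(String alias, String columnName, String exprSql,
      int selectListPos, boolean useHiveColLabels) {
    if (alias != null) return alias.toLowerCase();
    if (columnName != null) return columnName.toLowerCase();
    if (useHiveColLabels) return "_c" + selectListPos;
    return exprSql.toLowerCase();
  }

  public static void main(String[] args) {
    System.out.println(columnLabel("Total", null, null, 0, false));        // total
    System.out.println(columnLabel(null, "Price", null, 1, false));        // price
    System.out.println(columnLabel(null, null, "price * qty", 2, true));   // _c2
    System.out.println(columnLabel(null, null, "price * qty", 2, false));  // price * qty
  }
}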

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/SelectStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/SelectStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/SelectStmt.java
deleted file mode 100644
index 864dcc8..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/SelectStmt.java
+++ /dev/null
@@ -1,1021 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Set;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.analysis.Path.PathType;
-import com.cloudera.impala.catalog.Column;
-import com.cloudera.impala.catalog.StructField;
-import com.cloudera.impala.catalog.StructType;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.catalog.TableLoadingException;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.ColumnAliasGenerator;
-import com.cloudera.impala.common.TableAliasGenerator;
-import com.cloudera.impala.common.TreeNode;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Predicates;
-import com.google.common.collect.Iterables;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Sets;
-
-/**
- * Representation of a single select block, including GROUP BY, ORDER BY and HAVING
- * clauses.
- */
-public class SelectStmt extends QueryStmt {
-  private final static Logger LOG = LoggerFactory.getLogger(SelectStmt.class);
-
-  /////////////////////////////////////////
-  // BEGIN: Members that need to be reset()
-
-  protected SelectList selectList_;
-  protected final ArrayList<String> colLabels_; // lower case column labels
-  protected final FromClause fromClause_;
-  protected Expr whereClause_;
-  protected ArrayList<Expr> groupingExprs_;
-  protected final Expr havingClause_;  // original having clause
-
-  // havingClause with aliases and agg output resolved
-  private Expr havingPred_;
-
-  // set if we have any kind of aggregation operation, including SELECT DISTINCT
-  private AggregateInfo aggInfo_;
-
-  // set if we have AnalyticExprs in the select list/order by clause
-  private AnalyticInfo analyticInfo_;
-
-  // SQL string of this SelectStmt before inline-view expression substitution.
-  // Set in analyze().
-  protected String sqlString_;
-
-  // substitutes all exprs in this select block to reference base tables
-  // directly
-  private ExprSubstitutionMap baseTblSmap_ = new ExprSubstitutionMap();
-
-  // END: Members that need to be reset()
-  /////////////////////////////////////////
-
-  SelectStmt(SelectList selectList,
-             FromClause fromClause,
-             Expr wherePredicate, ArrayList<Expr> groupingExprs,
-             Expr havingPredicate, ArrayList<OrderByElement> orderByElements,
-             LimitElement limitElement) {
-    super(orderByElements, limitElement);
-    selectList_ = selectList;
-    if (fromClause == null) {
-      fromClause_ = new FromClause();
-    } else {
-      fromClause_ = fromClause;
-    }
-    whereClause_ = wherePredicate;
-    groupingExprs_ = groupingExprs;
-    havingClause_ = havingPredicate;
-    colLabels_ = Lists.newArrayList();
-    havingPred_ = null;
-    aggInfo_ = null;
-    sortInfo_ = null;
-  }
-
-  /**
-   * @return the original select list items from the query
-   */
-  public SelectList getSelectList() { return selectList_; }
-
-  /**
-   * @return the HAVING clause post-analysis and with aliases resolved
-   */
-  public Expr getHavingPred() { return havingPred_; }
-
-  public List<TableRef> getTableRefs() { return fromClause_.getTableRefs(); }
-  public boolean hasWhereClause() { return whereClause_ != null; }
-  public boolean hasGroupByClause() { return groupingExprs_ != null; }
-  public Expr getWhereClause() { return whereClause_; }
-  public void setWhereClause(Expr whereClause) { whereClause_ = whereClause; }
-  public AggregateInfo getAggInfo() { return aggInfo_; }
-  public boolean hasAggInfo() { return aggInfo_ != null; }
-  public AnalyticInfo getAnalyticInfo() { return analyticInfo_; }
-  public boolean hasAnalyticInfo() { return analyticInfo_ != null; }
-  public boolean hasHavingClause() { return havingClause_ != null; }
-  @Override
-  public ArrayList<String> getColLabels() { return colLabels_; }
-  public ExprSubstitutionMap getBaseTblSmap() { return baseTblSmap_; }
-
-  // Column alias generator used during query rewriting.
-  private ColumnAliasGenerator columnAliasGenerator_ = null;
-  public ColumnAliasGenerator getColumnAliasGenerator() {
-    if (columnAliasGenerator_ == null) {
-      columnAliasGenerator_ = new ColumnAliasGenerator(colLabels_, null);
-    }
-    return columnAliasGenerator_;
-  }
-
-  // Table alias generator used during query rewriting.
-  private TableAliasGenerator tableAliasGenerator_ = null;
-  public TableAliasGenerator getTableAliasGenerator() {
-    if (tableAliasGenerator_ == null) {
-      tableAliasGenerator_ = new TableAliasGenerator(analyzer_, null);
-    }
-    return tableAliasGenerator_;
-  }
-
-  /**
-   * Creates resultExprs and baseTblResultExprs.
-   */
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    if (isAnalyzed()) return;
-    super.analyze(analyzer);
-
-    fromClause_.analyze(analyzer);
-
-    // Generate !empty() predicates to filter out empty collections.
-    // Skip this step when analyzing a WITH-clause because CollectionTableRefs
-    // do not register collection slots in their parent in that context
-    // (see CollectionTableRef.analyze()).
-    if (!analyzer.isWithClause()) registerIsNotEmptyPredicates(analyzer);
-
-    // analyze plan hints from select list
-    selectList_.analyzePlanHints(analyzer);
-
-    // populate resultExprs_, aliasSmap_, and colLabels_
-    for (int i = 0; i < selectList_.getItems().size(); ++i) {
-      SelectListItem item = selectList_.getItems().get(i);
-      if (item.isStar()) {
-        if (item.getRawPath() != null) {
-          Path resolvedPath = analyzeStarPath(item.getRawPath(), analyzer);
-          expandStar(resolvedPath, analyzer);
-        } else {
-          expandStar(analyzer);
-        }
-      } else {
-        // Analyze the resultExpr before generating a label to ensure enforcement
-        // of expr child and depth limits (toColumnLabel() may call toSql()).
-        item.getExpr().analyze(analyzer);
-        if (item.getExpr().contains(Predicates.instanceOf(Subquery.class))) {
-          throw new AnalysisException(
-              "Subqueries are not supported in the select list.");
-        }
-        resultExprs_.add(item.getExpr());
-        String label = item.toColumnLabel(i, analyzer.useHiveColLabels());
-        SlotRef aliasRef = new SlotRef(label);
-        Expr existingAliasExpr = aliasSmap_.get(aliasRef);
-        if (existingAliasExpr != null && !existingAliasExpr.equals(item.getExpr())) {
-          // If we have already seen this alias, it refers to more than one column and
-          // therefore is ambiguous.
-          ambiguousAliasList_.add(aliasRef);
-        }
-        aliasSmap_.put(aliasRef, item.getExpr().clone());
-        colLabels_.add(label);
-      }
-    }
-
-    // Star exprs only expand to the scalar-typed columns/fields, so
-    // the resultExprs_ could be empty.
-    if (resultExprs_.isEmpty()) {
-      throw new AnalysisException("The star exprs expanded to an empty select list " +
-          "because the referenced tables only have complex-typed columns.\n" +
-          "Star exprs only expand to scalar-typed columns because complex-typed exprs " +
-          "are currently not supported in the select list.\n" +
-          "Affected select statement:\n" + toSql());
-    }
-
-    for (Expr expr: resultExprs_) {
-      // Complex types are currently not supported in the select list because we'd need
-      // to serialize them in a meaningful way.
-      if (expr.getType().isComplexType()) {
-        throw new AnalysisException(String.format(
-            "Expr '%s' in select list returns a complex type '%s'.\n" +
-            "Only scalar types are allowed in the select list.",
-            expr.toSql(), expr.getType().toSql()));
-      }
-      if (!expr.getType().isSupported()) {
-        throw new AnalysisException("Unsupported type '"
-            + expr.getType().toSql() + "' in '" + expr.toSql() + "'.");
-      }
-    }
-
-    if (TreeNode.contains(resultExprs_, AnalyticExpr.class)) {
-      if (fromClause_.isEmpty()) {
-        throw new AnalysisException("Analytic expressions require FROM clause.");
-      }
-
-      // do this here, not after analyzeAggregation(), otherwise the AnalyticExprs
-      // will get substituted away
-      if (selectList_.isDistinct()) {
-        throw new AnalysisException(
-            "cannot combine SELECT DISTINCT with analytic functions");
-      }
-    }
-
-    if (whereClause_ != null) {
-      whereClause_.analyze(analyzer);
-      if (whereClause_.contains(Expr.isAggregatePredicate())) {
-        throw new AnalysisException(
-            "aggregate function not allowed in WHERE clause");
-      }
-      whereClause_.checkReturnsBool("WHERE clause", false);
-      Expr e = whereClause_.findFirstOf(AnalyticExpr.class);
-      if (e != null) {
-        throw new AnalysisException(
-            "WHERE clause must not contain analytic expressions: " + e.toSql());
-      }
-      analyzer.registerConjuncts(whereClause_, false);
-    }
-
-    createSortInfo(analyzer);
-    analyzeAggregation(analyzer);
-    createAnalyticInfo(analyzer);
-    if (evaluateOrderBy_) createSortTupleInfo(analyzer);
-
-    // Remember the SQL string before inline-view expression substitution.
-    sqlString_ = toSql();
-    resolveInlineViewRefs(analyzer);
-
-    // If this block's select-project-join portion returns an empty result set and the
-    // block has no aggregation, then mark this block as returning an empty result set.
-    if (analyzer.hasEmptySpjResultSet() && aggInfo_ == null) {
-      analyzer.setHasEmptyResultSet();
-    }
-
-    ColumnLineageGraph graph = analyzer.getColumnLineageGraph();
-    if (aggInfo_ != null && !aggInfo_.getAggregateExprs().isEmpty()) {
-      graph.addDependencyPredicates(aggInfo_.getGroupingExprs());
-    }
-    if (sortInfo_ != null && hasLimit()) {
-      // When there is a LIMIT clause in conjunction with an ORDER BY, the ordering exprs
-      // must be added to the column lineage graph.
-      graph.addDependencyPredicates(sortInfo_.getOrderingExprs());
-    }
-
-    if (aggInfo_ != null) LOG.debug("post-analysis " + aggInfo_.debugString());
-  }
-
-  /**
-   * Generates and registers !empty() predicates to filter out empty collections directly
-   * in the parent scan of collection table refs. This is a performance optimization to
-   * avoid the expensive processing of empty collections inside a subplan that would
-   * yield an empty result set.
-   *
-   * For correctness purposes, the predicates are generated in cases where we can ensure
-   * that they will be assigned only to the parent scan, and no other plan node.
-   *
-   * The conditions are as follows:
-   * - collection table ref is relative and non-correlated
-   * - collection table ref represents the rhs of an inner/cross/semi join
-   * - collection table ref's parent tuple is not outer joined
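-   *
-   * For example (names purely illustrative): in "SELECT a.item FROM tbl t, t.arr a"
-   * the collection ref t.arr is relative, non-correlated and inner joined, so a
-   * !empty(t.arr) predicate can be assigned to the scan of t to skip rows whose
-   * 'arr' collection is empty.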
-   *
-   * TODO: In some cases, it is possible to generate !empty() predicates for a correlated
-   * table ref, but in general, that is not correct for non-trivial query blocks.
-   * For example, if the block with the correlated ref has an aggregation then adding a
-   * !empty() predicate would incorrectly discard rows from the final result set.
-   * TODO: Evaluating !empty() predicates at non-scan nodes interacts poorly with our BE
-   * projection of collection slots. For example, rows could incorrectly be filtered if
-   * a !empty() predicate is assigned to a plan node that comes after the unnest of the
-   * collection that also performs the projection.
-   */
-  private void registerIsNotEmptyPredicates(Analyzer analyzer) throws AnalysisException {
-    for (TableRef tblRef: fromClause_.getTableRefs()) {
-      Preconditions.checkState(tblRef.isResolved());
-      if (!(tblRef instanceof CollectionTableRef)) continue;
-      CollectionTableRef ref = (CollectionTableRef) tblRef;
-      // Skip non-relative and correlated refs.
-      if (!ref.isRelative() || ref.isCorrelated()) continue;
-      // Skip outer and anti joins.
-      if (ref.getJoinOp().isOuterJoin() || ref.getJoinOp().isAntiJoin()) continue;
-      // Do not generate a predicate if the parent tuple is outer joined.
-      if (analyzer.isOuterJoined(ref.getResolvedPath().getRootDesc().getId())) continue;
-      IsNotEmptyPredicate isNotEmptyPred =
-          new IsNotEmptyPredicate(ref.getCollectionExpr().clone());
-      isNotEmptyPred.analyze(analyzer);
-      // Register the predicate as an On-clause conjunct because it should only
-      // affect the result of this join and not the whole FROM clause.
-      analyzer.registerOnClauseConjuncts(
-          Lists.<Expr>newArrayList(isNotEmptyPred), ref);
-    }
-  }
-
-  /**
-   * Marks the slots required by all unassigned join predicates, as well as by the
-   * exprs in aggInfo and sortInfo, as materialized.
-   */
-  @Override
-  public void materializeRequiredSlots(Analyzer analyzer) {
-    // Mark unassigned join predicates. Some predicates that must be evaluated by a join
-    // can also be safely evaluated below the join (picked up by getBoundPredicates()).
-    // Such predicates will be marked twice and that is ok.
-    List<Expr> unassigned =
-        analyzer.getUnassignedConjuncts(getTableRefIds(), true);
-    List<Expr> unassignedJoinConjuncts = Lists.newArrayList();
-    for (Expr e: unassigned) {
-      if (analyzer.evalByJoin(e)) unassignedJoinConjuncts.add(e);
-    }
-    List<Expr> baseTblJoinConjuncts =
-        Expr.substituteList(unassignedJoinConjuncts, baseTblSmap_, analyzer, false);
-    materializeSlots(analyzer, baseTblJoinConjuncts);
-
-    if (evaluateOrderBy_) {
-      // mark ordering exprs before marking agg/analytic exprs because they could contain
-      // agg/analytic exprs that are not referenced anywhere but the ORDER BY clause
-      sortInfo_.materializeRequiredSlots(analyzer, baseTblSmap_);
-    }
-
-    if (hasAnalyticInfo()) {
-      // Mark analytic exprs before marking agg exprs because they could contain agg
-      // exprs that are not referenced anywhere but the analytic expr.
-      // Gather unassigned predicates and mark their slots. It is not desirable
-      // to account for propagated predicates because if an analytic expr is only
-      // referenced by a propagated predicate, then it's better to not materialize the
-      // analytic expr at all.
-      ArrayList<TupleId> tids = Lists.newArrayList();
-      getMaterializedTupleIds(tids); // includes the analytic tuple
-      List<Expr> conjuncts = analyzer.getUnassignedConjuncts(tids, false);
-      materializeSlots(analyzer, conjuncts);
-      analyticInfo_.materializeRequiredSlots(analyzer, baseTblSmap_);
-    }
-
-    if (aggInfo_ != null) {
-      // mark all agg exprs needed for HAVING pred and binding predicates as materialized
-      // before calling AggregateInfo.materializeRequiredSlots(), otherwise they won't
-      // show up in AggregateInfo.getMaterializedAggregateExprs()
-      ArrayList<Expr> havingConjuncts = Lists.newArrayList();
-      if (havingPred_ != null) havingConjuncts.add(havingPred_);
-      // Ignore predicates bound to a group-by slot because those
-      // are already evaluated below this agg node (e.g., in a scan).
-      Set<SlotId> groupBySlots = Sets.newHashSet();
-      for (int i = 0; i < aggInfo_.getGroupingExprs().size(); ++i) {
-        groupBySlots.add(aggInfo_.getOutputTupleDesc().getSlots().get(i).getId());
-      }
-      // Binding predicates are assigned to the final output tuple of the aggregation,
-      // which is the tuple of the 2nd phase agg for distinct aggs.
-      ArrayList<Expr> bindingPredicates =
-          analyzer.getBoundPredicates(aggInfo_.getResultTupleId(), groupBySlots, false);
-      havingConjuncts.addAll(bindingPredicates);
-      havingConjuncts.addAll(
-          analyzer.getUnassignedConjuncts(aggInfo_.getResultTupleId().asList(), false));
-      materializeSlots(analyzer, havingConjuncts);
-      aggInfo_.materializeRequiredSlots(analyzer, baseTblSmap_);
-    }
-  }
-
-  /**
-   * Populates baseTblSmap_ with our combined inline view smap and creates
-   * baseTblResultExprs.
-   */
-  protected void resolveInlineViewRefs(Analyzer analyzer)
-      throws AnalysisException {
-    // Gather the inline view substitution maps from the enclosed inline views
-    for (TableRef tblRef: fromClause_) {
-      if (tblRef instanceof InlineViewRef) {
-        InlineViewRef inlineViewRef = (InlineViewRef) tblRef;
-        baseTblSmap_ =
-            ExprSubstitutionMap.combine(baseTblSmap_, inlineViewRef.getBaseTblSmap());
-      }
-    }
-    baseTblResultExprs_ =
-        Expr.trySubstituteList(resultExprs_, baseTblSmap_, analyzer, false);
-    LOG.trace("baseTblSmap_: " + baseTblSmap_.debugString());
-    LOG.trace("resultExprs: " + Expr.debugString(resultExprs_));
-    LOG.trace("baseTblResultExprs: " + Expr.debugString(baseTblResultExprs_));
-  }
-
-  public List<TupleId> getTableRefIds() {
-    List<TupleId> result = Lists.newArrayList();
-    for (TableRef ref: fromClause_) {
-      result.add(ref.getId());
-    }
-    return result;
-  }
-
-  /**
-   * Resolves the given raw path as a STAR path and checks its legality.
-   * Returns the resolved legal path, or throws if the raw path could not
-   * be resolved or is an illegal star path.
-   */
-  private Path analyzeStarPath(List<String> rawPath, Analyzer analyzer)
-      throws AnalysisException {
-    Path resolvedPath = null;
-    try {
-      resolvedPath = analyzer.resolvePath(rawPath, PathType.STAR);
-    } catch (TableLoadingException e) {
-      // Should never happen because we only check registered table aliases.
-      Preconditions.checkState(false);
-    }
-    Preconditions.checkNotNull(resolvedPath);
-    return resolvedPath;
-  }
-
-  /**
-   * Expand "*" select list item, ignoring semi-joined tables as well as
-   * complex-typed fields because those are currently illegal in any select
-   * list (even for inline views, etc.)
-   */
-  private void expandStar(Analyzer analyzer) throws AnalysisException {
-    if (fromClause_.isEmpty()) {
-      throw new AnalysisException("'*' expression in select list requires FROM clause.");
-    }
-    // expand in From clause order
-    for (TableRef tableRef: fromClause_) {
-      if (analyzer.isSemiJoined(tableRef.getId())) continue;
-      Path resolvedPath = new Path(tableRef.getDesc(), Collections.<String>emptyList());
-      Preconditions.checkState(resolvedPath.resolve());
-      expandStar(resolvedPath, analyzer);
-    }
-  }
-
-  /**
-   * Expand "path.*" from a resolved path, ignoring complex-typed fields because those
-   * are currently illegal in any select list (even for inline views, etc.)
-   */
-  private void expandStar(Path resolvedPath, Analyzer analyzer)
-      throws AnalysisException {
-    Preconditions.checkState(resolvedPath.isResolved());
-    if (resolvedPath.destTupleDesc() != null &&
-        resolvedPath.destTupleDesc().getTable() != null &&
-        resolvedPath.destTupleDesc().getPath().getMatchedTypes().isEmpty()) {
-      // The resolved path targets a registered tuple descriptor of a catalog
-      // table. Expand the '*' based on the Hive-column order.
-      TupleDescriptor tupleDesc = resolvedPath.destTupleDesc();
-      Table table = tupleDesc.getTable();
-      for (Column c: table.getColumnsInHiveOrder()) {
-        addStarResultExpr(resolvedPath, analyzer, c.getName());
-      }
-    } else {
-      // The resolved path does not target the descriptor of a catalog table.
-      // Expand '*' based on the destination type of the resolved path.
-      Preconditions.checkState(resolvedPath.destType().isStructType());
-      StructType structType = (StructType) resolvedPath.destType();
-      Preconditions.checkNotNull(structType);
-
-      // Star expansion for references to nested collections.
-      // Collection Type                    Star Expansion
-      // array<int>                     --> item
-      // array<struct<f1,f2,...,fn>>    --> f1, f2, ..., fn
-      // map<int,int>                   --> key, value
-      // map<int,struct<f1,f2,...,fn>>  --> key, f1, f2, ..., fn
-      if (structType instanceof CollectionStructType) {
-        CollectionStructType cst = (CollectionStructType) structType;
-        if (cst.isMapStruct()) {
-          addStarResultExpr(resolvedPath, analyzer, Path.MAP_KEY_FIELD_NAME);
-        }
-        if (cst.getOptionalField().getType().isStructType()) {
-          structType = (StructType) cst.getOptionalField().getType();
-          for (StructField f: structType.getFields()) {
-            addStarResultExpr(
-                resolvedPath, analyzer, cst.getOptionalField().getName(), f.getName());
-          }
-        } else if (cst.isMapStruct()) {
-          addStarResultExpr(resolvedPath, analyzer, Path.MAP_VALUE_FIELD_NAME);
-        } else {
-          addStarResultExpr(resolvedPath, analyzer, Path.ARRAY_ITEM_FIELD_NAME);
-        }
-      } else {
-        // Default star expansion.
-        for (StructField f: structType.getFields()) {
-          addStarResultExpr(resolvedPath, analyzer, f.getName());
-        }
-      }
-    }
-  }
-
-  /**
-   * Helper function used during star expansion to add a single result expr
-   * based on a given raw path to be resolved relative to an existing path.
-   * Ignores paths with a complex-typed destination because they are currently
-   * illegal in any select list (even for inline views, etc.)
-   */
-  private void addStarResultExpr(Path resolvedPath, Analyzer analyzer,
-      String... relRawPath) throws AnalysisException {
-    Path p = Path.createRelPath(resolvedPath, relRawPath);
-    Preconditions.checkState(p.resolve());
-    if (p.destType().isComplexType()) return;
-    SlotDescriptor slotDesc = analyzer.registerSlotRef(p);
-    SlotRef slotRef = new SlotRef(slotDesc);
-    slotRef.analyze(analyzer);
-    resultExprs_.add(slotRef);
-    colLabels_.add(relRawPath[relRawPath.length - 1]);
-  }
-
-  /**
-   * Analyze aggregation-relevant components of the select block (Group By clause,
-   * select list, Order By clause), substitute AVG with SUM/COUNT, create the
-   * AggregateInfo, including the agg output tuple, and transform all post-agg exprs
-   * given AggregateInfo's smap.
-   */
-  private void analyzeAggregation(Analyzer analyzer) throws AnalysisException {
-    // Analyze the HAVING clause first so we can check if it contains aggregates.
-    // We need to analyze/register it even if we are not computing aggregates.
-    if (havingClause_ != null) {
-      if (havingClause_.contains(Predicates.instanceOf(Subquery.class))) {
-        throw new AnalysisException(
-            "Subqueries are not supported in the HAVING clause.");
-      }
-      // substitute aliases in place (ordinals not allowed in having clause)
-      havingPred_ = havingClause_.substitute(aliasSmap_, analyzer, false);
-      havingPred_.checkReturnsBool("HAVING clause", true);
-      // can't contain analytic exprs
-      Expr analyticExpr = havingPred_.findFirstOf(AnalyticExpr.class);
-      if (analyticExpr != null) {
-        throw new AnalysisException(
-            "HAVING clause must not contain analytic expressions: "
-               + analyticExpr.toSql());
-      }
-    }
-
-    if (groupingExprs_ == null && !selectList_.isDistinct()
-        && !TreeNode.contains(resultExprs_, Expr.isAggregatePredicate())
-        && (havingPred_ == null
-            || !havingPred_.contains(Expr.isAggregatePredicate()))
-        && (sortInfo_ == null
-            || !TreeNode.contains(sortInfo_.getOrderingExprs(),
-                                  Expr.isAggregatePredicate()))) {
-      // We're not computing aggregates but we still need to register the HAVING
-      // clause which could, e.g., contain a constant expression evaluating to false.
-      if (havingPred_ != null) analyzer.registerConjuncts(havingPred_, true);
-      return;
-    }
-
-    // If we're computing an aggregate, we must have a FROM clause.
-    if (fromClause_.isEmpty()) {
-      throw new AnalysisException(
-          "aggregation without a FROM clause is not allowed");
-    }
-
-    if (selectList_.isDistinct()
-        && (groupingExprs_ != null
-            || TreeNode.contains(resultExprs_, Expr.isAggregatePredicate())
-            || (havingPred_ != null
-                && havingPred_.contains(Expr.isAggregatePredicate())))) {
-      throw new AnalysisException(
-        "cannot combine SELECT DISTINCT with aggregate functions or GROUP BY");
-    }
-
-    // Disallow '*' with explicit GROUP BY or aggregation function (we can't group by
-    // '*', and if you need to name all star-expanded cols in the group by clause you
-    // might as well do it in the select list).
-    if (groupingExprs_ != null ||
-        TreeNode.contains(resultExprs_, Expr.isAggregatePredicate())) {
-      for (SelectListItem item : selectList_.getItems()) {
-        if (item.isStar()) {
-          throw new AnalysisException(
-              "cannot combine '*' in select list with grouping or aggregation");
-        }
-      }
-    }
-
-    // disallow subqueries in the GROUP BY clause
-    if (groupingExprs_ != null) {
-      for (Expr expr: groupingExprs_) {
-        if (expr.contains(Predicates.instanceOf(Subquery.class))) {
-          throw new AnalysisException(
-              "Subqueries are not supported in the GROUP BY clause.");
-        }
-      }
-    }
-
-    // analyze grouping exprs
-    ArrayList<Expr> groupingExprsCopy = Lists.newArrayList();
-    if (groupingExprs_ != null) {
-      // make a deep copy here, we don't want to modify the original
-      // exprs during analysis (in case we need to print them later)
-      groupingExprsCopy = Expr.cloneList(groupingExprs_);
-
-      substituteOrdinalsAliases(groupingExprsCopy, "GROUP BY", analyzer);
-
-      for (int i = 0; i < groupingExprsCopy.size(); ++i) {
-        groupingExprsCopy.get(i).analyze(analyzer);
-        if (groupingExprsCopy.get(i).contains(Expr.isAggregatePredicate())) {
-          // reference the original expr in the error msg
-          throw new AnalysisException(
-              "GROUP BY expression must not contain aggregate functions: "
-                  + groupingExprs_.get(i).toSql());
-        }
-        if (groupingExprsCopy.get(i).contains(AnalyticExpr.class)) {
-          // reference the original expr in the error msg
-          throw new AnalysisException(
-              "GROUP BY expression must not contain analytic expressions: "
-                  + groupingExprsCopy.get(i).toSql());
-        }
-      }
-    }
-
-    // Collect the aggregate expressions from the SELECT, HAVING and ORDER BY clauses
-    // of this statement.
-    ArrayList<FunctionCallExpr> aggExprs = Lists.newArrayList();
-    TreeNode.collect(resultExprs_, Expr.isAggregatePredicate(), aggExprs);
-    if (havingPred_ != null) {
-      havingPred_.collect(Expr.isAggregatePredicate(), aggExprs);
-    }
-    if (sortInfo_ != null) {
-      // TODO: Avoid evaluating aggs in ignored order-bys
-      TreeNode.collect(sortInfo_.getOrderingExprs(), Expr.isAggregatePredicate(),
-          aggExprs);
-    }
-
-    // Optionally rewrite all count(distinct <expr>) into equivalent NDV() calls.
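-    // This only happens when the APPX_COUNT_DISTINCT query option is enabled (checked
-    // below); ndv() produces an approximation of the distinct count.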
-    ExprSubstitutionMap ndvSmap = null;
-    if (analyzer.getQueryCtx().getRequest().query_options.appx_count_distinct) {
-      ndvSmap = new ExprSubstitutionMap();
-      for (FunctionCallExpr aggExpr: aggExprs) {
-        if (!aggExpr.isDistinct()
-            || !aggExpr.getFnName().getFunction().equals("count")
-            || aggExpr.getParams().size() != 1) {
-          continue;
-        }
-        FunctionCallExpr ndvFnCall =
-            new FunctionCallExpr("ndv", aggExpr.getParams().exprs());
-        ndvFnCall.analyzeNoThrow(analyzer);
-        Preconditions.checkState(ndvFnCall.getType().equals(aggExpr.getType()));
-        ndvSmap.put(aggExpr, ndvFnCall);
-      }
-      // Replace all count(distinct <expr>) with NDV(<expr>).
-      List<Expr> substAggExprs = Expr.substituteList(aggExprs, ndvSmap, analyzer, false);
-      aggExprs.clear();
-      for (Expr aggExpr: substAggExprs) {
-        Preconditions.checkState(aggExpr instanceof FunctionCallExpr);
-        aggExprs.add((FunctionCallExpr) aggExpr);
-      }
-    }
-
-    // When DISTINCT aggregates are present, non-distinct (i.e. ALL) aggregates are
-    // evaluated in two phases (see AggregateInfo for more details). In particular,
-    // COUNT(c) in "SELECT COUNT(c), AGG(DISTINCT d) from R" is transformed to
-    // "SELECT SUM(cnt) FROM (SELECT COUNT(c) as cnt from R group by d ) S".
-    // Since a group-by expression is added to the inner query it returns no rows if
-    // R is empty, in which case the SUM of COUNTs will return NULL.
-    // However the original COUNT(c) should have returned 0 instead of NULL in this case.
-    // Therefore, COUNT([ALL]) is transformed into zeroifnull(COUNT([ALL])) if
-    // i) There is no GROUP-BY clause, and
-    // ii) Other DISTINCT aggregates are present.
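-    // E.g., with no GROUP BY, "SELECT count(c), count(distinct d) FROM r" effectively
-    // becomes "SELECT zeroifnull(count(c)), count(distinct d) FROM r".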
-    ExprSubstitutionMap countAllMap = createCountAllMap(aggExprs, analyzer);
-    countAllMap = ExprSubstitutionMap.compose(ndvSmap, countAllMap, analyzer);
-    List<Expr> substitutedAggs =
-        Expr.substituteList(aggExprs, countAllMap, analyzer, false);
-    aggExprs.clear();
-    TreeNode.collect(substitutedAggs, Expr.isAggregatePredicate(), aggExprs);
-    createAggInfo(groupingExprsCopy, aggExprs, analyzer);
-
-    // combine the count-all/ndv smap with the one that produces the final agg output
-    AggregateInfo finalAggInfo =
-        aggInfo_.getSecondPhaseDistinctAggInfo() != null
-          ? aggInfo_.getSecondPhaseDistinctAggInfo()
-          : aggInfo_;
-
-    ExprSubstitutionMap combinedSmap =
-        ExprSubstitutionMap.compose(countAllMap, finalAggInfo.getOutputSmap(), analyzer);
-    LOG.trace("combined smap: " + combinedSmap.debugString());
-
-    // change select list, having and ordering exprs to point to agg output. We need
-    // to reanalyze the exprs at this point.
-    LOG.trace("desctbl: " + analyzer.getDescTbl().debugString());
-    LOG.trace("resultexprs: " + Expr.debugString(resultExprs_));
-    resultExprs_ = Expr.substituteList(resultExprs_, combinedSmap, analyzer, false);
-    LOG.trace("post-agg selectListExprs: " + Expr.debugString(resultExprs_));
-    if (havingPred_ != null) {
-      // Make sure the predicate in the HAVING clause does not contain a
-      // subquery.
-      Preconditions.checkState(!havingPred_.contains(
-          Predicates.instanceOf(Subquery.class)));
-      havingPred_ = havingPred_.substitute(combinedSmap, analyzer, false);
-      analyzer.registerConjuncts(havingPred_, true);
-      LOG.debug("post-agg havingPred: " + havingPred_.debugString());
-    }
-    if (sortInfo_ != null) {
-      sortInfo_.substituteOrderingExprs(combinedSmap, analyzer);
-      LOG.debug("post-agg orderingExprs: " +
-          Expr.debugString(sortInfo_.getOrderingExprs()));
-    }
-
-    // check that all post-agg exprs point to agg output
-    for (int i = 0; i < selectList_.getItems().size(); ++i) {
-      if (!resultExprs_.get(i).isBound(finalAggInfo.getOutputTupleId())) {
-        SelectListItem selectListItem = selectList_.getItems().get(i);
-        Preconditions.checkState(!selectListItem.isStar());
-        throw new AnalysisException(
-            "select list expression not produced by aggregation output "
-            + "(missing from GROUP BY clause?): "
-            + selectListItem.getExpr().toSql());
-      }
-    }
-    if (orderByElements_ != null) {
-      for (int i = 0; i < orderByElements_.size(); ++i) {
-        if (!sortInfo_.getOrderingExprs().get(i).isBound(
-            finalAggInfo.getOutputTupleId())) {
-          throw new AnalysisException(
-              "ORDER BY expression not produced by aggregation output "
-              + "(missing from GROUP BY clause?): "
-              + orderByElements_.get(i).getExpr().toSql());
-        }
-      }
-    }
-    if (havingPred_ != null) {
-      if (!havingPred_.isBound(finalAggInfo.getOutputTupleId())) {
-        throw new AnalysisException(
-            "HAVING clause not produced by aggregation output "
-            + "(missing from GROUP BY clause?): "
-            + havingClause_.toSql());
-      }
-    }
-  }
-
-  /**
-   * Create a map from COUNT([ALL]) -> zeroifnull(COUNT([ALL])) if
-   * i) There is no GROUP-BY, and
-   * ii) There are other distinct aggregates to be evaluated.
-   * This transformation is necessary for COUNT to correctly return 0 for empty
-   * input relations.
-   */
-  private ExprSubstitutionMap createCountAllMap(
-      List<FunctionCallExpr> aggExprs, Analyzer analyzer)
-      throws AnalysisException {
-    ExprSubstitutionMap scalarCountAllMap = new ExprSubstitutionMap();
-
-    if (groupingExprs_ != null && !groupingExprs_.isEmpty()) {
-      // There are grouping expressions, so no substitution needs to be done.
-      return scalarCountAllMap;
-    }
-
-    com.google.common.base.Predicate<FunctionCallExpr> isNotDistinctPred =
-        new com.google.common.base.Predicate<FunctionCallExpr>() {
-          public boolean apply(FunctionCallExpr expr) {
-            return !expr.isDistinct();
-          }
-        };
-    if (Iterables.all(aggExprs, isNotDistinctPred)) {
-      // Only [ALL] aggs, so no substitution needs to be done.
-      return scalarCountAllMap;
-    }
-
-    com.google.common.base.Predicate<FunctionCallExpr> isCountPred =
-        new com.google.common.base.Predicate<FunctionCallExpr>() {
-          public boolean apply(FunctionCallExpr expr) {
-            return expr.getFnName().getFunction().equals("count");
-          }
-        };
-
-    Iterable<FunctionCallExpr> countAllAggs =
-        Iterables.filter(aggExprs, Predicates.and(isCountPred, isNotDistinctPred));
-    for (FunctionCallExpr countAllAgg: countAllAggs) {
-      // Replace COUNT(ALL) with zeroifnull(COUNT(ALL))
-      ArrayList<Expr> zeroIfNullParam = Lists.newArrayList(countAllAgg.clone());
-      FunctionCallExpr zeroIfNull =
-          new FunctionCallExpr("zeroifnull", zeroIfNullParam);
-      zeroIfNull.analyze(analyzer);
-      scalarCountAllMap.put(countAllAgg, zeroIfNull);
-    }
-
-    return scalarCountAllMap;
-  }
-
-  /**
-   * Create aggInfo for the given grouping and agg exprs.
-   */
-  private void createAggInfo(ArrayList<Expr> groupingExprs,
-      ArrayList<FunctionCallExpr> aggExprs, Analyzer analyzer)
-          throws AnalysisException {
-    if (selectList_.isDistinct()) {
-       // Create aggInfo for SELECT DISTINCT ... stmt:
-       // - all select list items turn into grouping exprs
-       // - there are no aggregate exprs
-      Preconditions.checkState(groupingExprs.isEmpty());
-      Preconditions.checkState(aggExprs.isEmpty());
-      ArrayList<Expr> distinctGroupingExprs = Expr.cloneList(resultExprs_);
-      aggInfo_ =
-          AggregateInfo.create(distinctGroupingExprs, null, null, analyzer);
-    } else {
-      aggInfo_ = AggregateInfo.create(groupingExprs, aggExprs, null, analyzer);
-    }
-  }
-
-  /**
-   * If the select list contains AnalyticExprs, create AnalyticInfo and substitute
-   * AnalyticExprs using the AnalyticInfo's smap.
-   */
-  private void createAnalyticInfo(Analyzer analyzer)
-      throws AnalysisException {
-    // collect AnalyticExprs from the SELECT and ORDER BY clauses
-    ArrayList<Expr> analyticExprs = Lists.newArrayList();
-    TreeNode.collect(resultExprs_, AnalyticExpr.class, analyticExprs);
-    if (sortInfo_ != null) {
-      TreeNode.collect(sortInfo_.getOrderingExprs(), AnalyticExpr.class,
-          analyticExprs);
-    }
-    if (analyticExprs.isEmpty()) return;
-    ExprSubstitutionMap rewriteSmap = new ExprSubstitutionMap();
-    for (Expr expr: analyticExprs) {
-      AnalyticExpr toRewrite = (AnalyticExpr)expr;
-      Expr newExpr = AnalyticExpr.rewrite(toRewrite);
-      if (newExpr != null) {
-        newExpr.analyze(analyzer);
-        if (!rewriteSmap.containsMappingFor(toRewrite)) {
-          rewriteSmap.put(toRewrite, newExpr);
-        }
-      }
-    }
-    if (rewriteSmap.size() > 0) {
-      // Substitute the exprs with their rewritten versions.
-      ArrayList<Expr> updatedAnalyticExprs =
-          Expr.substituteList(analyticExprs, rewriteSmap, analyzer, false);
-      // This is to get rid of the original exprs which have been rewritten.
-      analyticExprs.clear();
-      // Collect the new exprs introduced through the rewrite and the non-rewrite exprs.
-      TreeNode.collect(updatedAnalyticExprs, AnalyticExpr.class, analyticExprs);
-    }
-
-    analyticInfo_ = AnalyticInfo.create(analyticExprs, analyzer);
-
-    ExprSubstitutionMap smap = analyticInfo_.getSmap();
-    // If any exprs were rewritten, we have to compose the new smap with the existing one.
-    if (rewriteSmap.size() > 0) {
-      smap = ExprSubstitutionMap.compose(
-          rewriteSmap, analyticInfo_.getSmap(), analyzer);
-    }
-    // change select list and ordering exprs to point to analytic output. We need
-    // to reanalyze the exprs at this point.
-    resultExprs_ = Expr.substituteList(resultExprs_, smap, analyzer,
-        false);
-    LOG.trace("post-analytic selectListExprs: " + Expr.debugString(resultExprs_));
-    if (sortInfo_ != null) {
-      sortInfo_.substituteOrderingExprs(smap, analyzer);
-      LOG.trace("post-analytic orderingExprs: " +
-          Expr.debugString(sortInfo_.getOrderingExprs()));
-    }
-  }
-
-  /**
-   * Returns the SQL string corresponding to this SelectStmt.
-   */
-  @Override
-  public String toSql() {
-    // Return the SQL string before inline-view expression substitution.
-    if (sqlString_ != null) return sqlString_;
-
-    StringBuilder strBuilder = new StringBuilder();
-    if (withClause_ != null) {
-      strBuilder.append(withClause_.toSql());
-      strBuilder.append(" ");
-    }
-
-    // Select list
-    strBuilder.append("SELECT ");
-    if (selectList_.isDistinct()) {
-      strBuilder.append("DISTINCT ");
-    }
-    if (selectList_.hasPlanHints()) {
-      strBuilder.append(ToSqlUtils.getPlanHintsSql(selectList_.getPlanHints()) + " ");
-    }
-    for (int i = 0; i < selectList_.getItems().size(); ++i) {
-      strBuilder.append(selectList_.getItems().get(i).toSql());
-      strBuilder.append((i+1 != selectList_.getItems().size()) ? ", " : "");
-    }
-    // From clause
-    if (!fromClause_.isEmpty()) { strBuilder.append(fromClause_.toSql()); }
-    // Where clause
-    if (whereClause_ != null) {
-      strBuilder.append(" WHERE ");
-      strBuilder.append(whereClause_.toSql());
-    }
-    // Group By clause
-    if (groupingExprs_ != null) {
-      strBuilder.append(" GROUP BY ");
-      for (int i = 0; i < groupingExprs_.size(); ++i) {
-        strBuilder.append(groupingExprs_.get(i).toSql());
-        strBuilder.append((i+1 != groupingExprs_.size()) ? ", " : "");
-      }
-    }
-    // Having clause
-    if (havingClause_ != null) {
-      strBuilder.append(" HAVING ");
-      strBuilder.append(havingClause_.toSql());
-    }
-    // Order By clause
-    if (orderByElements_ != null) {
-      strBuilder.append(" ORDER BY ");
-      for (int i = 0; i < orderByElements_.size(); ++i) {
-        strBuilder.append(orderByElements_.get(i).toSql());
-        strBuilder.append((i+1 != orderByElements_.size()) ? ", " : "");
-      }
-    }
-    // Limit clause.
-    strBuilder.append(limitElement_.toSql());
-    return strBuilder.toString();
-  }
-
-  /**
-   * If the select statement has a sort/top that is evaluated, then the sort tuple
-   * is materialized. Else, if there is aggregation then the aggregate tuple id is
-   * materialized. Otherwise, all referenced tables are materialized as long as they are
-   * not semi-joined. If there are analytics and no sort, then the returned tuple
-   * ids also include the logical analytic output tuple.
-   */
-  @Override
-  public void getMaterializedTupleIds(ArrayList<TupleId> tupleIdList) {
-    if (evaluateOrderBy_) {
-      tupleIdList.add(sortInfo_.getSortTupleDescriptor().getId());
-    } else if (aggInfo_ != null) {
-      // Return the tuple id produced in the final aggregation step.
-      tupleIdList.add(aggInfo_.getResultTupleId());
-    } else {
-      for (TableRef tblRef: fromClause_) {
-        // Don't include materialized tuple ids from semi-joined table
-        // refs (see IMPALA-1526)
-        if (tblRef.getJoinOp().isLeftSemiJoin()) continue;
-        // Remove the materialized tuple ids of all the table refs that
-        // are semi-joined by the right semi/anti join.
-        if (tblRef.getJoinOp().isRightSemiJoin()) tupleIdList.clear();
-        tupleIdList.addAll(tblRef.getMaterializedTupleIds());
-      }
-    }
-    // We materialize the agg tuple or the table refs together with the analytic tuple.
-    if (hasAnalyticInfo() && !evaluateOrderBy_) {
-      tupleIdList.add(analyticInfo_.getOutputTupleId());
-    }
-  }
-
-  /**
-   * C'tor for cloning.
-   */
-  private SelectStmt(SelectStmt other) {
-    super(other);
-    selectList_ = other.selectList_.clone();
-    fromClause_ = other.fromClause_.clone();
-    whereClause_ = (other.whereClause_ != null) ? other.whereClause_.clone() : null;
-    groupingExprs_ =
-        (other.groupingExprs_ != null) ? Expr.cloneList(other.groupingExprs_) : null;
-    havingClause_ = (other.havingClause_ != null) ? other.havingClause_.clone() : null;
-    colLabels_ = Lists.newArrayList(other.colLabels_);
-    aggInfo_ = (other.aggInfo_ != null) ? other.aggInfo_.clone() : null;
-    analyticInfo_ = (other.analyticInfo_ != null) ? other.analyticInfo_.clone() : null;
-    sqlString_ = (other.sqlString_ != null) ? new String(other.sqlString_) : null;
-    baseTblSmap_ = other.baseTblSmap_.clone();
-  }
-
-  @Override
-  public void collectTableRefs(List<TableRef> tblRefs) {
-    for (TableRef tblRef: fromClause_) {
-      if (tblRef instanceof InlineViewRef) {
-        InlineViewRef inlineViewRef = (InlineViewRef) tblRef;
-        inlineViewRef.getViewStmt().collectTableRefs(tblRefs);
-      } else {
-        tblRefs.add(tblRef);
-      }
-    }
-  }
-
-  @Override
-  public void reset() {
-    super.reset();
-    selectList_.reset();
-    colLabels_.clear();
-    fromClause_.reset();
-    baseTblSmap_.clear();
-    if (whereClause_ != null) whereClause_.reset();
-    if (groupingExprs_ != null) Expr.resetList(groupingExprs_);
-    if (havingClause_ != null) havingClause_.reset();
-  }
-
-  @Override
-  public SelectStmt clone() { return new SelectStmt(this); }
-
-  /**
-   * Check if the stmt returns a single row. This can happen
-   * in the following cases:
-   * 1. select stmt with a 'limit 1' clause
-   * 2. select stmt with an aggregate function and no group by.
-   * 3. select stmt with no from clause.
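-   * E.g.: "SELECT * FROM t LIMIT 1" (case 1), "SELECT count(*) FROM t" (case 2),
-   * "SELECT 1 + 1" (case 3), where t is an arbitrary table.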
-   *
-   * This function may produce false negatives because the cardinality of the
-   * result set also depends on the data a stmt is processing.
-   */
-  public boolean returnsSingleRow() {
-    // limit 1 clause
-    if (limitElement_ != null && limitElement_.getLimit() == 1) return true;
-    // No from clause (base tables or inline views)
-    if (fromClause_.isEmpty()) return true;
-    // Aggregation with no group by and no DISTINCT
-    if (hasAggInfo() && !hasGroupByClause() && !selectList_.isDistinct()) return true;
-    // In all other cases, return false.
-    return false;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/SetStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/SetStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/SetStmt.java
deleted file mode 100644
index d44f5b4..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/SetStmt.java
+++ /dev/null
@@ -1,69 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import com.cloudera.impala.thrift.TSetQueryOptionRequest;
-import com.google.common.base.Preconditions;
-
-/**
- * Representation of a SET query options statement.
- */
-public class SetStmt extends StatementBase {
-  private final String key_;
-  private final String value_;
-
-  // This key is deprecated in CDH5.2; COMPRESSION_CODEC_KEY replaces this
-  private static final String DEPRECATED_PARQUET_CODEC_KEY = "PARQUET_COMPRESSION_CODEC";
-  private static final String COMPRESSION_CODEC_KEY = "COMPRESSION_CODEC";
-
-  // maps the given key name to a key defined in the thrift file
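-  // (e.g. the deprecated PARQUET_COMPRESSION_CODEC key is mapped to COMPRESSION_CODEC)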
-  private static String resolveThriftKey(String key) {
-    if (key.toLowerCase().equals(DEPRECATED_PARQUET_CODEC_KEY.toLowerCase())) {
-      return COMPRESSION_CODEC_KEY;
-    }
-    return key;
-  }
-
-  public SetStmt(String key, String value) {
-    Preconditions.checkArgument((key == null) == (value == null));
-    Preconditions.checkArgument(key == null || !key.isEmpty());
-    key_ = key;
-    value_ = value;
-  }
-
-  @Override
-  public String toSql() {
-    if (key_ == null) return "SET";
-    Preconditions.checkNotNull(value_);
-    return "SET " + ToSqlUtils.getIdentSql(key_) + "='" + value_ + "'";
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) {
-    // Query option key is validated by the backend.
-  }
-
-  public TSetQueryOptionRequest toThrift() {
-    TSetQueryOptionRequest request = new TSetQueryOptionRequest();
-    if (key_ != null) {
-      request.setKey(resolveThriftKey(key_));
-      request.setValue(value_);
-    }
-    return request;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/ShowCreateFunctionStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/ShowCreateFunctionStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/ShowCreateFunctionStmt.java
deleted file mode 100644
index e3ac551..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/ShowCreateFunctionStmt.java
+++ /dev/null
@@ -1,74 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.List;
-
-import com.cloudera.impala.analysis.FunctionName;
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.catalog.Db;
-import com.cloudera.impala.catalog.Function;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TGetFunctionsParams;
-import com.cloudera.impala.thrift.TFunctionCategory;
-import com.google.common.base.Preconditions;
-
-/**
- * Representation of a SHOW CREATE FUNCTION statement which returns the "CREATE FUNCTION ..."
- * string that re-creates the function.
- *
- * Syntax: SHOW CREATE [AGGREGATE] FUNCTION [<db_name>.]<function_name>
- */
-public class ShowCreateFunctionStmt extends StatementBase {
-  private final FunctionName functionName_;
-  private final TFunctionCategory category_;
-
-  public ShowCreateFunctionStmt(FunctionName functionName, TFunctionCategory category) {
-    Preconditions.checkNotNull(functionName);
-    Preconditions.checkArgument(category == TFunctionCategory.SCALAR ||
-        category == TFunctionCategory.AGGREGATE);
-    functionName_ = functionName;
-    category_ = category;
-  }
-
-  @Override
-  public String toSql() {
-    return "SHOW CREATE " +
-        (category_ == TFunctionCategory.AGGREGATE ? "AGGREGATE " : "") +
-        "FUNCTION " + functionName_;
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    functionName_.analyze(analyzer);
-    Db db = analyzer.getDb(functionName_.getDb(), Privilege.VIEW_METADATA);
-    List<Function> functions = db.getFunctions(category_, functionName_.getFunction());
-    if (functions.isEmpty()) {
-      throw new AnalysisException("Function " + functionName_.getFunction() + "() " +
-          "does not exist in database " + functionName_.getDb());
-    }
-  }
-
-  public TGetFunctionsParams toThrift() {
-    TGetFunctionsParams params = new TGetFunctionsParams();
-    params.setCategory(category_);
-    params.setDb(functionName_.getDb());
-    params.setPattern(functionName_.getFunction());
-    return params;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/ShowCreateTableStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/ShowCreateTableStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/ShowCreateTableStmt.java
deleted file mode 100644
index 68b593c..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/ShowCreateTableStmt.java
+++ /dev/null
@@ -1,80 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.catalog.View;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TCatalogObjectType;
-import com.cloudera.impala.thrift.TTableName;
-import com.google.common.base.Preconditions;
-
-/**
- * Representation of a SHOW CREATE TABLE statement which returns either the
- * "CREATE TABLE ..." string that re-creates the table or the "CREATE VIEW ..."
- * string that re-creates the view as appropriate.
- *
- * Syntax: SHOW CREATE (TABLE|VIEW) <table or view>
- */
-public class ShowCreateTableStmt extends StatementBase {
-  private TableName tableName_;
-
-  // The object type keyword used, e.g. TABLE or VIEW, needed to output matching SQL.
-  private TCatalogObjectType objectType_;
-
-  public ShowCreateTableStmt(TableName table, TCatalogObjectType objectType) {
-    Preconditions.checkNotNull(table);
-    this.tableName_ = table;
-    this.objectType_ = objectType;
-  }
-
-  @Override
-  public String toSql() {
-    return "SHOW CREATE " + objectType_.name() + " " + tableName_;
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    tableName_ = analyzer.getFqTableName(tableName_);
-    Table table = analyzer.getTable(tableName_, Privilege.VIEW_METADATA);
-    if (table instanceof View) {
-      View view = (View)table;
-      // Analyze the view query statement with its own analyzer for authorization.
-      Analyzer viewAnalyzer = new Analyzer(analyzer);
-      // Only show the view's query if the user has permissions to execute the query, to
-      // avoid revealing information, e.g. tables the user does not have access to.
-      // Report a masked authorization message if authorization fails.
-      viewAnalyzer.setAuthErrMsg(String.format("User '%s' does not have privileges to " +
-          "see the definition of view '%s'.",
-          analyzer.getUser().getName(), view.getFullName()));
-      QueryStmt viewQuery = view.getQueryStmt().clone();
-      // Views from the Hive metastore may rely on Hive's column naming if the SQL
-      // statement references a column by its implicitly defined column names.
-      viewAnalyzer.setUseHiveColLabels(true);
-      viewQuery.analyze(viewAnalyzer);
-    }
-  }
-
-  public TTableName toThrift() {
-    TTableName params = new TTableName();
-    params.setTable_name(tableName_.getTbl());
-    params.setDb_name(tableName_.getDb());
-    return params;
-  }
-}



[17/61] [partial] incubator-impala git commit: IMPALA-3786: Replace "cloudera" with "apache" (part 1)

Posted by kw...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/planner/SingleNodePlanner.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/planner/SingleNodePlanner.java b/fe/src/main/java/com/cloudera/impala/planner/SingleNodePlanner.java
deleted file mode 100644
index 2212d35..0000000
--- a/fe/src/main/java/com/cloudera/impala/planner/SingleNodePlanner.java
+++ /dev/null
@@ -1,1594 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.planner;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.ListIterator;
-import java.util.Map;
-import java.util.Set;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.analysis.AggregateInfo;
-import com.cloudera.impala.analysis.AnalyticInfo;
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.BaseTableRef;
-import com.cloudera.impala.analysis.BinaryPredicate;
-import com.cloudera.impala.analysis.BinaryPredicate.Operator;
-import com.cloudera.impala.analysis.CollectionTableRef;
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.analysis.ExprId;
-import com.cloudera.impala.analysis.ExprSubstitutionMap;
-import com.cloudera.impala.analysis.InlineViewRef;
-import com.cloudera.impala.analysis.JoinOperator;
-import com.cloudera.impala.analysis.NullLiteral;
-import com.cloudera.impala.analysis.QueryStmt;
-import com.cloudera.impala.analysis.SelectStmt;
-import com.cloudera.impala.analysis.SingularRowSrcTableRef;
-import com.cloudera.impala.analysis.SlotDescriptor;
-import com.cloudera.impala.analysis.SlotId;
-import com.cloudera.impala.analysis.SlotRef;
-import com.cloudera.impala.analysis.TableRef;
-import com.cloudera.impala.analysis.TupleDescriptor;
-import com.cloudera.impala.analysis.TupleId;
-import com.cloudera.impala.analysis.TupleIsNullPredicate;
-import com.cloudera.impala.analysis.UnionStmt;
-import com.cloudera.impala.analysis.UnionStmt.UnionOperand;
-import com.cloudera.impala.catalog.ColumnStats;
-import com.cloudera.impala.catalog.DataSourceTable;
-import com.cloudera.impala.catalog.HBaseTable;
-import com.cloudera.impala.catalog.HdfsPartition;
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.catalog.KuduTable;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.common.NotImplementedException;
-import com.cloudera.impala.common.Pair;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Predicate;
-import com.google.common.collect.Iterables;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import com.google.common.collect.Sets;
-
-/**
- * Constructs a non-executable single-node plan from an analyzed parse tree.
- * The single-node plan does not contain data exchanges or data-reduction optimizations
- * such as local aggregations that are important for distributed execution.
- * The single-node plan needs to be wrapped in a plan fragment for it to be executable.
- */
-public class SingleNodePlanner {
-  private final static Logger LOG = LoggerFactory.getLogger(SingleNodePlanner.class);
-
-  private final PlannerContext ctx_;
-
-  public SingleNodePlanner(PlannerContext ctx) {
-    ctx_ = ctx;
-  }
-
-  /**
-   * Generates and returns the root of the single-node plan for the analyzed parse tree
-   * in the planner context. The planning process recursively walks the parse tree and
-   * performs the following actions.
-   * In the top-down phase over query statements:
-   * - Materialize the slots required for evaluating expressions of that statement.
-   * - Migrate conjuncts from parent blocks into inline views and union operands.
-   * In the bottom-up phase generate the plan tree for every query statement:
-   * - Generate the plan for the FROM-clause of a select statement: The plan trees of
-   *   absolute and uncorrelated table refs are connected via JoinNodes. The relative
-   *   and correlated table refs are associated with one or more SubplanNodes.
-   * - A new SubplanNode is placed on top of an existing plan node whenever the tuples
-   *   materialized by that plan node enable evaluation of one or more relative or
-   *   correlated table refs, i.e., SubplanNodes are placed at the lowest possible point
-   *   in the plan, often right after a ScanNode materializing the (single) parent tuple.
-   * - The right-hand side of each SubplanNode is a plan tree generated by joining a
-   *   SingularRowSrcTableRef with those applicable relative and correlated refs.
-   *   A SingularRowSrcTableRef represents the current row being processed by the
-   *   SubplanNode from its input (first child).
-   * - Connecting table ref plans via JoinNodes is done in a cost-based fashion
-   *   (join-order optimization). All materialized slots, including those of tuples
-   *   materialized inside a SubplanNode, must be known for an accurate estimate of row
-   *   sizes needed for cost-based join ordering.
-   * - The remaining aggregate/analytic/orderby portions of a select statement are added
-   *   on top of the FROM-clause plan.
-   * - Whenever a new node is added to the plan tree, assign conjuncts that can be
-   *   evaluated at that node and compute the stats of that node (cardinality, etc.).
-   * - Apply combined expression substitution map of child plan nodes; if a plan node
-   *   re-maps its input, set a substitution map to be applied by parents.
-   */
-  public PlanNode createSingleNodePlan() throws ImpalaException {
-    QueryStmt queryStmt = ctx_.getQueryStmt();
-    // Use the stmt's analyzer which is not necessarily the root analyzer
-    // to detect empty result sets.
-    Analyzer analyzer = queryStmt.getAnalyzer();
-    analyzer.computeEquivClasses();
-    analyzer.getTimeline().markEvent("Equivalence classes computed");
-
-    // Mark slots referenced by output exprs as materialized, prior to generating the
-    // plan tree.
-    // We need to mark the result exprs of the topmost select block as materialized, so
-    // that PlanNode.init() can compute the final mem layout of materialized tuples
-    // (the byte size of tuples is needed for cost computations).
-    // TODO: instead of materializing everything produced by the plan root, derive
-    // referenced slots from destination fragment and add a materialization node
-    // if not all output is needed by destination fragment
-    // TODO 2: should the materialization decision be cost-based?
-    if (queryStmt.getBaseTblResultExprs() != null) {
-      analyzer.materializeSlots(queryStmt.getBaseTblResultExprs());
-    }
-
-    LOG.trace("desctbl: " + analyzer.getDescTbl().debugString());
-    PlanNode singleNodePlan = createQueryPlan(queryStmt, analyzer,
-        ctx_.getQueryOptions().isDisable_outermost_topn());
-    Preconditions.checkNotNull(singleNodePlan);
-    return singleNodePlan;
-  }
-
-  /**
-   * Validates a single-node plan by checking that it contains no right or full outer
-   * joins lacking equi-join conjuncts, except inside the right child of a SubplanNode.
-   * Throws a NotImplementedException if plan validation fails.
-   */
-  public void validatePlan(PlanNode planNode) throws NotImplementedException {
-    if (planNode instanceof NestedLoopJoinNode) {
-      JoinNode joinNode = (JoinNode) planNode;
-      JoinOperator joinOp = joinNode.getJoinOp();
-      if ((joinOp.isRightSemiJoin() || joinOp.isFullOuterJoin()
-           || joinOp == JoinOperator.RIGHT_OUTER_JOIN)
-          && joinNode.getEqJoinConjuncts().isEmpty()) {
-        throw new NotImplementedException(String.format("Error generating a valid " +
-            "execution plan for this query. A %s type with no equi-join " +
-            "predicates can only be executed with a single node plan.",
-            joinOp.toString()));
-      }
-    }
-
-    if (planNode instanceof SubplanNode) {
-      // Right and full outer joins with no equi-join conjuncts are ok in the right
-      // child of a SubplanNode.
-      validatePlan(planNode.getChild(0));
-    } else {
-      for (PlanNode child: planNode.getChildren()) {
-        validatePlan(child);
-      }
-    }
-  }
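
As an aside, the recursion above can be sketched standalone. The Node type, labels, and main() below are hypothetical simplifications (not Impala's PlanNode/SubplanNode/NestedLoopJoinNode classes); the point is only that a subplan's right-hand side is skipped by the walk, so an otherwise-invalid join is tolerated there:

import java.util.Arrays;
import java.util.List;

class Node {
  final String label;
  final List<Node> children;
  Node(String label, Node... children) {
    this.label = label;
    this.children = Arrays.asList(children);
  }
}

public class ValidateSketch {
  // Reject any node labeled "NLJ-no-equi" unless it sits in the right child of a
  // "Subplan" node; only the subplan's input (first child) is visited.
  static void validate(Node n) {
    if (n.label.equals("NLJ-no-equi")) {
      throw new IllegalStateException("outer join without equi-join predicates");
    }
    if (n.label.equals("Subplan")) {
      validate(n.children.get(0));
    } else {
      for (Node c : n.children) validate(c);
    }
  }

  public static void main(String[] args) {
    // Offending join inside the subplan's rhs: accepted.
    validate(new Node("Subplan", new Node("Scan"), new Node("NLJ-no-equi")));
    // Offending join anywhere else: rejected.
    try {
      validate(new Node("HashJoin", new Node("NLJ-no-equi"), new Node("Scan")));
    } catch (IllegalStateException e) {
      System.out.println("rejected: " + e.getMessage());
    }
  }
}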
-
-  /**
-   * Creates an EmptyNode that 'materializes' the tuples of the given stmt.
-   * Marks all collection-typed slots referenced in stmt as non-materialized because
-   * they are never unnested, and therefore the corresponding parent scan should not
-   * materialize them.
-   */
-  private PlanNode createEmptyNode(QueryStmt stmt, Analyzer analyzer) {
-    ArrayList<TupleId> tupleIds = Lists.newArrayList();
-    stmt.getMaterializedTupleIds(tupleIds);
-    if (tupleIds.isEmpty()) {
-      // Constant selects do not have materialized tuples at this stage.
-      Preconditions.checkState(stmt instanceof SelectStmt,
-          "Only constant selects should have no materialized tuples");
-      SelectStmt selectStmt = (SelectStmt)stmt;
-      Preconditions.checkState(selectStmt.getTableRefs().isEmpty());
-      tupleIds.add(createResultTupleDescriptor(selectStmt, "empty", analyzer).getId());
-    }
-    unmarkCollectionSlots(stmt);
-    EmptySetNode node = new EmptySetNode(ctx_.getNextNodeId(), tupleIds);
-    node.init(analyzer);
-    // Set the output smap to resolve exprs referencing inline views within stmt.
-    // Not needed for a UnionStmt because it materializes its input operands.
-    if (stmt instanceof SelectStmt) {
-      node.setOutputSmap(((SelectStmt) stmt).getBaseTblSmap());
-    }
-    return node;
-  }
-
-  /**
-   * Mark all collection-typed slots in stmt as non-materialized.
-   */
-  private void unmarkCollectionSlots(QueryStmt stmt) {
-    List<TableRef> tblRefs = Lists.newArrayList();
-    stmt.collectTableRefs(tblRefs);
-    for (TableRef ref: tblRefs) {
-      if (!ref.isRelative()) continue;
-      Preconditions.checkState(ref instanceof CollectionTableRef);
-      CollectionTableRef collTblRef = (CollectionTableRef) ref;
-      Expr collExpr = collTblRef.getCollectionExpr();
-      Preconditions.checkState(collExpr instanceof SlotRef);
-      SlotRef collSlotRef = (SlotRef) collExpr;
-      collSlotRef.getDesc().setIsMaterialized(false);
-    }
-  }
-
-  /**
-   * Create plan tree for single-node execution. Generates PlanNodes for the
-   * Select/Project/Join/Union [All]/Group by/Having/Order by clauses of the query stmt.
-   */
-  private PlanNode createQueryPlan(QueryStmt stmt, Analyzer analyzer, boolean disableTopN)
-      throws ImpalaException {
-    if (analyzer.hasEmptyResultSet()) return createEmptyNode(stmt, analyzer);
-
-    PlanNode root;
-    if (stmt instanceof SelectStmt) {
-      SelectStmt selectStmt = (SelectStmt) stmt;
-      root = createSelectPlan(selectStmt, analyzer);
-
-      // insert possible AnalyticEvalNode before SortNode
-      if (((SelectStmt) stmt).getAnalyticInfo() != null) {
-        AnalyticInfo analyticInfo = selectStmt.getAnalyticInfo();
-        AnalyticPlanner analyticPlanner =
-            new AnalyticPlanner(analyticInfo, analyzer, ctx_);
-        List<Expr> inputPartitionExprs = Lists.newArrayList();
-        AggregateInfo aggInfo = selectStmt.getAggInfo();
-        root = analyticPlanner.createSingleNodePlan(root,
-            aggInfo != null ? aggInfo.getGroupingExprs() : null, inputPartitionExprs);
-        if (aggInfo != null && !inputPartitionExprs.isEmpty()) {
-          // analytic computation will benefit from a partition on inputPartitionExprs
-          aggInfo.setPartitionExprs(inputPartitionExprs);
-        }
-      }
-    } else {
-      Preconditions.checkState(stmt instanceof UnionStmt);
-      root = createUnionPlan((UnionStmt) stmt, analyzer);
-    }
-
-    // Avoid adding a sort node if the sort tuple has no materialized slots.
-    boolean sortHasMaterializedSlots = false;
-    if (stmt.evaluateOrderBy()) {
-      for (SlotDescriptor sortSlotDesc:
-        stmt.getSortInfo().getSortTupleDescriptor().getSlots()) {
-        if (sortSlotDesc.isMaterialized()) {
-          sortHasMaterializedSlots = true;
-          break;
-        }
-      }
-    }
-
-    if (stmt.evaluateOrderBy() && sortHasMaterializedSlots) {
-      long limit = stmt.getLimit();
-      // TODO: External sort could be used for very large limits
-      // not just unlimited order-by
-      boolean useTopN = stmt.hasLimit() && !disableTopN;
-      root = new SortNode(ctx_.getNextNodeId(), root, stmt.getSortInfo(),
-          useTopN, stmt.getOffset());
-      Preconditions.checkState(root.hasValidStats());
-      root.setLimit(limit);
-      root.init(analyzer);
-    } else {
-      root.setLimit(stmt.getLimit());
-      root.computeStats(analyzer);
-    }
-
-    return root;
-  }
-
-  /**
-   * If there are unassigned conjuncts that are bound by tupleIds or if there are slot
-   * equivalences for tupleIds that have not yet been enforced, returns a SelectNode on
-   * top of root that evaluates those conjuncts; otherwise returns root unchanged.
-   * TODO: change this to assign the unassigned conjuncts to root itself, if that is
-   * semantically correct
-   */
-  private PlanNode addUnassignedConjuncts(
-      Analyzer analyzer, List<TupleId> tupleIds, PlanNode root) {
-    // No point in adding SelectNode on top of an EmptyNode.
-    if (root instanceof EmptySetNode) return root;
-    Preconditions.checkNotNull(root);
-    // Gather unassigned conjuncts and generate predicates to enforce
-    // slot equivalences for each tuple id.
-    List<Expr> conjuncts = analyzer.getUnassignedConjuncts(root);
-    for (TupleId tid: tupleIds) {
-      analyzer.createEquivConjuncts(tid, conjuncts);
-    }
-    if (conjuncts.isEmpty()) return root;
-    // evaluate conjuncts in SelectNode
-    SelectNode selectNode = new SelectNode(ctx_.getNextNodeId(), root, conjuncts);
-    // init() marks conjuncts as assigned
-    selectNode.init(analyzer);
-    Preconditions.checkState(selectNode.hasValidStats());
-    return selectNode;
-  }
-
-  /**
-   * Return the cheapest plan that materializes the joins of all TableRefs in
-   * parentRefPlans and the subplans of all applicable TableRefs in subplanRefs.
-   * Assumes that parentRefPlans are in the order as they originally appeared in
-   * the query.
-   * For this plan:
-   * - the plan is executable, i.e., all non-cross joins have equi-join predicates
-   * - the leftmost scan is over the largest of the inputs for which we can still
-   *   construct an executable plan
-   * - from bottom to top, all rhs's are in increasing order of selectivity (percentage
-   *   of surviving rows)
-   * - outer/cross/semi joins: rhs serialized size is < lhs serialized size;
-   *   enforced via join inversion, if necessary
-   * - SubplanNodes are placed as low as possible in the plan tree - as soon as the
-   *   required tuple ids of one or more TableRefs in subplanRefs are materialized
-   * Returns null if we can't create an executable plan.
-   */
-  private PlanNode createCheapestJoinPlan(Analyzer analyzer,
-      List<Pair<TableRef, PlanNode>> parentRefPlans, List<SubplanRef> subplanRefs)
-      throws ImpalaException {
-    LOG.trace("createCheapestJoinPlan");
-    if (parentRefPlans.size() == 1) return parentRefPlans.get(0).second;
-
-    // collect eligible candidates for the leftmost input; list contains
-    // (plan, materialized size)
-    ArrayList<Pair<TableRef, Long>> candidates = Lists.newArrayList();
-    for (Pair<TableRef, PlanNode> entry: parentRefPlans) {
-      TableRef ref = entry.first;
-      JoinOperator joinOp = ref.getJoinOp();
-
-      // Avoid reordering outer/semi joins which is generally incorrect.
-      // TODO: Allow the rhs of any cross join as the leftmost table. This needs careful
-      // consideration of the joinOps that result from such a re-ordering (IMPALA-1281).
-      if (joinOp.isOuterJoin() || joinOp.isSemiJoin() || joinOp.isCrossJoin()) continue;
-
-      PlanNode plan = entry.second;
-      if (plan.getCardinality() == -1) {
-        // use 0 for the size to avoid it becoming the leftmost input
-        // TODO: Consider raw size of scanned partitions in the absence of stats.
-        candidates.add(new Pair(ref, new Long(0)));
-        LOG.trace("candidate " + ref.getUniqueAlias() + ": 0");
-        continue;
-      }
-      Preconditions.checkState(ref.isAnalyzed());
-      long materializedSize =
-          (long) Math.ceil(plan.getAvgRowSize() * (double) plan.getCardinality());
-      candidates.add(new Pair(ref, new Long(materializedSize)));
-      LOG.trace(
-          "candidate " + ref.getUniqueAlias() + ": " + Long.toString(materializedSize));
-    }
-    if (candidates.isEmpty()) return null;
-
-    // order candidates by descending materialized size; we want to minimize the memory
-    // consumption of the materialized hash tables required for the join sequence
-    Collections.sort(candidates,
-        new Comparator<Pair<TableRef, Long>>() {
-          public int compare(Pair<TableRef, Long> a, Pair<TableRef, Long> b) {
-            long diff = b.second - a.second;
-            return (diff < 0 ? -1 : (diff > 0 ? 1 : 0));
-          }
-        });
-
-    for (Pair<TableRef, Long> candidate: candidates) {
-      PlanNode result = createJoinPlan(analyzer, candidate.first, parentRefPlans, subplanRefs);
-      if (result != null) return result;
-    }
-    return null;
-  }
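
The candidate ordering above (largest materialized input first, unknown stats treated as size 0) can be sketched in isolation. Candidate and the numbers in main() are hypothetical stand-ins for the (TableRef, PlanNode) pairs and their stats; Long.compare is used rather than subtracting the two sizes:

import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;

public class LeftmostCandidateSketch {
  static class Candidate {
    final String alias;
    final long cardinality;   // -1 means "no stats"
    final double avgRowSize;
    Candidate(String alias, long cardinality, double avgRowSize) {
      this.alias = alias;
      this.cardinality = cardinality;
      this.avgRowSize = avgRowSize;
    }
    long materializedSize() {
      // Unknown cardinality is treated as size 0 so the ref is not chosen as the
      // leftmost (largest) input, mirroring the fallback above.
      return cardinality < 0 ? 0L : (long) Math.ceil(avgRowSize * (double) cardinality);
    }
  }

  public static void main(String[] args) {
    List<Candidate> candidates = new ArrayList<>();
    candidates.add(new Candidate("t1", 1000000L, 24.0));
    candidates.add(new Candidate("t2", -1L, 12.0));
    candidates.add(new Candidate("t3", 50000L, 200.0));
    // Descending materialized size: the largest input ends up leftmost (probe side).
    Collections.sort(candidates, new Comparator<Candidate>() {
      @Override
      public int compare(Candidate a, Candidate b) {
        return Long.compare(b.materializedSize(), a.materializedSize());
      }
    });
    for (Candidate c : candidates) {
      System.out.println(c.alias + ": " + c.materializedSize());
    }
  }
}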
-
-  /**
-   * Returns a plan with leftmostRef's plan as its leftmost input; the joins
-   * are in decreasing order of selectiveness (percentage of rows they eliminate).
-   * Creates and adds subplan nodes as soon as the tuple ids required by at least one
-   * subplan ref are materialized by a join node added during plan generation.
-   */
-  private PlanNode createJoinPlan(Analyzer analyzer, TableRef leftmostRef,
-      List<Pair<TableRef, PlanNode>> refPlans, List<SubplanRef> subplanRefs)
-      throws ImpalaException {
-
-    LOG.trace("createJoinPlan: " + leftmostRef.getUniqueAlias());
-    // the refs that have yet to be joined
-    List<Pair<TableRef, PlanNode>> remainingRefs = Lists.newArrayList();
-    PlanNode root = null;  // root of accumulated join plan
-    for (Pair<TableRef, PlanNode> entry: refPlans) {
-      if (entry.first == leftmostRef) {
-        root = entry.second;
-      } else {
-        remainingRefs.add(entry);
-      }
-    }
-    Preconditions.checkNotNull(root);
-
-    // Maps from a TableRef in refPlans with an outer/semi join op to the set of
-   * TableRefs that precede it in refPlans (i.e., in FROM-clause order).
-    // The map is used to place outer/semi joins at a fixed position in the plan tree
-    // (IMPALA-860), s.t. all the tables appearing to the left/right of an outer/semi
-    // join in the original query still remain to the left/right after join ordering.
-    // This prevents join re-ordering across outer/semi joins which is generally wrong.
-    Map<TableRef, Set<TableRef>> precedingRefs = Maps.newHashMap();
-    List<TableRef> tmpTblRefs = Lists.newArrayList();
-    for (Pair<TableRef, PlanNode> entry: refPlans) {
-      TableRef tblRef = entry.first;
-      if (tblRef.getJoinOp().isOuterJoin() || tblRef.getJoinOp().isSemiJoin()) {
-        precedingRefs.put(tblRef, Sets.newHashSet(tmpTblRefs));
-      }
-      tmpTblRefs.add(tblRef);
-    }
-
-    // Refs that have been joined. The union of joinedRefs and the refs in remainingRefs
-    // are the set of all table refs.
-    Set<TableRef> joinedRefs = Sets.newHashSet(leftmostRef);
-    long numOps = 0;
-    int i = 0;
-    while (!remainingRefs.isEmpty()) {
-      // We minimize the resulting cardinality at each step in the join chain,
-      // which minimizes the total number of hash table lookups.
-      PlanNode newRoot = null;
-      Pair<TableRef, PlanNode> minEntry = null;
-      for (Pair<TableRef, PlanNode> entry: remainingRefs) {
-        TableRef ref = entry.first;
-        JoinOperator joinOp = ref.getJoinOp();
-
-        // Place outer/semi joins at a fixed position in the plan tree.
-        Set<TableRef> requiredRefs = precedingRefs.get(ref);
-        if (requiredRefs != null) {
-          Preconditions.checkState(joinOp.isOuterJoin() || joinOp.isSemiJoin());
-          // If the required table refs have not been placed yet, do not even consider
-          // the remaining table refs to prevent incorrect re-ordering of tables across
-          // outer/semi joins.
-          if (!requiredRefs.equals(joinedRefs)) break;
-        }
-
-        analyzer.setAssignedConjuncts(root.getAssignedConjuncts());
-        PlanNode candidate = createJoinNode(root, entry.second, ref, analyzer);
-        if (candidate == null) continue;
-        LOG.trace("cardinality=" + Long.toString(candidate.getCardinality()));
-
-        // Use 'candidate' as the new root; don't consider any other table refs at this
-        // position in the plan.
-        if (joinOp.isOuterJoin() || joinOp.isSemiJoin()) {
-          newRoot = candidate;
-          minEntry = entry;
-          break;
-        }
-
-        // Always prefer Hash Join over Nested-Loop Join due to limited costing
-        // infrastructure.
-        if (newRoot == null
-            || (candidate.getClass().equals(newRoot.getClass())
-                && candidate.getCardinality() < newRoot.getCardinality())
-            || (candidate instanceof HashJoinNode
-                && newRoot instanceof NestedLoopJoinNode)) {
-          newRoot = candidate;
-          minEntry = entry;
-        }
-      }
-      if (newRoot == null) {
-        // Could not generate a valid plan.
-        return null;
-      }
-
-      // we need to insert every rhs row into the hash table and then look up
-      // every lhs row
-      long lhsCardinality = root.getCardinality();
-      long rhsCardinality = minEntry.second.getCardinality();
-      numOps += lhsCardinality + rhsCardinality;
-      LOG.debug(Integer.toString(i) + " chose " + minEntry.first.getUniqueAlias()
-          + " #lhs=" + Long.toString(lhsCardinality)
-          + " #rhs=" + Long.toString(rhsCardinality)
-          + " #ops=" + Long.toString(numOps));
-      remainingRefs.remove(minEntry);
-      joinedRefs.add(minEntry.first);
-      root = newRoot;
-      // Create a Subplan on top of the new root for all the subplan refs that can be
-      // evaluated at this point.
-      // TODO: Once we have stats on nested collections, we should consider the join
-      // order in conjunction with the placement of SubplanNodes, i.e., move the creation
-      // of SubplanNodes into the join-ordering loop above.
-      root = createSubplan(root, subplanRefs, false, analyzer);
-      // assign node ids after running through the possible choices in order to end up
-      // with a dense sequence of node ids
-      if (root instanceof SubplanNode) root.getChild(0).setId(ctx_.getNextNodeId());
-      root.setId(ctx_.getNextNodeId());
-      analyzer.setAssignedConjuncts(root.getAssignedConjuncts());
-      ++i;
-    }
-
-    return root;
-  }
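
Stripped of the outer/semi-join placement rules and subplan handling, the greedy selection above reduces to "pick the remaining table whose join produces the smallest intermediate result next". The sketch below uses fixed, hypothetical cardinalities in place of the per-step JoinNode estimates:

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class GreedyJoinOrderSketch {
  public static void main(String[] args) {
    // Hypothetical result cardinalities for joining each remaining table.
    Map<String, Long> joinCardinality = new LinkedHashMap<>();
    joinCardinality.put("orders", 500000L);
    joinCardinality.put("customer", 20000L);
    joinCardinality.put("nation", 25L);

    List<String> remaining = new ArrayList<>(joinCardinality.keySet());
    List<String> order = new ArrayList<>();
    order.add("lineitem");  // leftmost ref, chosen by createCheapestJoinPlan()
    while (!remaining.isEmpty()) {
      // Pick the rhs that minimizes the intermediate cardinality at this step.
      String best = null;
      for (String ref : remaining) {
        if (best == null || joinCardinality.get(ref) < joinCardinality.get(best)) {
          best = ref;
        }
      }
      order.add(best);
      remaining.remove(best);
    }
    System.out.println("join order: " + order);  // [lineitem, nation, customer, orders]
  }
}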
-
-  /**
-   * Return a plan with joins in the order of parentRefPlans (= FROM clause order).
-   * Adds coalesced SubplanNodes based on the FROM-clause order of subplanRefs.
-   */
-  private PlanNode createFromClauseJoinPlan(Analyzer analyzer,
-      List<Pair<TableRef, PlanNode>> parentRefPlans, List<SubplanRef> subplanRefs)
-      throws ImpalaException {
-    // create left-deep sequence of binary hash joins; assign node ids as we go along
-    Preconditions.checkState(!parentRefPlans.isEmpty());
-    PlanNode root = parentRefPlans.get(0).second;
-    for (int i = 1; i < parentRefPlans.size(); ++i) {
-      TableRef innerRef = parentRefPlans.get(i).first;
-      PlanNode innerPlan = parentRefPlans.get(i).second;
-      root = createJoinNode(root, innerPlan, innerRef, analyzer);
-      if (root != null) root = createSubplan(root, subplanRefs, false, analyzer);
-      if (root instanceof SubplanNode) root.getChild(0).setId(ctx_.getNextNodeId());
-      root.setId(ctx_.getNextNodeId());
-    }
-    return root;
-  }
-
-  /**
-   * Create tree of PlanNodes that implements the Select/Project/Join/Group by/Having
-   * of the selectStmt query block.
-   */
-  private PlanNode createSelectPlan(SelectStmt selectStmt, Analyzer analyzer)
-      throws ImpalaException {
-    // no from clause -> materialize the select's exprs with a UnionNode
-    if (selectStmt.getTableRefs().isEmpty()) {
-      return createConstantSelectPlan(selectStmt, analyzer);
-    }
-
-    // Slot materialization:
-    // We need to mark all slots as materialized that are needed during the execution
-    // of selectStmt, and we need to do that prior to creating plans for the TableRefs
-    // (because createTableRefNode() might end up calling computeMemLayout() on one or
-    // more TupleDescriptors, at which point all referenced slots need to be marked).
-    //
-    // For non-join predicates, slots are marked as follows:
-    // - for base table scan predicates, this is done directly by ScanNode.init(), which
-    //   can do a better job because it doesn't need to materialize slots that are only
-    //   referenced for partition pruning, for instance
-    // - for inline views, non-join predicates are pushed down, at which point the
-    //   process repeats itself.
-    selectStmt.materializeRequiredSlots(analyzer);
-
-    ArrayList<TupleId> rowTuples = Lists.newArrayList();
-    // collect output tuples of subtrees
-    for (TableRef tblRef: selectStmt.getTableRefs()) {
-      rowTuples.addAll(tblRef.getMaterializedTupleIds());
-    }
-
-    // If the selectStmt's select-project-join portion returns an empty result set
-    // create a plan that feeds the aggregation of selectStmt with an empty set.
-    // Make sure the slots of the aggregation exprs and the tuples that they reference
-    // are materialized (see IMPALA-1960). Marks all collection-typed slots referenced
-    // in this select stmt as non-materialized because they are never unnested. Note that
-    // this creates extra unused space in the tuple since the mem layout has already been
-    // computed.
-    if (analyzer.hasEmptySpjResultSet()) {
-      unmarkCollectionSlots(selectStmt);
-      PlanNode emptySetNode = new EmptySetNode(ctx_.getNextNodeId(), rowTuples);
-      emptySetNode.init(analyzer);
-      emptySetNode.setOutputSmap(selectStmt.getBaseTblSmap());
-      return createAggregationPlan(selectStmt, analyzer, emptySetNode);
-    }
-
-    AggregateInfo aggInfo = selectStmt.getAggInfo();
-    // For queries which contain partition columns only, we may use the metadata instead
-    // of table scans. This is only feasible if all materialized aggregate expressions
-    // have distinct semantics. Please see createHdfsScanPlan() for details.
-    boolean fastPartitionKeyScans =
-        analyzer.getQueryCtx().getRequest().query_options.optimize_partition_key_scans &&
-        aggInfo != null && aggInfo.hasAllDistinctAgg();
-
-    // Separate table refs into parent refs (uncorrelated or absolute) and
-    // subplan refs (correlated or relative), and generate their plan.
-    List<TableRef> parentRefs = Lists.newArrayList();
-    List<SubplanRef> subplanRefs = Lists.newArrayList();
-    computeParentAndSubplanRefs(
-        selectStmt.getTableRefs(), analyzer.isStraightJoin(), parentRefs, subplanRefs);
-    PlanNode root = createTableRefsPlan(parentRefs, subplanRefs, fastPartitionKeyScans,
-        analyzer);
-    // add aggregation, if any
-    if (aggInfo != null) root = createAggregationPlan(selectStmt, analyzer, root);
-
-    // All the conjuncts_ should be assigned at this point.
-    // TODO: Re-enable this check here and/or elsewhere.
-    //Preconditions.checkState(!analyzer.hasUnassignedConjuncts());
-    return root;
-  }
-
-  /**
-   * Holds a table ref that must be evaluated inside a subplan (i.e., a relative or
-   * correlated ref), along with the materialized tuple ids and table ref ids that
-   * are required for this table ref to be correctly evaluated inside a SubplanNode.
-   *
-   * Required materialized tuple ids:
-   * These ensure that the SubplanNode evaluating this table ref is placed only once all
-   * root tuples needed by this table ref or relative refs contained in this table ref
-   * are materialized.
-   *
-   * Required table ref ids:
-   * These ensure that the SubplanNode evaluating this table ref is placed correctly
-   * with respect to join ordering, in particular, that the SubplanNode is not ordered
-   * across semi/outer joins.
-   */
-  private static class SubplanRef {
-    // Relative or correlated table ref.
-    public final TableRef tblRef;
-
-    // List of tuple ids that must be materialized before 'tblRef' can be
-    // correctly evaluated inside a SubplanNode.
-    public final List<TupleId> requiredTids;
-
-    // List of table ref ids that a plan tree must contain before 'tblRef'
-    // can be correctly evaluated inside a SubplanNode.
-    public final List<TupleId> requiredTblRefIds;
-
-    public SubplanRef(TableRef tblRef, List<TupleId> requiredTids,
-        List<TupleId> requiredTblRefIds) {
-      Preconditions.checkState(tblRef.isRelative() || tblRef.isCorrelated());
-      this.tblRef = tblRef;
-      this.requiredTids = requiredTids;
-      this.requiredTblRefIds = requiredTblRefIds;
-    }
-  }
-
-  /**
-   * Separates tblRefs into the following two lists.
-   *
-   * parentRefs:
-   * Uncorrelated and non-relative table refs. These are the 'regular' table refs whose
-   * plans are connected by join nodes, and are not placed inside a Subplan. The returned
-   * parentRefs are self-contained with respect to TableRef linking, i.e., each returned
-   * TableRef has its left TableRef link set to the TableRef preceding it in parentRefs.
-   *
-   * subplanRefs:
-   * Correlated and relative table refs. The plan of such refs must be put inside a
-   * Subplan. See SubplanRef for details. The left TableRef link of the TableRefs in
-   * returned SubplanRefs are set to null.
-   * If isStraightJoin is true, then the required tuple ids and table ref ids of a
-   * correlated or relative ref are simply those of all table refs preceding it in
-   * the FROM-clause order.
-   *
-   * If this function is called when generating the right-hand side of a SubplanNode,
-   * then correlated and relative table refs that require only tuples produced by the
-   * SubplanNode's input are placed inside parentRefs.
-   */
-  private void computeParentAndSubplanRefs(List<TableRef> tblRefs,
-      boolean isStraightJoin, List<TableRef> parentRefs, List<SubplanRef> subplanRefs) {
-    // List of table ref ids materialized so far during plan generation, including those
-    // from the subplan context, if any. We append the ids of table refs placed into
-    // parentRefs to this list to satisfy the ordering requirement of subsequent
-    // table refs that should also be put into parentRefs. Consider this example:
-    // FROM t, (SELECT ... FROM t.c1 LEFT JOIN t.c2 ON(...) JOIN t.c3 ON (...)) v
-    // Table ref t.c3 has an ordering dependency on t.c2 due to the outer join, but t.c3
-    // must be placed into the subplan that materializes t.c1 and t.c2.
-    List<TupleId> planTblRefIds = Lists.newArrayList();
-
-    // List of materialized tuple ids in the subplan context, if any. This list must
-    // remain constant in this function because the subplan context is fixed. Any
-    // relative or correlated table ref that requires a materialized tuple id produced
-    // by an element in tblRefs should be placed into subplanRefs because it requires
-    // a new subplan context. Otherwise, it should be placed into parentRefs.
-    List<TupleId> subplanTids = Collections.emptyList();
-
-    if (ctx_.hasSubplan()) {
-      // Add all table ref ids from the subplan context.
-      planTblRefIds.addAll(ctx_.getSubplan().getChild(0).getTblRefIds());
-      subplanTids =
-          Collections.unmodifiableList(ctx_.getSubplan().getChild(0).getTupleIds());
-    }
-
-    // Table ref representing the last outer or semi join we have seen.
-    TableRef lastSemiOrOuterJoin = null;
-    for (TableRef ref: tblRefs) {
-      boolean isParentRef = true;
-      if (ref.isRelative() || ref.isCorrelated()) {
-        List<TupleId> requiredTids = Lists.newArrayList();
-        List<TupleId> requiredTblRefIds = Lists.newArrayList();
-        if (ref.isCorrelated()) {
-          requiredTids.addAll(ref.getCorrelatedTupleIds());
-        } else {
-          CollectionTableRef collectionTableRef = (CollectionTableRef) ref;
-          requiredTids.add(collectionTableRef.getResolvedPath().getRootDesc().getId());
-        }
-        // Add all plan table ref ids as an ordering dependency for straight_join.
-        if (isStraightJoin) requiredTblRefIds.addAll(planTblRefIds);
-        if (lastSemiOrOuterJoin != null) {
-          // Prevent incorrect join re-ordering across outer/semi joins by requiring all
-          // table ref ids to the left and including the last outer/semi join.
-          // TODO: Think about when we can allow re-ordering across semi/outer joins
-          // in subplans.
-          requiredTblRefIds.addAll(lastSemiOrOuterJoin.getAllTableRefIds());
-        }
-        if (!subplanTids.containsAll(requiredTids)) {
-          isParentRef = false;
-          // Outer and semi joins are placed at a fixed position in the join order.
-          // They require that all tables to their left are materialized.
-          if (ref.getJoinOp().isOuterJoin() || ref.getJoinOp().isSemiJoin()) {
-            requiredTblRefIds.addAll(ref.getAllTableRefIds());
-            requiredTblRefIds.remove(ref.getId());
-          }
-          subplanRefs.add(new SubplanRef(ref, requiredTids, requiredTblRefIds));
-        }
-      }
-      if (isParentRef) {
-        parentRefs.add(ref);
-        planTblRefIds.add(ref.getId());
-      }
-      if (ref.getJoinOp().isOuterJoin() || ref.getJoinOp().isSemiJoin()) {
-        lastSemiOrOuterJoin = ref;
-      }
-    }
-    Preconditions.checkState(tblRefs.size() == parentRefs.size() + subplanRefs.size());
-
-    // Fix the chain of parent table refs and set the left table of all subplanRefs to
-    // null. This step needs to be done outside of the loop above because the left links
-    // are required for getAllTupleIds() used for determining the requiredTblRefIds.
-    parentRefs.get(0).setLeftTblRef(null);
-    for (int i = 1; i < parentRefs.size(); ++i) {
-      parentRefs.get(i).setLeftTblRef(parentRefs.get(i - 1));
-    }
-    for (SubplanRef subplanRef: subplanRefs) subplanRef.tblRef.setLeftTblRef(null);
-  }
-
-  /**
-   * Returns a plan tree for evaluating the given parentRefs and subplanRefs.
-   *
-   * 'fastPartitionKeyScans' indicates whether to try to produce slots with
-   * metadata instead of table scans.
-   */
-  private PlanNode createTableRefsPlan(List<TableRef> parentRefs,
-      List<SubplanRef> subplanRefs, boolean fastPartitionKeyScans,
-      Analyzer analyzer) throws ImpalaException {
-    // create plans for our table refs; use a list here instead of a map to
-    // maintain a deterministic order of traversing the TableRefs during join
-    // plan generation (helps with tests)
-    List<Pair<TableRef, PlanNode>> parentRefPlans = Lists.newArrayList();
-    for (TableRef ref: parentRefs) {
-      PlanNode root = createTableRefNode(ref, fastPartitionKeyScans, analyzer);
-      Preconditions.checkNotNull(root);
-      root = createSubplan(root, subplanRefs, true, analyzer);
-      parentRefPlans.add(new Pair<TableRef, PlanNode>(ref, root));
-    }
-    // save state of conjunct assignment; needed for join plan generation
-    for (Pair<TableRef, PlanNode> entry: parentRefPlans) {
-      entry.second.setAssignedConjuncts(analyzer.getAssignedConjuncts());
-    }
-
-    PlanNode root = null;
-    if (!analyzer.isStraightJoin()) {
-      Set<ExprId> assignedConjuncts = analyzer.getAssignedConjuncts();
-      root = createCheapestJoinPlan(analyzer, parentRefPlans, subplanRefs);
-      // If createCheapestJoinPlan() failed to produce an executable plan, then we need
-      // to restore the original state of conjunct assignment for the straight-join plan
-      // to not incorrectly miss conjuncts.
-      if (root == null) analyzer.setAssignedConjuncts(assignedConjuncts);
-    }
-    if (analyzer.isStraightJoin() || root == null) {
-      // we didn't have enough stats to do a cost-based join plan, or the STRAIGHT_JOIN
-      // keyword was in the select list: use the FROM clause order instead
-      root = createFromClauseJoinPlan(analyzer, parentRefPlans, subplanRefs);
-      Preconditions.checkNotNull(root);
-    }
-    return root;
-  }
-
-  /**
-   * Places a SubplanNode on top of 'root' that evaluates all the subplan refs that can
-   * be correctly evaluated from 'root's materialized tuple ids. Returns 'root' if there
-   * are no applicable subplan refs.
-   * Assigns the returned SubplanNode a new node id unless assignId is false.
-   *
-   * If applicable, the SubplanNode is created as follows:
-   * - 'root' is the input of the SubplanNode (first child)
-   * - the second child is the plan tree generated from these table refs:
-   *   1. a SingularRowSrcTableRef that represents the current row being processed
-   *      by the SubplanNode to be joined with
-   *   2. all applicable subplan refs
-   * - the second child plan tree is generated as usual with createTableRefsPlan()
-   * - the plans of the applicable subplan refs are generated as usual, without a
-   *   SingularRowSrcTableRef
-   * - nested SubplanNodes are generated recursively inside createTableRefsPlan() by
-   *   passing in the remaining subplanRefs that are not applicable after 'root'; some
-   *   of those subplanRefs may become applicable inside the second child plan tree of
-   *   the SubplanNode generated here
-   */
-  private PlanNode createSubplan(PlanNode root, List<SubplanRef> subplanRefs,
-      boolean assignId, Analyzer analyzer) throws ImpalaException {
-    Preconditions.checkNotNull(root);
-    List<TableRef> applicableRefs = extractApplicableRefs(root, subplanRefs);
-    if (applicableRefs.isEmpty()) return root;
-
-    // Prepend a SingularRowSrcTableRef representing the current row being processed
-    // by the SubplanNode from its input (first child).
-    Preconditions.checkState(applicableRefs.get(0).getLeftTblRef() == null);
-    applicableRefs.add(0, new SingularRowSrcTableRef(root));
-    applicableRefs.get(1).setLeftTblRef(applicableRefs.get(0));
-
-    // Construct an incomplete SubplanNode that only knows its input so we can push it
-    // into the planner context. The subplan is set after the subplan tree has been
-    // constructed.
-    SubplanNode subplanNode = new SubplanNode(root);
-    if (assignId) subplanNode.setId(ctx_.getNextNodeId());
-
-    // Push the SubplanNode such that UnnestNodes and SingularRowSrcNodes can pick up
-    // their containing SubplanNode. Also, further plan generation relies on knowing
-    // whether we are in a subplan context or not (see computeParentAndSubplanRefs()).
-    ctx_.pushSubplan(subplanNode);
-    PlanNode subplan = createTableRefsPlan(applicableRefs, subplanRefs, false, analyzer);
-    ctx_.popSubplan();
-    subplanNode.setSubplan(subplan);
-    subplanNode.init(analyzer);
-    return subplanNode;
-  }
-
-  /**
-   * Returns a new list with all table refs from subplanRefs that can be correctly
-   * evaluated inside a SubplanNode placed after the given plan root.
-   * The returned table refs have their left-table links properly set, and the
-   * corresponding SubplanRefs are removed from subplanRefs.
-   */
-  private List<TableRef> extractApplicableRefs(PlanNode root,
-      List<SubplanRef> subplanRefs) {
-    // List of table ref ids in 'root' as well as the table ref ids of all table refs
-    // placed in 'subplanRefs' so far.
-    List<TupleId> tblRefIds = Lists.newArrayList(root.getTblRefIds());
-    List<TableRef> result = Lists.newArrayList();
-    Iterator<SubplanRef> subplanRefIt = subplanRefs.iterator();
-    TableRef leftTblRef = null;
-    while (subplanRefIt.hasNext()) {
-      SubplanRef subplanRef = subplanRefIt.next();
-      // Ensure that 'root' materializes all required tuples (first condition), and that
-      // correct join ordering is obeyed (second condition).
-      if (root.getTupleIds().containsAll(subplanRef.requiredTids) &&
-          tblRefIds.containsAll(subplanRef.requiredTblRefIds)) {
-        subplanRef.tblRef.setLeftTblRef(leftTblRef);
-        result.add(subplanRef.tblRef);
-        leftTblRef = subplanRef.tblRef;
-        subplanRefIt.remove();
-        // Add the table ref id such that other subplan refs that can be evaluated inside
-        // the same SubplanNode but only after this ref are returned as well.
-        tblRefIds.add(subplanRef.tblRef.getId());
-      }
-    }
-    return result;
-  }
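
A self-contained sketch of this filtering, using a hypothetical Ref type and integer tuple ids, and omitting the bookkeeping that lets one extracted ref satisfy the table-ref-id requirements of the next:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;

public class ApplicableRefsSketch {
  static class Ref {
    final String name;
    final Set<Integer> requiredTids;
    Ref(String name, Integer... tids) {
      this.name = name;
      this.requiredTids = new HashSet<>(Arrays.asList(tids));
    }
  }

  // Return (and remove) every ref whose required tuple ids are already materialized
  // by the current plan root.
  static List<Ref> extractApplicable(Set<Integer> rootTids, List<Ref> subplanRefs) {
    List<Ref> result = new ArrayList<>();
    Iterator<Ref> it = subplanRefs.iterator();
    while (it.hasNext()) {
      Ref ref = it.next();
      if (rootTids.containsAll(ref.requiredTids)) {
        result.add(ref);
        it.remove();
      }
    }
    return result;
  }

  public static void main(String[] args) {
    List<Ref> refs = new ArrayList<>(Arrays.asList(
        new Ref("t.c1", 0), new Ref("v.c2", 0, 1)));
    Set<Integer> rootTids = new HashSet<>(Arrays.asList(0));
    // Only t.c1 qualifies; v.c2 stays behind for a later SubplanNode.
    System.out.println(extractApplicable(rootTids, refs).size() + " applicable, "
        + refs.size() + " remaining");
  }
}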
-
-  /**
-   * Returns a new AggregationNode that materializes the aggregation of the given stmt.
-   * Assigns conjuncts from the Having clause to the returned node.
-   */
-  private PlanNode createAggregationPlan(SelectStmt selectStmt, Analyzer analyzer,
-      PlanNode root) throws ImpalaException {
-    Preconditions.checkState(selectStmt.getAggInfo() != null);
-    // add aggregation, if required
-    AggregateInfo aggInfo = selectStmt.getAggInfo();
-    root = new AggregationNode(ctx_.getNextNodeId(), root, aggInfo);
-    root.init(analyzer);
-    Preconditions.checkState(root.hasValidStats());
-    // if we're computing DISTINCT agg fns, the analyzer already created the
-    // 2nd phase agginfo
-    if (aggInfo.isDistinctAgg()) {
-      ((AggregationNode)root).unsetNeedsFinalize();
-      // The output of the 1st phase agg is the 1st phase intermediate.
-      ((AggregationNode)root).setIntermediateTuple();
-      root = new AggregationNode(ctx_.getNextNodeId(), root,
-          aggInfo.getSecondPhaseDistinctAggInfo());
-      root.init(analyzer);
-      Preconditions.checkState(root.hasValidStats());
-    }
-    // add Having clause
-    root.assignConjuncts(analyzer);
-    return root;
-  }
-
-  /**
-   * Returns a UnionNode that materializes the exprs of the constant selectStmt.
-   * Replaces the resultExprs of the selectStmt with SlotRefs into the materialized
-   * tuple.
-   */
-  private PlanNode createConstantSelectPlan(SelectStmt selectStmt, Analyzer analyzer)
-      throws InternalException {
-    Preconditions.checkState(selectStmt.getTableRefs().isEmpty());
-    ArrayList<Expr> resultExprs = selectStmt.getResultExprs();
-    // Create tuple descriptor for materialized tuple.
-    TupleDescriptor tupleDesc = createResultTupleDescriptor(selectStmt, "union", analyzer);
-    UnionNode unionNode = new UnionNode(ctx_.getNextNodeId(), tupleDesc.getId());
-    // Analysis guarantees that selects without a FROM clause only have constant exprs.
-    unionNode.addConstExprList(Lists.newArrayList(resultExprs));
-
-    // Replace the select stmt's resultExprs with SlotRefs into tupleDesc.
-    for (int i = 0; i < resultExprs.size(); ++i) {
-      SlotRef slotRef = new SlotRef(tupleDesc.getSlots().get(i));
-      resultExprs.set(i, slotRef);
-    }
-    // UnionNode.init() needs tupleDesc to have been initialized
-    unionNode.init(analyzer);
-    return unionNode;
-  }
-
-  /**
-   * Create tuple descriptor that can hold the results of the given SelectStmt, with one
-   * slot per result expr.
-   */
-  private TupleDescriptor createResultTupleDescriptor(SelectStmt selectStmt,
-      String debugName, Analyzer analyzer) {
-    TupleDescriptor tupleDesc = analyzer.getDescTbl().createTupleDescriptor(
-        debugName);
-    tupleDesc.setIsMaterialized(true);
-
-    ArrayList<Expr> resultExprs = selectStmt.getResultExprs();
-    ArrayList<String> colLabels = selectStmt.getColLabels();
-    for (int i = 0; i < resultExprs.size(); ++i) {
-      Expr resultExpr = resultExprs.get(i);
-      String colLabel = colLabels.get(i);
-      SlotDescriptor slotDesc = analyzer.addSlotDescriptor(tupleDesc);
-      slotDesc.setLabel(colLabel);
-      slotDesc.setSourceExpr(resultExpr);
-      slotDesc.setType(resultExpr.getType());
-      slotDesc.setStats(ColumnStats.fromExpr(resultExpr));
-      slotDesc.setIsMaterialized(true);
-    }
-    tupleDesc.computeMemLayout();
-    return tupleDesc;
-  }
-
-  /**
-   * Transform '=', '<[=]' and '>[=]' comparisons for given slot into
-   * ValueRange. Also removes those predicates which were used for the construction
-   * of ValueRange from 'conjuncts_'. Only looks at comparisons w/ string constants
-   * (ie, the bounds of the result can be evaluated with Expr::GetValue(NULL)).
-   * HBase row key filtering works only if the row key is mapped to a string column and
-   * the expression is a string constant expression.
-   * If there are multiple competing comparison predicates that could be used
-   * to construct a ValueRange, only the first one from each category is chosen.
-   */
-  private ValueRange createHBaseValueRange(SlotDescriptor d, List<Expr> conjuncts) {
-    ListIterator<Expr> i = conjuncts.listIterator();
-    ValueRange result = null;
-    while (i.hasNext()) {
-      Expr e = i.next();
-      if (!(e instanceof BinaryPredicate)) continue;
-      BinaryPredicate comp = (BinaryPredicate) e;
-      if ((comp.getOp() == BinaryPredicate.Operator.NE)
-          || (comp.getOp() == BinaryPredicate.Operator.DISTINCT_FROM)
-          || (comp.getOp() == BinaryPredicate.Operator.NOT_DISTINCT)) {
-        continue;
-      }
-      Expr slotBinding = comp.getSlotBinding(d.getId());
-      if (slotBinding == null || !slotBinding.isConstant() ||
-          !slotBinding.getType().equals(Type.STRING)) {
-        continue;
-      }
-
-      if (comp.getOp() == BinaryPredicate.Operator.EQ) {
-        i.remove();
-        return ValueRange.createEqRange(slotBinding);
-      }
-
-      if (result == null) result = new ValueRange();
-
-      // TODO: do we need copies here?
-      if (comp.getOp() == BinaryPredicate.Operator.GT
-          || comp.getOp() == BinaryPredicate.Operator.GE) {
-        if (result.getLowerBound() == null) {
-          result.setLowerBound(slotBinding);
-          result.setLowerBoundInclusive(comp.getOp() == BinaryPredicate.Operator.GE);
-          i.remove();
-        }
-      } else {
-        if (result.getUpperBound() == null) {
-          result.setUpperBound(slotBinding);
-          result.setUpperBoundInclusive(comp.getOp() == BinaryPredicate.Operator.LE);
-          i.remove();
-        }
-      }
-    }
-    return result;
-  }
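
The range construction can be illustrated with a stripped-down sketch; the Pred and Range types are hypothetical simplifications, and the constant-folding and slot-binding checks of the real method are omitted:

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

public class RowKeyRangeSketch {
  static class Pred {
    final String op;     // "=", "<", "<=", ">", ">="
    final String bound;  // string constant compared against the row key
    Pred(String op, String bound) { this.op = op; this.bound = bound; }
  }

  static class Range {
    String lower, upper;
    boolean lowerInclusive, upperInclusive, eq;
    @Override
    public String toString() {
      if (eq) return "= " + lower;
      return (lowerInclusive ? "[" : "(") + lower + ", " + upper
          + (upperInclusive ? "]" : ")");
    }
  }

  // Fold comparison predicates into a single row-key range, consuming the predicates
  // that were used; only the first predicate of each category is taken.
  static Range toRange(List<Pred> conjuncts) {
    Range r = new Range();
    Iterator<Pred> it = conjuncts.iterator();
    while (it.hasNext()) {
      Pred p = it.next();
      if (p.op.equals("=")) {
        r.eq = true;
        r.lower = p.bound;
        it.remove();
        return r;
      }
      if ((p.op.equals(">") || p.op.equals(">=")) && r.lower == null) {
        r.lower = p.bound;
        r.lowerInclusive = p.op.equals(">=");
        it.remove();
      } else if ((p.op.equals("<") || p.op.equals("<=")) && r.upper == null) {
        r.upper = p.bound;
        r.upperInclusive = p.op.equals("<=");
        it.remove();
      }
    }
    return r;
  }

  public static void main(String[] args) {
    List<Pred> conjuncts = new ArrayList<>();
    conjuncts.add(new Pred(">=", "aaa"));
    conjuncts.add(new Pred("<", "mmm"));
    System.out.println(toRange(conjuncts));              // [aaa, mmm)
    System.out.println("leftover: " + conjuncts.size()); // 0: both were consumed
  }
}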
-
-  /**
-   * Returns plan tree for an inline view ref:
-   * - predicates from the enclosing scope that can be evaluated directly within
-   *   the inline-view plan are pushed down
-   * - predicates that cannot be evaluated directly within the inline-view plan
-   *   but only apply to the inline view are evaluated in a SelectNode placed
-   *   on top of the inline view plan
-   * - all slots that are referenced by predicates from the enclosing scope that cannot
-   *   be pushed down are marked as materialized (so that when computeMemLayout() is
-   *   called on the base table descriptors materialized by the inline view it has a
-   *   complete picture)
-   */
-  private PlanNode createInlineViewPlan(Analyzer analyzer, InlineViewRef inlineViewRef)
-      throws ImpalaException {
-    // If possible, "push down" view predicates; this is needed in order to ensure
-    // that predicates such as "x + y = 10" are evaluated in the view's plan tree
-    // rather than a SelectNode grafted on top of that plan tree.
-    // This doesn't prevent predicate propagation, because predicates like
-    // "x = 10" that get pushed down are still connected to equivalent slots
-    // via the equality predicates created for the view's select list.
-    // Include outer join conjuncts here as well because predicates from the
-    // On-clause of an outer join may be pushed into the inline view as well.
-    migrateConjunctsToInlineView(analyzer, inlineViewRef);
-
-    // Turn a constant select into a UnionNode that materializes the exprs.
-    // TODO: unify this with createConstantSelectPlan(), this is basically the
-    // same thing
-    QueryStmt viewStmt = inlineViewRef.getViewStmt();
-    if (viewStmt instanceof SelectStmt) {
-      SelectStmt selectStmt = (SelectStmt) viewStmt;
-      if (selectStmt.getTableRefs().isEmpty()) {
-        if (inlineViewRef.getAnalyzer().hasEmptyResultSet()) {
-          PlanNode emptySetNode = createEmptyNode(viewStmt, inlineViewRef.getAnalyzer());
-          // Still substitute exprs in parent nodes with the inline-view's smap to make
-          // sure no exprs reference the non-materialized inline view slots. No wrapping
-          // with TupleIsNullPredicates is necessary here because we do not migrate
-          // conjuncts into outer-joined inline views, so hasEmptyResultSet() cannot be
-          // true for an outer-joined inline view that has no table refs.
-          Preconditions.checkState(!analyzer.isOuterJoined(inlineViewRef.getId()));
-          emptySetNode.setOutputSmap(inlineViewRef.getSmap());
-          return emptySetNode;
-        }
-        // Analysis should have generated a tuple id into which to materialize the exprs.
-        Preconditions.checkState(inlineViewRef.getMaterializedTupleIds().size() == 1);
-        // we need to materialize all slots of our inline view tuple
-        analyzer.getTupleDesc(inlineViewRef.getId()).materializeSlots();
-        UnionNode unionNode = new UnionNode(ctx_.getNextNodeId(),
-            inlineViewRef.getMaterializedTupleIds().get(0));
-        if (analyzer.hasEmptyResultSet()) return unionNode;
-        unionNode.setTblRefIds(Lists.newArrayList(inlineViewRef.getId()));
-        unionNode.addConstExprList(selectStmt.getBaseTblResultExprs());
-        unionNode.init(analyzer);
-        return unionNode;
-      }
-    }
-
-    PlanNode rootNode =
-        createQueryPlan(inlineViewRef.getViewStmt(), inlineViewRef.getAnalyzer(), false);
-    // TODO: we should compute the "physical layout" of the view's descriptor, so that
-    // the avg row size is available during optimization; however, that means we need to
-    // select references to its resultExprs from the enclosing scope(s)
-    rootNode.setTblRefIds(Lists.newArrayList(inlineViewRef.getId()));
-
-    // The output smap is the composition of the inline view's smap and the output smap
-    // of the inline view's plan root. This ensures that all downstream exprs referencing
-    // the inline view are replaced with exprs referencing the physical output of the
-    // inline view's plan.
-    ExprSubstitutionMap outputSmap = ExprSubstitutionMap.compose(
-        inlineViewRef.getSmap(), rootNode.getOutputSmap(), analyzer);
-    if (analyzer.isOuterJoined(inlineViewRef.getId())) {
-      // Exprs against non-matched rows of an outer join should always return NULL.
-      // Make the rhs exprs of the output smap nullable, if necessary. This expr wrapping
-      // must be performed on the composed smap, and not on the inline view's smap,
-      // because the rhs exprs must first be resolved against the physical output of
-      // 'planRoot' to correctly determine whether wrapping is necessary.
-      List<Expr> nullableRhs = TupleIsNullPredicate.wrapExprs(
-          outputSmap.getRhs(), rootNode.getTupleIds(), analyzer);
-      outputSmap = new ExprSubstitutionMap(outputSmap.getLhs(), nullableRhs);
-    }
-    // Set output smap of rootNode *before* creating a SelectNode for proper resolution.
-    rootNode.setOutputSmap(outputSmap);
-
-    // If the inline view has a LIMIT/OFFSET or unassigned conjuncts due to analytic
-    // functions, we may have conjuncts that need to be assigned to a SELECT node on
-    // top of the current plan root node.
-    //
-    // TODO: This check is also repeated in migrateConjunctsToInlineView() because we
-    // need to make sure that equivalences are not enforced multiple times. Consolidate
-    // the assignment of conjuncts and the enforcement of equivalences into a single
-    // place.
-    if (!canMigrateConjuncts(inlineViewRef)) {
-      rootNode = addUnassignedConjuncts(
-          analyzer, inlineViewRef.getDesc().getId().asList(), rootNode);
-    }
-    return rootNode;
-  }
-
-  /**
-   * Migrates unassigned conjuncts into an inline view. Conjuncts are not
-   * migrated into the inline view if the view has a LIMIT/OFFSET clause or if the
-   * view's stmt computes analytic functions (see IMPALA-1243/IMPALA-1900).
-   * The reason is that analytic functions compute aggregates over their entire input,
-   * and applying filters from the enclosing scope *before* the aggregate computation
-   * would alter the results. This is unlike regular aggregate computation, which only
-   * makes the *output* of the computation visible to the enclosing scope, so that
-   * filters from the enclosing scope can be safely applied (to the grouping cols, say).
-   */
-  public void migrateConjunctsToInlineView(Analyzer analyzer,
-      InlineViewRef inlineViewRef) throws ImpalaException {
-    List<Expr> unassignedConjuncts =
-        analyzer.getUnassignedConjuncts(inlineViewRef.getId().asList(), true);
-    if (!canMigrateConjuncts(inlineViewRef)) {
-      // mark (fully resolve) slots referenced by unassigned conjuncts as
-      // materialized
-      List<Expr> substUnassigned = Expr.substituteList(unassignedConjuncts,
-          inlineViewRef.getBaseTblSmap(), analyzer, false);
-      analyzer.materializeSlots(substUnassigned);
-      return;
-    }
-
-    List<Expr> preds = Lists.newArrayList();
-    for (Expr e: unassignedConjuncts) {
-      if (analyzer.canEvalPredicate(inlineViewRef.getId().asList(), e)) {
-        preds.add(e);
-      }
-    }
-    unassignedConjuncts.removeAll(preds);
-    // Generate predicates to enforce equivalences among slots of the inline view
-    // tuple. These predicates are also migrated into the inline view.
-    analyzer.createEquivConjuncts(inlineViewRef.getId(), preds);
-
-    // create new predicates against the inline view's unresolved result exprs, not
-    // the resolved result exprs, in order to avoid skipping scopes (and ignoring
-    // limit clauses on the way)
-    List<Expr> viewPredicates =
-        Expr.substituteList(preds, inlineViewRef.getSmap(), analyzer, false);
-
-    // Remove unregistered predicates that reference the same slot on
-    // both sides (e.g. a = a). Such predicates have been generated from slot
-    // equivalences and may incorrectly reject rows with nulls (IMPALA-1412/IMPALA-2643).
-    Predicate<Expr> isIdentityPredicate = new Predicate<Expr>() {
-      @Override
-      public boolean apply(Expr expr) {
-        return com.cloudera.impala.analysis.Predicate.isEquivalencePredicate(expr)
-            && ((BinaryPredicate) expr).isInferred()
-            && expr.getChild(0).equals(expr.getChild(1));
-      }
-    };
-    Iterables.removeIf(viewPredicates, isIdentityPredicate);
-
-    // Migrate the conjuncts by marking the original ones as assigned, and
-    // re-registering the substituted ones with new ids.
-    analyzer.markConjunctsAssigned(preds);
-    // Unset the On-clause flag of the migrated conjuncts because the migrated conjuncts
-    // apply to the post-join/agg/analytic result of the inline view.
-    for (Expr e: viewPredicates) e.setIsOnClauseConjunct(false);
-    inlineViewRef.getAnalyzer().registerConjuncts(viewPredicates);
-
-    // mark (fully resolve) slots referenced by remaining unassigned conjuncts as
-    // materialized
-    List<Expr> substUnassigned = Expr.substituteList(unassignedConjuncts,
-        inlineViewRef.getBaseTblSmap(), analyzer, false);
-    analyzer.materializeSlots(substUnassigned);
-  }
-
-  /**
-   * Checks if conjuncts can be migrated into an inline view.
-   */
-  private boolean canMigrateConjuncts(InlineViewRef inlineViewRef) {
-    return !inlineViewRef.getViewStmt().hasLimit()
-        && !inlineViewRef.getViewStmt().hasOffset()
-        && (!(inlineViewRef.getViewStmt() instanceof SelectStmt)
-            || !((SelectStmt) inlineViewRef.getViewStmt()).hasAnalyticInfo());
-  }
-
-  /**
-   * Create a node to materialize the slots in the given HdfsTblRef.
-   *
-   * If 'hdfsTblRef' only contains partition columns and 'fastPartitionKeyScans'
-   * is true, the slots may be produced directly in this function using the metadata.
-   * Otherwise, a HdfsScanNode will be created.
-   */
-  private PlanNode createHdfsScanPlan(TableRef hdfsTblRef, boolean fastPartitionKeyScans,
-      Analyzer analyzer) throws ImpalaException {
-    HdfsTable hdfsTable = (HdfsTable)hdfsTblRef.getTable();
-    TupleDescriptor tupleDesc = hdfsTblRef.getDesc();
-
-    // Get all predicates bound by the tuple.
-    List<Expr> conjuncts = Lists.newArrayList();
-    conjuncts.addAll(analyzer.getBoundPredicates(tupleDesc.getId()));
-
-    // Also add remaining unassigned conjuncts
-    List<Expr> unassigned = analyzer.getUnassignedConjuncts(tupleDesc.getId().asList());
-    conjuncts.addAll(unassigned);
-    analyzer.markConjunctsAssigned(unassigned);
-
-    analyzer.createEquivConjuncts(tupleDesc.getId(), conjuncts);
-
-    // Do partition pruning before deciding which slots to materialize,
-    // We might end up removing some predicates.
-    HdfsPartitionPruner pruner = new HdfsPartitionPruner(tupleDesc);
-    List<HdfsPartition> partitions = pruner.prunePartitions(analyzer, conjuncts);
-
-    // Mark all slots referenced by the remaining conjuncts as materialized.
-    analyzer.materializeSlots(conjuncts);
-
-    // If the optimization for partition key scans with metadata is enabled,
-    // try evaluating with metadata first. If not, fall back to scanning.
-    if (fastPartitionKeyScans && tupleDesc.hasClusteringColsOnly()) {
-      HashSet<List<Expr>> uniqueExprs = new HashSet<List<Expr>>();
-
-      for (HdfsPartition partition: partitions) {
-        // Ignore empty partitions to match the behavior of the scan based approach.
-        if (partition.isDefaultPartition() || partition.getSize() == 0) {
-          continue;
-        }
-        List<Expr> exprs = Lists.newArrayList();
-        for (SlotDescriptor slotDesc: tupleDesc.getSlots()) {
-          // UnionNode.init() will go through all the slots in the tuple descriptor so
-          // there needs to be an entry in 'exprs' for each slot. For unmaterialized
-          // slots, use dummy null values. UnionNode will filter out unmaterialized slots.
-          if (!slotDesc.isMaterialized()) {
-            exprs.add(NullLiteral.create(slotDesc.getType()));
-          } else {
-            int pos = slotDesc.getColumn().getPosition();
-            exprs.add(partition.getPartitionValue(pos));
-          }
-        }
-        uniqueExprs.add(exprs);
-      }
-
-      // Create a UNION node with all unique partition keys.
-      UnionNode unionNode = new UnionNode(ctx_.getNextNodeId(), tupleDesc.getId());
-      for (List<Expr> exprList: uniqueExprs) {
-        unionNode.addConstExprList(exprList);
-      }
-      unionNode.init(analyzer);
-      return unionNode;
-    } else {
-      ScanNode scanNode =
-          new HdfsScanNode(ctx_.getNextNodeId(), tupleDesc, conjuncts, partitions,
-              hdfsTblRef);
-      scanNode.init(analyzer);
-      return scanNode;
-    }
-  }
-
-  /**
-   * Create node for scanning all data files of a particular table.
-   *
-   * 'fastPartitionKeyScans' indicates whether to try to produce the slots with
-   * metadata instead of table scans. Only applicable to HDFS tables.
-   *
-   * Throws if a PlanNode.init() failed or if planning of the given
-   * table ref is not implemented.
-   */
-  private PlanNode createScanNode(TableRef tblRef, boolean fastPartitionKeyScans,
-      Analyzer analyzer) throws ImpalaException {
-    ScanNode scanNode = null;
-    Table table = tblRef.getTable();
-    if (table instanceof HdfsTable) {
-      return createHdfsScanPlan(tblRef, fastPartitionKeyScans, analyzer);
-    } else if (table instanceof DataSourceTable) {
-      scanNode = new DataSourceScanNode(ctx_.getNextNodeId(), tblRef.getDesc());
-      scanNode.init(analyzer);
-      return scanNode;
-    } else if (table instanceof HBaseTable) {
-      // HBase table
-      scanNode = new HBaseScanNode(ctx_.getNextNodeId(), tblRef.getDesc());
-    } else if (tblRef.getTable() instanceof KuduTable) {
-      scanNode = new KuduScanNode(ctx_.getNextNodeId(), tblRef.getDesc());
-      scanNode.init(analyzer);
-      return scanNode;
-    } else {
-      throw new NotImplementedException(
-          "Planning not implemented for table ref class: " + tblRef.getClass());
-    }
-    // TODO: move this to HBaseScanNode.init();
-    Preconditions.checkState(scanNode instanceof HBaseScanNode);
-
-    List<Expr> conjuncts = analyzer.getUnassignedConjuncts(scanNode);
-    // mark conjuncts_ assigned here; they will either end up inside a
-    // ValueRange or will be evaluated directly by the node
-    analyzer.markConjunctsAssigned(conjuncts);
-    List<ValueRange> keyRanges = Lists.newArrayList();
-    // determine scan predicates for clustering cols
-    for (int i = 0; i < tblRef.getTable().getNumClusteringCols(); ++i) {
-      SlotDescriptor slotDesc = analyzer.getColumnSlot(
-          tblRef.getDesc(), tblRef.getTable().getColumns().get(i));
-      if (slotDesc == null || !slotDesc.getType().isStringType()) {
-        // the hbase row key is mapped to a non-string type
-        // (since it's stored in ascii it will be lexicographically ordered,
-        // and non-string comparisons won't work)
-        keyRanges.add(null);
-      } else {
-        // create ValueRange from conjuncts_ for slot; also removes conjuncts_ that were
-        // used as input for filter
-        keyRanges.add(createHBaseValueRange(slotDesc, conjuncts));
-      }
-    }
-
-    ((HBaseScanNode)scanNode).setKeyRanges(keyRanges);
-    scanNode.addConjuncts(conjuncts);
-    scanNode.init(analyzer);
-
-    return scanNode;
-  }
-
-  /**
-   * Returns all applicable conjuncts for join between two plan trees 'materializing' the
-   * given left-hand and right-hand side table ref ids. The conjuncts either come from
-   * the analyzer or are generated based on equivalence classes, if necessary. The
-   * returned conjuncts are marked as assigned.
-   * The conjuncts can be used for hash table lookups.
-   * - for inner joins, those are equi-join predicates in which one side is fully bound
-   *   by lhsTblRefIds and the other by rhsTblRefIds
-   * - for outer joins: same type of conjuncts as inner joins, but only from the
-   *   ON or USING clause
-   * Predicates that are redundant based on equivalence classes are intentionally
-   * returned by this function because the removal of redundant predicates and the
-   * creation of new predicates for enforcing slot equivalences go hand-in-hand
-   * (see analyzer.createEquivConjuncts()).
-   */
-  private List<BinaryPredicate> getHashLookupJoinConjuncts(
-      List<TupleId> lhsTblRefIds, List<TupleId> rhsTblRefIds, Analyzer analyzer) {
-    List<BinaryPredicate> result = Lists.newArrayList();
-    List<Expr> candidates = analyzer.getEqJoinConjuncts(lhsTblRefIds, rhsTblRefIds);
-    Preconditions.checkNotNull(candidates);
-    for (Expr e: candidates) {
-      if (!(e instanceof BinaryPredicate)) continue;
-      BinaryPredicate normalizedJoinConjunct =
-          getNormalizedEqPred(e, lhsTblRefIds, rhsTblRefIds, analyzer);
-      if (normalizedJoinConjunct == null) continue;
-      analyzer.markConjunctAssigned(e);
-      result.add(normalizedJoinConjunct);
-    }
-    if (!result.isEmpty()) return result;
-
-    // Construct join conjuncts derived from equivalence class membership.
-    for (TupleId rhsId: rhsTblRefIds) {
-      TableRef rhsTblRef = analyzer.getTableRef(rhsId);
-      Preconditions.checkNotNull(rhsTblRef);
-      for (SlotDescriptor slotDesc: rhsTblRef.getDesc().getSlots()) {
-        SlotId rhsSid = slotDesc.getId();
-        // List of slots that participate in a value transfer with rhsSid and belong
-        // to a tuple in lhsTblRefIds. The value transfer is not necessarily mutual.
-        List<SlotId> lhsSlotIds = analyzer.getEquivSlots(rhsSid, lhsTblRefIds);
-        for (SlotId lhsSid: lhsSlotIds) {
-          // A mutual value transfer between lhsSid and rhsSid is required for correctly
-          // generating an inferred predicate. Otherwise, the predicate might incorrectly
-          // eliminate rows that would have been non-matches of an outer or anti join.
-          if (analyzer.hasMutualValueTransfer(lhsSid, rhsSid)) {
-            // construct a BinaryPredicate in order to get correct casting;
-            // we only do this for one of the equivalent slots, all the other implied
-            // equalities are redundant
-            BinaryPredicate pred =
-                analyzer.createInferredEqPred(lhsSid, rhsSid);
-            result.add(pred);
-            break;
-          }
-        }
-      }
-    }
-    return result;
-  }
-
-  /**
-   * Returns a normalized version of a binary equality predicate 'expr' where the lhs
-   * child expr is bound by some tuple in 'lhsTids' and the rhs child expr is bound by
-   * some tuple in 'rhsTids'. Returns 'expr' if this predicate is already normalized.
-   * Returns null in any of the following cases:
-   * 1. It is not an equality predicate
-   * 2. One of the operands is a constant
-   * 3. Both children of this predicate are the same expr
-   * 4. Cannot be normalized
-   */
-  public static BinaryPredicate getNormalizedEqPred(Expr expr, List<TupleId> lhsTids,
-      List<TupleId> rhsTids, Analyzer analyzer) {
-    if (!(expr instanceof BinaryPredicate)) return null;
-    BinaryPredicate pred = (BinaryPredicate) expr;
-    if (!pred.getOp().isEquivalence() && pred.getOp() != Operator.NULL_MATCHING_EQ) {
-      return null;
-    }
-    if (pred.getChild(0).isConstant() || pred.getChild(1).isConstant()) return null;
-
-    Expr lhsExpr = Expr.getFirstBoundChild(pred, lhsTids);
-    Expr rhsExpr = Expr.getFirstBoundChild(pred, rhsTids);
-    if (lhsExpr == null || rhsExpr == null || lhsExpr == rhsExpr) return null;
-
-    BinaryPredicate result = new BinaryPredicate(pred.getOp(), lhsExpr, rhsExpr);
-    result.analyzeNoThrow(analyzer);
-    return result;
-  }
-
-  /**
-   * Creates a new node to join outer with inner. Collects and assigns join conjunct
-   * as well as regular conjuncts. Calls init() on the new join node.
-   * Throws if the JoinNode.init() fails.
-   */
-  private PlanNode createJoinNode(PlanNode outer, PlanNode inner,
-      TableRef innerRef, Analyzer analyzer) throws ImpalaException {
-    // get eq join predicates for the TableRefs' ids (not the PlanNodes' ids, which
-    // are materialized)
-    List<BinaryPredicate> eqJoinConjuncts = getHashLookupJoinConjuncts(
-        outer.getTblRefIds(), inner.getTblRefIds(), analyzer);
-    // Outer joins should only use On-clause predicates as eqJoinConjuncts.
-    if (!innerRef.getJoinOp().isOuterJoin()) {
-      analyzer.createEquivConjuncts(outer.getTblRefIds(), inner.getTblRefIds(),
-          eqJoinConjuncts);
-    }
-    if (!eqJoinConjuncts.isEmpty() && innerRef.getJoinOp() == JoinOperator.CROSS_JOIN) {
-      innerRef.setJoinOp(JoinOperator.INNER_JOIN);
-    }
-
-    List<Expr> otherJoinConjuncts = Lists.newArrayList();
-    if (innerRef.getJoinOp().isOuterJoin()) {
-      // Also assign conjuncts from On clause. All remaining unassigned conjuncts
-      // that can be evaluated by this join are assigned in createSelectPlan().
-      otherJoinConjuncts = analyzer.getUnassignedOjConjuncts(innerRef);
-    } else if (innerRef.getJoinOp().isSemiJoin()) {
-      // Unassigned conjuncts bound by the invisible tuple id of a semi join must have
-      // come from the join's On-clause, and therefore, must be added to the other join
-      // conjuncts to produce correct results.
-      // TODO This doesn't handle predicates specified in the On clause which are not
-      // bound by any tuple id (e.g. ON (true))
-      List<TupleId> tblRefIds = Lists.newArrayList(outer.getTblRefIds());
-      tblRefIds.addAll(inner.getTblRefIds());
-      otherJoinConjuncts = analyzer.getUnassignedConjuncts(tblRefIds, false);
-      if (innerRef.getJoinOp().isNullAwareLeftAntiJoin()) {
-        boolean hasNullMatchingEqOperator = false;
-        // Keep only the null-matching eq conjunct in the eqJoinConjuncts and move
-        // all the others in otherJoinConjuncts. The BE relies on this
-        // separation for correct execution of the null-aware left anti join.
-        Iterator<BinaryPredicate> it = eqJoinConjuncts.iterator();
-        while (it.hasNext()) {
-          BinaryPredicate conjunct = it.next();
-          if (!conjunct.isNullMatchingEq()) {
-            otherJoinConjuncts.add(conjunct);
-            it.remove();
-          } else {
-            // Only one null-matching eq conjunct is allowed
-            Preconditions.checkState(!hasNullMatchingEqOperator);
-            hasNullMatchingEqOperator = true;
-          }
-        }
-        Preconditions.checkState(hasNullMatchingEqOperator);
-      }
-    }
-    analyzer.markConjunctsAssigned(otherJoinConjuncts);
-
-    // Use a nested-loop join if there are no equi-join conjuncts, or if the inner
-    // (build side) is a singular row src. A singular row src has a cardinality of 1, so
-    // a nested-loop join is certainly cheaper than a hash join.
-    JoinNode result = null;
-    Preconditions.checkState(!innerRef.getJoinOp().isNullAwareLeftAntiJoin()
-        || !(inner instanceof SingularRowSrcNode));
-    if (eqJoinConjuncts.isEmpty() || inner instanceof SingularRowSrcNode) {
-      otherJoinConjuncts.addAll(eqJoinConjuncts);
-      result = new NestedLoopJoinNode(outer, inner, analyzer.isStraightJoin(),
-          innerRef.getDistributionMode(), innerRef.getJoinOp(), otherJoinConjuncts);
-    } else {
-      result = new HashJoinNode(outer, inner, analyzer.isStraightJoin(),
-          innerRef.getDistributionMode(), innerRef.getJoinOp(), eqJoinConjuncts,
-          otherJoinConjuncts);
-    }
-    result.init(analyzer);
-    return result;
-  }
-
-  /**
-   * Create a tree of PlanNodes for the given tblRef, which can be a BaseTableRef,
-   * CollectionTableRef or an InlineViewRef.
-   *
-   * 'fastPartitionKeyScans' indicates whether to try to produce the slots with
-   * metadata instead of table scans. Only applicable to BaseTableRef which is also
-   * an HDFS table.
-   *
-   * Throws if a PlanNode.init() failed or if planning of the given
-   * table ref is not implemented.
-   */
-  private PlanNode createTableRefNode(TableRef tblRef, boolean fastPartitionKeyScans,
-      Analyzer analyzer) throws ImpalaException {
-    PlanNode result = null;
-    if (tblRef instanceof BaseTableRef) {
-      result = createScanNode(tblRef, fastPartitionKeyScans, analyzer);
-    } else if (tblRef instanceof CollectionTableRef) {
-      if (tblRef.isRelative()) {
-        Preconditions.checkState(ctx_.hasSubplan());
-        result = new UnnestNode(ctx_.getNextNodeId(), ctx_.getSubplan(),
-            (CollectionTableRef) tblRef);
-        result.init(analyzer);
-      } else {
-        result = createScanNode(tblRef, false, analyzer);
-      }
-    } else if (tblRef instanceof InlineViewRef) {
-      result = createInlineViewPlan(analyzer, (InlineViewRef) tblRef);
-    } else if (tblRef instanceof SingularRowSrcTableRef) {
-      Preconditions.checkState(ctx_.hasSubplan());
-      result = new SingularRowSrcNode(ctx_.getNextNodeId(), ctx_.getSubplan());
-      result.init(analyzer);
-    } else {
-      throw new NotImplementedException(
-          "Planning not implemented for table ref class: " + tblRef.getClass());
-    }
-    return result;
-  }
-
-  /**
-   * Create a plan tree corresponding to 'unionOperands' for the given unionStmt.
-   * The individual operands' plan trees are attached to a single UnionNode.
-   * If unionDistinctPlan is not null, it is expected to contain the plan for the
-   * distinct portion of the given unionStmt. The unionDistinctPlan is then added
-   * as a child of the returned UnionNode.
-   */
-  private UnionNode createUnionPlan(
-      Analyzer analyzer, UnionStmt unionStmt, List<UnionOperand> unionOperands,
-      PlanNode unionDistinctPlan)
-      throws ImpalaException {
-    UnionNode unionNode = new UnionNode(ctx_.getNextNodeId(), unionStmt.getTupleId());
-    for (UnionOperand op: unionOperands) {
-      if (op.getAnalyzer().hasEmptyResultSet()) {
-        unmarkCollectionSlots(op.getQueryStmt());
-        continue;
-      }
-      QueryStmt queryStmt = op.getQueryStmt();
-      if (queryStmt instanceof SelectStmt) {
-        SelectStmt selectStmt = (SelectStmt) queryStmt;
-        if (selectStmt.getTableRefs().isEmpty()) {
-          unionNode.addConstExprList(selectStmt.getBaseTblResultExprs());
-          continue;
-        }
-      }
-      PlanNode opPlan = createQueryPlan(queryStmt, op.getAnalyzer(), false);
-      // There may still be unassigned conjuncts if the operand has an order by + limit.
-      // Place them into a SelectNode on top of the operand's plan.
-      opPlan = addUnassignedConjuncts(analyzer, opPlan.getTupleIds(), opPlan);
-      if (opPlan instanceof EmptySetNode) continue;
-      unionNode.addChild(opPlan, op.getQueryStmt().getBaseTblResultExprs());
-    }
-
-    if (unionDistinctPlan != null) {
-      Preconditions.checkState(unionStmt.hasDistinctOps());
-      Preconditions.checkState(unionDistinctPlan instanceof AggregationNode);
-      unionNode.addChild(unionDistinctPlan,
-          unionStmt.getDistinctAggInfo().getGroupingExprs());
-    }
-    unionNode.init(analyzer);
-    return unionNode;
-  }
-
-  /**
-   * Returns plan tree for unionStmt:
-   * - distinctOperands' plan trees are collected in a single UnionNode
-   *   and duplicates removed via distinct aggregation
-   * - the output of that plus the allOperands' plan trees are collected in
-   *   another UnionNode which materializes the result of unionStmt
-   * - if any of the union operands contains analytic exprs, we avoid pushing
-   *   predicates directly into the operands and instead evaluate them
-   *   *after* the final UnionNode (see createInlineViewPlan() for the reasoning)
-   *   TODO: optimize this by still pushing predicates into the union operands
-   *   that don't contain analytic exprs and evaluating the conjuncts in Select
-   *   directly above the AnalyticEvalNodes
-   * TODO: Simplify the plan of unions with empty operands using an empty set node.
-   * TODO: Simplify the plan of unions with only a single non-empty operand to not
-   *       use a union node (this is tricky because a union materializes a new tuple).
-   */
-  private PlanNode createUnionPlan(UnionStmt unionStmt, Analyzer analyzer)
-      throws ImpalaException {
-    List<Expr> conjuncts =
-        analyzer.getUnassignedConjuncts(unionStmt.getTupleId().asList(), false);
-    if (!unionStmt.hasAnalyticExprs()) {
-      // Turn unassigned predicates for unionStmt's tupleId_ into predicates for
-      // the individual operands.
-      // Do this prior to creating the operands' plan trees so they get a chance to
-      // pick up propagated predicates.
-      for (UnionOperand op: unionStmt.getOperands()) {
-        List<Expr> opConjuncts =
-            Expr.substituteList(conjuncts, op.getSmap(), analyzer, false);
-        op.getAnalyzer().registerConjuncts(opConjuncts);
-      }
-      analyzer.markConjunctsAssigned(conjuncts);
-    } else {
-      // mark slots referenced by the yet-unassigned conjuncts
-      analyzer.materializeSlots(conjuncts);
-    }
-    // mark slots after predicate propagation but prior to plan tree generation
-    unionStmt.materializeRequiredSlots(analyzer);
-
-    PlanNode result = null;
-    // create DISTINCT tree
-    if (unionStmt.hasDistinctOps()) {
-      result = createUnionPlan(
-          analyzer, unionStmt, unionStmt.getDistinctOperands(), null);
-      result = new AggregationNode(
-          ctx_.getNextNodeId(), result, unionStmt.getDistinctAggInfo());
-      result.init(analyzer);
-    }
-    // create ALL tree
-    if (unionStmt.hasAllOps()) {
-      result = createUnionPlan(analyzer, unionStmt, unionStmt.getAllOperands(), result);
-    }
-
-    if (unionStmt.hasAnalyticExprs()) {
-      result = addUnassignedConjuncts(
-          analyzer, unionStmt.getTupleId().asList(), result);
-    }
-    return result;
-  }
-}

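As an aside on the Planner.java hunk above: the Javadoc for getNormalizedEqPred() describes when an equality predicate can be turned into a hash-lookup join conjunct, namely that one operand must be bound by the left-hand table refs and the other by the right-hand table refs. Below is a minimal standalone sketch of that normalization rule; the names here (NormalizedEqPredSketch, SlotRef, EqPred, normalize) are illustrative stand-ins and not part of the Impala frontend.

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Set;

    public class NormalizedEqPredSketch {
      // A toy equality predicate: each side is a slot reference tagged with the
      // tuple id ("table ref") that produces it.
      static class SlotRef {
        final String tupleId;
        final String name;
        SlotRef(String tupleId, String name) { this.tupleId = tupleId; this.name = name; }
        @Override public String toString() { return tupleId + "." + name; }
      }

      static class EqPred {
        final SlotRef lhs;
        final SlotRef rhs;
        EqPred(SlotRef lhs, SlotRef rhs) { this.lhs = lhs; this.rhs = rhs; }
        @Override public String toString() { return lhs + " = " + rhs; }
      }

      // Returns a predicate whose left operand is bound by 'lhsTids' and whose right
      // operand is bound by 'rhsTids', swapping operands if needed. Returns null if
      // the predicate does not connect the two sides, one of the null-returning cases
      // described in the Javadoc above.
      static EqPred normalize(EqPred p, Set<String> lhsTids, Set<String> rhsTids) {
        if (lhsTids.contains(p.lhs.tupleId) && rhsTids.contains(p.rhs.tupleId)) return p;
        if (lhsTids.contains(p.rhs.tupleId) && rhsTids.contains(p.lhs.tupleId)) {
          return new EqPred(p.rhs, p.lhs);
        }
        return null;
      }

      public static void main(String[] args) {
        // For "t1 JOIN t2 ON t2.b = t1.a" the operands are flipped so that the left
        // child is bound by t1 and the right child by t2. Prints: t1.a = t2.b
        EqPred onClause = new EqPred(new SlotRef("t2", "b"), new SlotRef("t1", "a"));
        Set<String> lhs = new HashSet<>(Arrays.asList("t1"));
        Set<String> rhs = new HashSet<>(Arrays.asList("t2"));
        System.out.println(normalize(onClause, lhs, rhs));
      }
    }
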
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/planner/SingularRowSrcNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/planner/SingularRowSrcNode.java b/fe/src/main/java/com/cloudera/impala/planner/SingularRowSrcNode.java
deleted file mode 100644
index 88b3d7d..0000000
--- a/fe/src/main/java/com/cloudera/impala/planner/SingularRowSrcNode.java
+++ /dev/null
@@ -1,82 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.planner;
-
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.thrift.TExplainLevel;
-import com.cloudera.impala.thrift.TPlanNode;
-import com.cloudera.impala.thrift.TPlanNodeType;
-import com.google.common.base.Preconditions;
-
-/**
- * A SingularRowSrcNode returns the current row that is being processed by its
- * containing SubplanNode. A SingularRowSrcNode can only appear in the plan tree
- * of a SubplanNode. A SingularRowSrcNode returns its parent's smap such that
- * substitutions are appropriately applied within the SubplanNode's second child.
- */
-public class SingularRowSrcNode extends PlanNode {
-  private final SubplanNode containingSubplanNode_;
-
-  protected SingularRowSrcNode(PlanNodeId id, SubplanNode containingSubplanNode) {
-    super(id, "SINGULAR ROW SRC");
-    containingSubplanNode_ = containingSubplanNode;
-    computeTupleIds();
-  }
-
-  @Override
-  public void computeTupleIds() {
-    clearTupleIds();
-    tupleIds_.addAll(containingSubplanNode_.getChild(0).getTupleIds());
-    tblRefIds_.addAll(containingSubplanNode_.getChild(0).getTblRefIds());
-    nullableTupleIds_.addAll(containingSubplanNode_.getChild(0).getNullableTupleIds());
-  }
-
-  @Override
-  public void init(Analyzer analyzer) throws ImpalaException {
-    super.init(analyzer);
-    outputSmap_ = containingSubplanNode_.getChild(0).getOutputSmap();
-    Preconditions.checkState(conjuncts_.isEmpty());
-  }
-
-  @Override
-  public void computeStats(Analyzer analyzer) {
-    super.computeStats(analyzer);
-    cardinality_ = 1;
-    // The containing SubplanNode has not yet been initialized, so get the number
-    // of nodes from the SubplanNode's input.
-    numNodes_ = containingSubplanNode_.getChild(0).getNumNodes();
-  }
-
-  @Override
-  protected String getNodeExplainString(String prefix, String detailPrefix,
-      TExplainLevel detailLevel) {
-    StringBuilder output = new StringBuilder();
-    output.append(String.format("%s%s\n", prefix, getDisplayLabel()));
-    if (detailLevel.ordinal() >= TExplainLevel.EXTENDED.ordinal()) {
-      output.append(String.format(
-          "%sparent-subplan=%s\n", detailPrefix, containingSubplanNode_.getId()));
-    }
-    return output.toString();
-  }
-
-  @Override
-  protected void toThrift(TPlanNode msg) {
-    msg.node_type = TPlanNodeType.SINGULAR_ROW_SRC_NODE;
-  }
-}


http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/UpdateStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/UpdateStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/UpdateStmt.java
deleted file mode 100644
index 34e907b..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/UpdateStmt.java
+++ /dev/null
@@ -1,117 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.List;
-
-import com.cloudera.impala.common.Pair;
-import com.cloudera.impala.planner.DataSink;
-import com.cloudera.impala.planner.KuduTableSink;
-import com.cloudera.impala.planner.TableSink;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Lists;
-
-import static java.lang.String.format;
-
-/**
- * Representation of an Update statement.
- *
- * Example UPDATE statement:
- *
- *     UPDATE target_table
- *       SET slotRef=expr, [slotRef=expr, ...]
- *       FROM table_ref_list
- *       WHERE conjunct_list
- *
- * An update statement consists of four major parts: the target table path, the list
- * of assignments, the optional FROM clause, and the optional WHERE clause. The type
- * of the right-hand side of each assignment must be assignment-compatible with the
- * left-hand side column type.
- *
- * Currently, only Kudu tables can be updated.
- */
-public class UpdateStmt extends ModifyStmt {
-  public UpdateStmt(List<String> targetTablePath,  FromClause tableRefs,
-      List<Pair<SlotRef, Expr>> assignmentExprs,  Expr wherePredicate,
-      boolean ignoreNotFound) {
-    super(targetTablePath, tableRefs, assignmentExprs, wherePredicate, ignoreNotFound);
-  }
-
-  public UpdateStmt(UpdateStmt other) {
-    super(other.targetTablePath_, other.fromClause_.clone(),
-        Lists.<Pair<SlotRef, Expr>>newArrayList(), other.wherePredicate_,
-        other.ignoreNotFound_);
-  }
-
-  /**
-   * Return an instance of a KuduTableSink specialized as an Update operation.
-   */
-  public DataSink createDataSink() {
-    // analyze() must have been called before.
-    Preconditions.checkState(table_ != null);
-    DataSink dataSink = TableSink.create(table_, TableSink.Op.UPDATE,
-        ImmutableList.<Expr>of(), referencedColumns_, false, ignoreNotFound_);
-    Preconditions.checkState(!referencedColumns_.isEmpty());
-    return dataSink;
-  }
-
-  @Override
-  public UpdateStmt clone() {
-    return new UpdateStmt(this);
-  }
-
-  @Override
-  public String toSql() {
-    StringBuilder b = new StringBuilder();
-    b.append("UPDATE ");
-
-    if (ignoreNotFound_) b.append("IGNORE ");
-
-    if (fromClause_ == null) {
-      b.append(targetTableRef_.toSql());
-    } else {
-      if (targetTableRef_.hasExplicitAlias()) {
-        b.append(targetTableRef_.getExplicitAlias());
-      } else {
-        b.append(targetTableRef_.toSql());
-      }
-    }
-    b.append(" SET");
-
-    boolean first = true;
-    for (Pair<SlotRef, Expr> i : assignments_) {
-      if (!first) {
-        b.append(",");
-      } else {
-        first = false;
-      }
-      b.append(format(" %s = %s",
-          i.first.toSql(),
-          i.second.toSql()));
-    }
-
-    b.append(fromClause_.toSql());
-
-    if (wherePredicate_ != null) {
-      b.append(" WHERE ");
-      b.append(wherePredicate_.toSql());
-    }
-    return b.toString();
-  }
-}

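As an aside on the UpdateStmt.java hunk above: toSql() rebuilds the UPDATE text from the target table, the assignment list, and the optional WHERE clause. A small self-contained sketch of that string assembly follows, using plain strings instead of analyzed expressions; UpdateSqlSketch and its toSql helper are hypothetical names, not Impala classes.

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class UpdateSqlSketch {
      // Rebuilds an UPDATE statement from its parts, in the spirit of
      // UpdateStmt.toSql() above. The inputs are plain strings here.
      static String toSql(String target, Map<String, String> assignments, String where) {
        StringBuilder b = new StringBuilder("UPDATE ").append(target).append(" SET");
        boolean first = true;
        for (Map.Entry<String, String> e : assignments.entrySet()) {
          b.append(first ? " " : ", ");
          first = false;
          b.append(e.getKey()).append(" = ").append(e.getValue());
        }
        if (where != null) b.append(" WHERE ").append(where);
        return b.toString();
      }

      public static void main(String[] args) {
        Map<String, String> set = new LinkedHashMap<>();
        set.put("name", "'bob'");
        set.put("age", "age + 1");
        // Prints: UPDATE kudu_tbl SET name = 'bob', age = age + 1 WHERE id = 7
        System.out.println(toSql("kudu_tbl", set, "id = 7"));
      }
    }
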
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/UseStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/UseStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/UseStmt.java
deleted file mode 100644
index c62c454..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/UseStmt.java
+++ /dev/null
@@ -1,55 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.catalog.Catalog;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TUseDbParams;
-
-/**
- * Representation of a USE db statement.
- */
-public class UseStmt extends StatementBase {
-  private final String database_;
-
-  public UseStmt(String db) {
-    database_ = db;
-  }
-
-  public String getDatabase() { return database_; }
-
-  @Override
-  public String toSql() {
-    return "USE " + database_;
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    if (!database_.equalsIgnoreCase(Catalog.DEFAULT_DB)) {
-      // USE <default> should always be allowed.
-      analyzer.getDb(database_, Privilege.ANY, true);
-    }
-  }
-
-  public TUseDbParams toThrift() {
-    TUseDbParams params = new TUseDbParams();
-    params.setDb(getDatabase());
-    return params;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/ValuesStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/ValuesStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/ValuesStmt.java
deleted file mode 100644
index ed3339d..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/ValuesStmt.java
+++ /dev/null
@@ -1,82 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import com.google.common.base.Preconditions;
-
-/**
- * Representation of a values() statement with a list of constant-expression lists.
- * ValuesStmt is a special case of a UnionStmt with the following restrictions:
- * - Operands are only constant selects
- * - Operands are connected by UNION ALL
- * - No nesting of ValuesStmts
- */
-public class ValuesStmt extends UnionStmt {
-
-  public ValuesStmt(List<UnionOperand> operands,
-      ArrayList<OrderByElement> orderByElements, LimitElement limitElement) {
-    super(operands, orderByElements, limitElement);
-  }
-
-  /**
-   * C'tor for cloning.
-   */
-  private ValuesStmt(ValuesStmt other) { super(other); }
-
-  @Override
-  protected String queryStmtToSql(QueryStmt queryStmt) {
-    StringBuilder strBuilder = new StringBuilder();
-    strBuilder.append("(");
-    appendSelectList((SelectStmt) queryStmt, strBuilder);
-    strBuilder.append(")");
-    return strBuilder.toString();
-  }
-
-  @Override
-  public String toSql() {
-    StringBuilder strBuilder = new StringBuilder();
-    if (withClause_ != null) {
-      strBuilder.append(withClause_.toSql());
-      strBuilder.append(" ");
-    }
-    Preconditions.checkState(operands_.size() > 0);
-    strBuilder.append("VALUES(");
-    for (int i = 0; i < operands_.size(); ++i) {
-      if (operands_.size() != 1) strBuilder.append("(");
-      appendSelectList((SelectStmt) operands_.get(i).getQueryStmt(), strBuilder);
-      if (operands_.size() != 1) strBuilder.append(")");
-      strBuilder.append((i+1 != operands_.size()) ? ", " : "");
-    }
-    strBuilder.append(")");
-    return strBuilder.toString();
-  }
-
-  private void appendSelectList(SelectStmt select, StringBuilder strBuilder) {
-    SelectList selectList = select.getSelectList();
-    for (int j = 0; j < selectList.getItems().size(); ++j) {
-      strBuilder.append(selectList.getItems().get(j).toSql());
-      strBuilder.append((j+1 != selectList.getItems().size()) ? ", " : "");
-    }
-  }
-
-  @Override
-  public ValuesStmt clone() { return new ValuesStmt(this); }
-}

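As an aside on the ValuesStmt.java hunk above: toSql() renders each operand's constant select list and adds per-row parentheses only when there is more than one row. A minimal sketch of that rendering over plain string rows follows; ValuesSqlSketch is a hypothetical name used only for illustration.

    import java.util.Arrays;
    import java.util.List;

    public class ValuesSqlSketch {
      // Mirrors the shape produced by ValuesStmt.toSql() above: each operand is a
      // list of constant expressions, and per-row parentheses are added only when
      // there is more than one row.
      static String toSql(List<List<String>> rows) {
        StringBuilder b = new StringBuilder("VALUES(");
        for (int i = 0; i < rows.size(); ++i) {
          if (rows.size() != 1) b.append("(");
          b.append(String.join(", ", rows.get(i)));
          if (rows.size() != 1) b.append(")");
          if (i + 1 != rows.size()) b.append(", ");
        }
        return b.append(")").toString();
      }

      public static void main(String[] args) {
        // Prints: VALUES((1, 'a'), (2, 'b'))
        System.out.println(toSql(Arrays.asList(
            Arrays.asList("1", "'a'"), Arrays.asList("2", "'b'"))));
      }
    }
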
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/WithClause.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/WithClause.java b/fe/src/main/java/com/cloudera/impala/analysis/WithClause.java
deleted file mode 100644
index 70e1f29..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/WithClause.java
+++ /dev/null
@@ -1,140 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import com.cloudera.impala.authorization.PrivilegeRequest;
-import com.cloudera.impala.catalog.View;
-import com.cloudera.impala.common.AnalysisException;
-import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-/**
- * Representation of the WITH clause that may appear before a query statement or insert
- * statement. A WITH clause contains a list of named view definitions that may be
- * referenced in the query statement that follows it.
- *
- * Scoping rules:
- * A WITH-clause view is visible inside the query statement that it belongs to.
- * This includes inline views and nested WITH clauses inside the query statement.
- *
- * Each WITH clause establishes a new analysis scope. A WITH-clause view definition
- * may refer to views from the same WITH-clause appearing to its left, and to all
- * WITH-clause views from outer scopes.
- *
- * References to WITH-clause views are resolved inside out, i.e., a match is found by
- * first looking in the current scope and then in the enclosing scope(s).
- *
- * Views defined within the same WITH-clause may not use the same alias.
- */
-public class WithClause implements ParseNode {
-  /////////////////////////////////////////
-  // BEGIN: Members that need to be reset()
-
-  private final ArrayList<View> views_;
-
-  // END: Members that need to be reset()
-  /////////////////////////////////////////
-
-  public WithClause(ArrayList<View> views) {
-    Preconditions.checkNotNull(views);
-    Preconditions.checkState(!views.isEmpty());
-    views_ = views;
-  }
-
-  /**
-   * Analyzes all views and registers them with the analyzer. Enforces scoping rules.
-   * All local views registered with the analyzer have QueryStmts with resolved
-   * TableRefs to simplify the analysis of view references.
-   */
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    // Create a new analyzer for the WITH clause with a new global state (IMPALA-1357)
-    // but a child of 'analyzer' so that the global state for 'analyzer' is not polluted
-    // during analysis of the WITH clause. withClauseAnalyzer is a child of 'analyzer' so
-    // that local views registered in parent blocks are visible here.
-    Analyzer withClauseAnalyzer = Analyzer.createWithNewGlobalState(analyzer);
-    withClauseAnalyzer.setIsWithClause();
-    if (analyzer.isExplain()) withClauseAnalyzer.setIsExplain();
-    try {
-      for (View view: views_) {
-        Analyzer viewAnalyzer = new Analyzer(withClauseAnalyzer);
-        view.getQueryStmt().analyze(viewAnalyzer);
-        // Register this view so that the next view can reference it.
-        withClauseAnalyzer.registerLocalView(view);
-      }
-      // Register all local views with the analyzer.
-      for (View localView: withClauseAnalyzer.getLocalViews().values()) {
-        analyzer.registerLocalView(localView);
-      }
-      // Record audit events because the resolved table references won't generate any
-      // when a view is referenced.
-      analyzer.getAccessEvents().addAll(withClauseAnalyzer.getAccessEvents());
-
-      // Register all privilege requests made from the root analyzer.
-      for (PrivilegeRequest req: withClauseAnalyzer.getPrivilegeReqs()) {
-        analyzer.registerPrivReq(req);
-      }
-    } finally {
-      // Record missing tables in the original analyzer.
-      if (analyzer.isRootAnalyzer()) {
-        analyzer.getMissingTbls().addAll(withClauseAnalyzer.getMissingTbls());
-      }
-    }
-  }
-
-  /**
-   * C'tor for cloning.
-   */
-  private WithClause(WithClause other) {
-    Preconditions.checkNotNull(other);
-    views_ = Lists.newArrayList();
-    for (View view: other.views_) {
-      views_.add(new View(view.getName(), view.getQueryStmt().clone(),
-          view.getOriginalColLabels()));
-    }
-  }
-
-  public void reset() {
-    for (View view: views_) view.getQueryStmt().reset();
-  }
-
-  @Override
-  public WithClause clone() { return new WithClause(this); }
-
-  @Override
-  public String toSql() {
-    List<String> viewStrings = Lists.newArrayList();
-    for (View view: views_) {
-      // Enclose the view alias and explicit labels in quotes if Hive cannot parse it
-      // without quotes. This is needed for view compatibility between Impala and Hive.
-      String aliasSql = ToSqlUtils.getIdentSql(view.getName());
-      if (view.hasColLabels()) {
-        aliasSql += "(" + Joiner.on(", ").join(
-            ToSqlUtils.getIdentSqlList(view.getOriginalColLabels())) + ")";
-      }
-      viewStrings.add(aliasSql + " AS (" + view.getQueryStmt().toSql() + ")");
-    }
-    return "WITH " + Joiner.on(",").join(viewStrings);
-  }
-
-  public List<View> getViews() { return views_; }
-}

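As an aside on the WithClause.java hunk above: the class comment states that each WITH clause opens a new scope and that view references are resolved inside out, so the innermost definition of an alias wins. A small standalone sketch of that lookup rule over a stack of scopes follows; WithScopeSketch and its methods are illustrative only and not Impala classes.

    import java.util.ArrayDeque;
    import java.util.Deque;
    import java.util.HashMap;
    import java.util.Map;

    public class WithScopeSketch {
      // Each WITH clause opens a new scope; references are resolved inside out.
      // A "view" here is just its SQL text.
      private final Deque<Map<String, String>> scopes = new ArrayDeque<>();

      void pushScope() { scopes.push(new HashMap<>()); }
      void popScope() { scopes.pop(); }
      void define(String alias, String viewSql) { scopes.peek().put(alias, viewSql); }

      String resolve(String alias) {
        for (Map<String, String> scope : scopes) {  // innermost scope first
          String viewSql = scope.get(alias);
          if (viewSql != null) return viewSql;
        }
        return null;  // not a local view; a real resolver would fall back to the catalog
      }

      public static void main(String[] args) {
        WithScopeSketch s = new WithScopeSketch();
        s.pushScope();
        s.define("v", "SELECT 1");      // outer WITH v AS (SELECT 1)
        s.pushScope();
        s.define("v", "SELECT 2");      // inner WITH v AS (SELECT 2) shadows the outer v
        System.out.println(s.resolve("v"));  // prints SELECT 2
        s.popScope();
        System.out.println(s.resolve("v"));  // prints SELECT 1
      }
    }
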
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/authorization/AuthorizationChecker.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/authorization/AuthorizationChecker.java b/fe/src/main/java/com/cloudera/impala/authorization/AuthorizationChecker.java
deleted file mode 100644
index 32f60df..0000000
--- a/fe/src/main/java/com/cloudera/impala/authorization/AuthorizationChecker.java
+++ /dev/null
@@ -1,190 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.authorization;
-
-import java.util.EnumSet;
-import java.util.List;
-import java.util.Set;
-
-import org.apache.commons.lang.reflect.ConstructorUtils;
-import org.apache.sentry.core.common.ActiveRoleSet;
-import org.apache.sentry.core.common.Subject;
-import org.apache.sentry.core.model.db.DBModelAction;
-import org.apache.sentry.core.model.db.DBModelAuthorizable;
-import org.apache.sentry.policy.db.SimpleDBPolicyEngine;
-import org.apache.sentry.provider.cache.SimpleCacheProviderBackend;
-import org.apache.sentry.provider.common.ProviderBackend;
-import org.apache.sentry.provider.common.ProviderBackendContext;
-import org.apache.sentry.provider.common.ResourceAuthorizationProvider;
-import org.apache.sentry.provider.file.SimpleFileProviderBackend;
-
-import com.cloudera.impala.catalog.AuthorizationException;
-import com.cloudera.impala.catalog.AuthorizationPolicy;
-import com.cloudera.impala.common.InternalException;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-/*
- * Class used to check whether a user has access to a given resource.
- */
-public class AuthorizationChecker {
-  private final ResourceAuthorizationProvider provider_;
-  private final AuthorizationConfig config_;
-  private final AuthorizeableServer server_;
-
-  /*
-   * Creates a new AuthorizationChecker based on the config values.
-   */
-  public AuthorizationChecker(AuthorizationConfig config, AuthorizationPolicy policy) {
-    Preconditions.checkNotNull(config);
-    config_ = config;
-    if (config.isEnabled()) {
-      server_ = new AuthorizeableServer(config.getServerName());
-      provider_ = createProvider(config, policy);
-      Preconditions.checkNotNull(provider_);
-    } else {
-      provider_ = null;
-      server_ = null;
-    }
-  }
-
-  /*
-   * Creates a new ResourceAuthorizationProvider based on the given configuration.
-   */
-  private static ResourceAuthorizationProvider createProvider(AuthorizationConfig config,
-      AuthorizationPolicy policy) {
-    try {
-      ProviderBackend providerBe;
-      // Create the appropriate backend provider.
-      if (config.isFileBasedPolicy()) {
-        providerBe = new SimpleFileProviderBackend(config.getSentryConfig().getConfig(),
-            config.getPolicyFile());
-      } else {
-        // Note: The second parameter to the ProviderBackend is a "resourceFile" path
-        // which is not used by Impala. We cannot pass 'null' so instead pass an empty
-        // string.
-        providerBe = new SimpleCacheProviderBackend(config.getSentryConfig().getConfig(),
-            "");
-        Preconditions.checkNotNull(policy);
-        ProviderBackendContext context = new ProviderBackendContext();
-        context.setBindingHandle(policy);
-        providerBe.initialize(context);
-      }
-
-      SimpleDBPolicyEngine engine =
-          new SimpleDBPolicyEngine(config.getServerName(), providerBe);
-
-      // Try to create an instance of the specified policy provider class.
-      // Re-throw any exceptions that are encountered.
-      String policyFile = config.getPolicyFile() == null ? "" : config.getPolicyFile();
-      return (ResourceAuthorizationProvider) ConstructorUtils.invokeConstructor(
-          Class.forName(config.getPolicyProviderClassName()),
-          new Object[] {policyFile, engine});
-    } catch (Exception e) {
-      // Re-throw as unchecked exception.
-      throw new IllegalStateException(
-          "Error creating ResourceAuthorizationProvider: ", e);
-    }
-  }
-
-  /*
-   * Returns the configuration used to create this AuthorizationProvider.
-   */
-  public AuthorizationConfig getConfig() { return config_; }
-
-  /**
-   * Returns the set of groups this user belongs to. Uses the GroupMappingService
-   * that is in the AuthorizationProvider to properly resolve Hadoop groups or
-   * local group mappings.
-   */
-  public Set<String> getUserGroups(User user) throws InternalException {
-    return provider_.getGroupMapping().getGroups(user.getShortName());
-  }
-
-  /**
-   * Authorizes the PrivilegeRequest, throwing an Authorization exception if
-   * the user does not have sufficient privileges.
-   */
-  public void checkAccess(User user, PrivilegeRequest privilegeRequest)
-      throws AuthorizationException, InternalException {
-    Preconditions.checkNotNull(privilegeRequest);
-
-    if (!hasAccess(user, privilegeRequest)) {
-      if (privilegeRequest.getAuthorizeable() instanceof AuthorizeableFn) {
-        throw new AuthorizationException(String.format(
-            "User '%s' does not have privileges to CREATE/DROP functions.",
-            user.getName()));
-      }
-
-      Privilege privilege = privilegeRequest.getPrivilege();
-      if (EnumSet.of(Privilege.ANY, Privilege.ALL, Privilege.VIEW_METADATA)
-          .contains(privilege)) {
-        throw new AuthorizationException(String.format(
-            "User '%s' does not have privileges to access: %s",
-            user.getName(), privilegeRequest.getName()));
-      } else {
-        throw new AuthorizationException(String.format(
-            "User '%s' does not have privileges to execute '%s' on: %s",
-            user.getName(), privilege, privilegeRequest.getName()));
-      }
-    }
-  }
-
-  /*
-   * Returns true if the given user has permission to execute the given
-   * request, false otherwise. Always returns true if authorization is disabled.
-   */
-  public boolean hasAccess(User user, PrivilegeRequest request)
-      throws InternalException {
-    Preconditions.checkNotNull(user);
-    Preconditions.checkNotNull(request);
-
-    // If authorization is not enabled the user will always have access. If this is
-    // an internal request, the user will always have permission.
-    if (!config_.isEnabled() || user instanceof ImpalaInternalAdminUser) {
-      return true;
-    }
-
-    EnumSet<DBModelAction> actions = request.getPrivilege().getHiveActions();
-
-    List<DBModelAuthorizable> authorizeables = Lists.newArrayList(
-        server_.getHiveAuthorizeableHierarchy());
-    // If request.getAuthorizeable() is null, the request is for server-level permission.
-    if (request.getAuthorizeable() != null) {
-      authorizeables.addAll(request.getAuthorizeable().getHiveAuthorizeableHierarchy());
-    }
-
-    // The Hive Access API does not currently provide a way to check if the user
-    // has any privileges on a given resource.
-    if (request.getPrivilege().getAnyOf()) {
-      for (DBModelAction action: actions) {
-        if (provider_.hasAccess(new Subject(user.getShortName()), authorizeables,
-            EnumSet.of(action), ActiveRoleSet.ALL)) {
-          return true;
-        }
-      }
-      return false;
-    } else if (request.getPrivilege() == Privilege.CREATE && authorizeables.size() > 1) {
-      // CREATE on an object requires CREATE on the parent,
-      // so don't check access on the object we're creating.
-      authorizeables.remove(authorizeables.size() - 1);
-    }
-    return provider_.hasAccess(new Subject(user.getShortName()), authorizeables, actions,
-        ActiveRoleSet.ALL);
-  }
-}

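As an aside on the AuthorizationChecker.java hunk above: hasAccess() handles two special cases, checking "any of" privileges one action at a time and, for CREATE on an object, checking the parent by dropping the last element of the authorizeable hierarchy. A simplified self-contained sketch of that control flow follows; AccessCheckSketch, Action, and providerHasAccess are stand-ins for the Sentry-backed types, not the real API.

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.EnumSet;
    import java.util.List;

    public class AccessCheckSketch {
      enum Action { SELECT, INSERT, CREATE, ALL }

      // Stand-in for the provider call; in the real class this is
      // ResourceAuthorizationProvider.hasAccess(). Granting only SELECT keeps the
      // sketch self-contained.
      static boolean providerHasAccess(List<String> hierarchy, EnumSet<Action> actions) {
        return actions.contains(Action.SELECT);
      }

      // Mirrors the two special cases described above: "any of" privileges are
      // checked one action at a time, and CREATE on an object is checked against the
      // parent by dropping the last element of the hierarchy.
      static boolean hasAccess(List<String> hierarchy, EnumSet<Action> actions,
          boolean anyOf, boolean isCreate) {
        if (anyOf) {
          for (Action a : actions) {
            if (providerHasAccess(hierarchy, EnumSet.of(a))) return true;
          }
          return false;
        }
        List<String> checked = new ArrayList<>(hierarchy);
        if (isCreate && checked.size() > 1) checked.remove(checked.size() - 1);
        return providerHasAccess(checked, actions);
      }

      public static void main(String[] args) {
        List<String> tbl = Arrays.asList("server1", "functional", "alltypes");
        // ANY-style privilege: granted because at least one action (SELECT) is allowed.
        System.out.println(
            hasAccess(tbl, EnumSet.of(Action.SELECT, Action.INSERT), true, false));
      }
    }
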
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/authorization/AuthorizationConfig.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/authorization/AuthorizationConfig.java b/fe/src/main/java/com/cloudera/impala/authorization/AuthorizationConfig.java
deleted file mode 100644
index 34dbada..0000000
--- a/fe/src/main/java/com/cloudera/impala/authorization/AuthorizationConfig.java
+++ /dev/null
@@ -1,148 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.authorization;
-
-
-import org.apache.sentry.provider.common.HadoopGroupResourceAuthorizationProvider;
-import org.apache.sentry.provider.common.ResourceAuthorizationProvider;
-
-import com.google.common.base.Preconditions;
-import com.google.common.base.Strings;
-
-/*
- * Class that contains configuration details for Impala authorization.
- */
-public class AuthorizationConfig {
-  private final String serverName_;
-  // Set only if the policy provider is file-based.
-  private final String policyFile_;
-  private final SentryConfig sentryConfig_;
-  private final String policyProviderClassName_;
-
-  /**
-   * Creates a new authorization configuration object.
-   * @param serverName - The name of this Impala server.
-   * @param policyFile - The path to the authorization policy file or null if
-   *                     the policy engine is not file based.
-   * @param sentryConfigFile - Absolute path to the Sentry service configuration file.
-   * @param policyProviderClassName - Class name of the policy provider to use.
-   */
-  public AuthorizationConfig(String serverName, String policyFile,
-      String sentryConfigFile, String policyProviderClassName) {
-    serverName_ = serverName;
-    policyFile_ = policyFile;
-    sentryConfig_ = new SentryConfig(sentryConfigFile);
-    if (!Strings.isNullOrEmpty(policyProviderClassName)) {
-      policyProviderClassName = policyProviderClassName.trim();
-    }
-    policyProviderClassName_ = policyProviderClassName;
-  }
-
-  /**
-   * Returns an AuthorizationConfig object that has authorization disabled.
-   */
-  public static AuthorizationConfig createAuthDisabledConfig() {
-    return new AuthorizationConfig(null, null, null, null);
-  }
-
-  /**
-   * Returns an AuthorizationConfig object configured to use Hadoop user->group mappings
-   * for the authorization provider.
-   */
-  public static AuthorizationConfig createHadoopGroupAuthConfig(String serverName,
-      String policyFile, String sentryConfigFile) {
-    return new AuthorizationConfig(serverName, policyFile, sentryConfigFile,
-        HadoopGroupResourceAuthorizationProvider.class.getName());
-  }
-
-  /*
-   * Validates the authorization configuration and throws an IllegalArgumentException
-   * if any problems are found. If authorization is disabled, config checks are skipped.
-   */
-  public void validateConfig() throws IllegalArgumentException {
-    // If authorization is not enabled, config checks are skipped.
-    if (!isEnabled()) return;
-
-    // Only load the sentry configuration if a sentry-site.xml configuration file was
-    // specified. It is optional for impalad.
-    if (!Strings.isNullOrEmpty(sentryConfig_.getConfigFile())) {
-      sentryConfig_.loadConfig();
-    }
-
-    if (Strings.isNullOrEmpty(serverName_)) {
-      throw new IllegalArgumentException(
-          "Authorization is enabled but the server name is null or empty. Set the " +
-          "server name using the impalad --server_name flag.");
-    }
-    if (Strings.isNullOrEmpty(policyProviderClassName_)) {
-      throw new IllegalArgumentException("Authorization is enabled but the " +
-          "authorization policy provider class name is null or empty. Set the class " +
-          "name using the --authorization_policy_provider_class impalad flag.");
-    }
-
-    Class<?> providerClass = null;
-    try {
-      // Get the Class object without performing any initialization.
-      providerClass = Class.forName(policyProviderClassName_, false,
-          this.getClass().getClassLoader());
-    } catch (ClassNotFoundException e) {
-      throw new IllegalArgumentException(String.format("The authorization policy " +
-          "provider class '%s' was not found.", policyProviderClassName_), e);
-    }
-    Preconditions.checkNotNull(providerClass);
-    if (!ResourceAuthorizationProvider.class.isAssignableFrom(providerClass)) {
-      throw new IllegalArgumentException(String.format("The authorization policy " +
-          "provider class '%s' must be a subclass of '%s'.",
-          policyProviderClassName_,
-          ResourceAuthorizationProvider.class.getName()));
-    }
-  }
-
-  /**
-   * Returns true if authorization is enabled.
-   * If any of serverName, policyFile, or the Sentry config file is set (not null
-   * or empty), authorization is considered enabled.
-   */
-  public boolean isEnabled() {
-    return !Strings.isNullOrEmpty(serverName_) || !Strings.isNullOrEmpty(policyFile_) ||
-        !Strings.isNullOrEmpty(sentryConfig_.getConfigFile());
-  }
-
-  /**
-   * Returns true if using an authorization policy from a file in HDFS. If false,
-   * uses an authorization policy based on cached metadata sent from the catalog server
-   * via the statestore.
-   */
-  public boolean isFileBasedPolicy() { return !Strings.isNullOrEmpty(policyFile_); }
-
-  /**
-   * The server name to secure.
-   */
-  public String getServerName() { return serverName_; }
-
-  /**
-   * The policy file path.
-   */
-  public String getPolicyFile() { return policyFile_; }
-
-  /**
-   * The Sentry configuration.
-   */
-  public SentryConfig getSentryConfig() { return sentryConfig_; }
-  public String getPolicyProviderClassName() { return policyProviderClassName_; }
-}

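As an aside on the AuthorizationConfig.java hunk above: validateConfig() loads the policy provider class without initializing it and verifies that it is a subclass of ResourceAuthorizationProvider. The sketch below shows the same reflection pattern against JDK classes so it stays self-contained; ProviderClassCheckSketch and checkProviderClass are hypothetical names.

    public class ProviderClassCheckSketch {
      // Loads 'className' without running its static initializers and verifies that
      // it is a subclass of 'requiredBase', mirroring the check in
      // AuthorizationConfig.validateConfig() above.
      static void checkProviderClass(String className, Class<?> requiredBase) {
        Class<?> providerClass;
        try {
          providerClass = Class.forName(className, false,
              ProviderClassCheckSketch.class.getClassLoader());
        } catch (ClassNotFoundException e) {
          throw new IllegalArgumentException(
              "Provider class '" + className + "' was not found.", e);
        }
        if (!requiredBase.isAssignableFrom(providerClass)) {
          throw new IllegalArgumentException("Provider class '" + className
              + "' must be a subclass of '" + requiredBase.getName() + "'.");
        }
      }

      public static void main(String[] args) {
        // Passes: java.util.ArrayList extends java.util.AbstractList.
        checkProviderClass("java.util.ArrayList", java.util.AbstractList.class);
        try {
          // Fails: java.lang.String does not extend java.util.AbstractList.
          checkProviderClass("java.lang.String", java.util.AbstractList.class);
        } catch (IllegalArgumentException e) {
          System.out.println(e.getMessage());
        }
      }
    }
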
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/authorization/Authorizeable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/authorization/Authorizeable.java b/fe/src/main/java/com/cloudera/impala/authorization/Authorizeable.java
deleted file mode 100644
index 5782ed5..0000000
--- a/fe/src/main/java/com/cloudera/impala/authorization/Authorizeable.java
+++ /dev/null
@@ -1,56 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.authorization;
-
-import java.util.List;
-
-import org.apache.sentry.core.model.db.DBModelAuthorizable;
-
-/*
- * Abstract class representing an authorizeable object (Table, Db, Column, etc).
- */
-public abstract class Authorizeable {
-  /*
-  * Returns the list of the Hive "authorizeable" objects in their hierarchical order.
-  * For example:
-  * [Column] would return Db -> Table -> Column
-  * [Table] would return Db -> Table
-  * [Db] would return [Db]
-  * [URI] would return [URI]
-  */
-  public abstract List<DBModelAuthorizable> getHiveAuthorizeableHierarchy();
-
-  // Returns the name of the object.
-  public abstract String getName();
-
-  // Returns the full table name if applicable, null otherwise.
-  public String getFullTableName() { return null; }
-
-  // Returns the database name if applicable, null otherwise.
-  public String getDbName() { return null; }
-
-  @Override
-  public int hashCode() { return getName().hashCode(); }
-
-  @Override
-  public boolean equals(Object o) {
-    if (o == null) return false;
-    if (o.getClass() != this.getClass()) return false;
-    return ((Authorizeable) o).getName().equals(this.getName());
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/authorization/AuthorizeableColumn.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/authorization/AuthorizeableColumn.java b/fe/src/main/java/com/cloudera/impala/authorization/AuthorizeableColumn.java
deleted file mode 100644
index d5c9cd0..0000000
--- a/fe/src/main/java/com/cloudera/impala/authorization/AuthorizeableColumn.java
+++ /dev/null
@@ -1,66 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.authorization;
-
-import java.util.List;
-
-import org.apache.sentry.core.model.db.DBModelAuthorizable;
-
-import com.google.common.base.Preconditions;
-import com.google.common.base.Strings;
-import com.google.common.collect.Lists;
-
-/**
- * Class used to authorize access to a column.
- */
-public class AuthorizeableColumn extends Authorizeable {
-  private final org.apache.sentry.core.model.db.Column column_;
-  private final org.apache.sentry.core.model.db.Table table_;
-  private final org.apache.sentry.core.model.db.Database database_;
-  public final static String ANY_COLUMN_NAME =
-      org.apache.sentry.core.model.db.AccessConstants.ALL;
-
-  public AuthorizeableColumn(String dbName, String tableName, String columnName) {
-    Preconditions.checkState(!Strings.isNullOrEmpty(dbName));
-    Preconditions.checkState(!Strings.isNullOrEmpty(tableName));
-    Preconditions.checkState(!Strings.isNullOrEmpty(columnName));
-    column_ = new org.apache.sentry.core.model.db.Column(columnName);
-    table_ = new org.apache.sentry.core.model.db.Table(tableName);
-    database_ = new org.apache.sentry.core.model.db.Database(dbName);
-  }
-
-  @Override
-  public List<DBModelAuthorizable> getHiveAuthorizeableHierarchy() {
-    return Lists.newArrayList(database_, table_, column_);
-  }
-
-  @Override
-  public String getName() { return database_.getName() + "." + table_.getName() + "."
-      + column_.getName(); }
-
-  @Override
-  public String getFullTableName() {
-    return database_.getName() + "." + table_.getName();
-  }
-
-  @Override
-  public String getDbName() { return database_.getName(); }
-
-  public String getTblName() { return table_.getName(); }
-  public String getColumnName() { return column_.getName(); }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/authorization/AuthorizeableDb.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/authorization/AuthorizeableDb.java b/fe/src/main/java/com/cloudera/impala/authorization/AuthorizeableDb.java
deleted file mode 100644
index e27ac52..0000000
--- a/fe/src/main/java/com/cloudera/impala/authorization/AuthorizeableDb.java
+++ /dev/null
@@ -1,48 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.authorization;
-
-import java.util.List;
-
-import org.apache.sentry.core.model.db.DBModelAuthorizable;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-/*
- * Class used to authorize access to a database.
- */
-public class AuthorizeableDb extends Authorizeable {
-  private final org.apache.sentry.core.model.db.Database database_;
-
-  public AuthorizeableDb(String dbName) {
-    Preconditions.checkState(dbName != null && !dbName.isEmpty());
-    database_ = new org.apache.sentry.core.model.db.Database(dbName);
-  }
-
-  @Override
-  public List<DBModelAuthorizable> getHiveAuthorizeableHierarchy() {
-    return Lists.newArrayList((DBModelAuthorizable) database_);
-  }
-
-  @Override
-  public String getName() { return database_.getName(); }
-
-  @Override
-  public String getDbName() { return getName(); }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/authorization/AuthorizeableFn.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/authorization/AuthorizeableFn.java b/fe/src/main/java/com/cloudera/impala/authorization/AuthorizeableFn.java
deleted file mode 100644
index 0ff4ca3..0000000
--- a/fe/src/main/java/com/cloudera/impala/authorization/AuthorizeableFn.java
+++ /dev/null
@@ -1,45 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.authorization;
-
-import java.util.List;
-
-import org.apache.sentry.core.model.db.DBModelAuthorizable;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-/**
- * Class used to authorize access to a Function.
- */
-public class AuthorizeableFn extends Authorizeable {
-  private final String fnName_;
-
-  public AuthorizeableFn(String fnName) {
-    Preconditions.checkState(fnName != null && !fnName.isEmpty());
-    fnName_ = fnName;
-  }
-
-  @Override
-  public List<DBModelAuthorizable> getHiveAuthorizeableHierarchy() {
-    return Lists.newArrayList();
-  }
-
-  @Override
-  public String getName() { return fnName_; }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/authorization/AuthorizeableServer.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/authorization/AuthorizeableServer.java b/fe/src/main/java/com/cloudera/impala/authorization/AuthorizeableServer.java
deleted file mode 100644
index 4ca3218..0000000
--- a/fe/src/main/java/com/cloudera/impala/authorization/AuthorizeableServer.java
+++ /dev/null
@@ -1,49 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.authorization;
-
-import java.util.List;
-
-import org.apache.sentry.core.model.db.DBModelAuthorizable;
-
-import com.google.common.base.Preconditions;
-import com.google.common.base.Strings;
-import com.google.common.collect.Lists;
-
-/**
- * Class used to authorize access at the catalog level. Generally, all Impala
- * services in the cluster will be configured with the same catalog name.
- * What Sentry refers to as a Server maps to our concept of a Catalog, thus
- * the name AuthorizeableServer.
- */
-public class AuthorizeableServer extends Authorizeable {
-  private final org.apache.sentry.core.model.db.Server server_;
-
-  public AuthorizeableServer(String serverName) {
-    Preconditions.checkState(!Strings.isNullOrEmpty(serverName));
-    server_ = new org.apache.sentry.core.model.db.Server(serverName);
-  }
-
-  @Override
-  public List<DBModelAuthorizable> getHiveAuthorizeableHierarchy() {
-    return Lists.newArrayList((DBModelAuthorizable) server_);
-  }
-
-  @Override
-  public String getName() { return server_.getName(); }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/authorization/AuthorizeableTable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/authorization/AuthorizeableTable.java b/fe/src/main/java/com/cloudera/impala/authorization/AuthorizeableTable.java
deleted file mode 100644
index e28d5a1..0000000
--- a/fe/src/main/java/com/cloudera/impala/authorization/AuthorizeableTable.java
+++ /dev/null
@@ -1,63 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.authorization;
-
-import java.util.List;
-
-import org.apache.sentry.core.model.db.DBModelAuthorizable;
-
-import com.google.common.base.Preconditions;
-import com.google.common.base.Strings;
-import com.google.common.collect.Lists;
-
-/**
- * Class used to authorize access to a table or view.
- * Even though Hive's spec includes an authorizable object 'view', we chose
- * to treat views the same way as tables for the sake of authorization.
- */
-public class AuthorizeableTable extends Authorizeable {
-  // Constant to represent privileges in the policy for "ANY" table in a
-  // database.
-  public final static String ANY_TABLE_NAME =
-      org.apache.sentry.core.model.db.AccessConstants.ALL;
-
-  private final org.apache.sentry.core.model.db.Table table_;
-  private final org.apache.sentry.core.model.db.Database database_;
-
-  public AuthorizeableTable(String dbName, String tableName) {
-    Preconditions.checkState(!Strings.isNullOrEmpty(tableName));
-    Preconditions.checkState(!Strings.isNullOrEmpty(dbName));
-    table_ = new org.apache.sentry.core.model.db.Table(tableName);
-    database_ = new org.apache.sentry.core.model.db.Database(dbName);
-  }
-
-  @Override
-  public List<DBModelAuthorizable> getHiveAuthorizeableHierarchy() {
-    return Lists.newArrayList(database_, table_);
-  }
-
-  @Override
-  public String getName() { return database_.getName() + "." + table_.getName(); }
-
-  @Override
-  public String getDbName() { return database_.getName(); }
-  public String getTblName() { return table_.getName(); }
-
-  @Override
-  public String getFullTableName() { return getName(); }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/authorization/AuthorizeableUri.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/authorization/AuthorizeableUri.java b/fe/src/main/java/com/cloudera/impala/authorization/AuthorizeableUri.java
deleted file mode 100644
index 1997457..0000000
--- a/fe/src/main/java/com/cloudera/impala/authorization/AuthorizeableUri.java
+++ /dev/null
@@ -1,47 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.authorization;
-
-import java.util.List;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-/*
- * Class used to authorize access to a URI.
- */
-public class AuthorizeableUri extends Authorizeable {
-  private final String uriName_;
-
-  public AuthorizeableUri(String uriName) {
-    Preconditions.checkNotNull(uriName);
-    uriName_ = uriName;
-  }
-
-  @Override
-  public List<org.apache.sentry.core.model.db.DBModelAuthorizable>
-      getHiveAuthorizeableHierarchy() {
-    org.apache.sentry.core.model.db.AccessURI accessURI =
-        new org.apache.sentry.core.model.db.AccessURI(uriName_);
-    return Lists.newArrayList(
-        (org.apache.sentry.core.model.db.DBModelAuthorizable) accessURI);
-  }
-
-  @Override
-  public String getName() { return uriName_; }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/authorization/ImpalaInternalAdminUser.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/authorization/ImpalaInternalAdminUser.java b/fe/src/main/java/com/cloudera/impala/authorization/ImpalaInternalAdminUser.java
deleted file mode 100644
index a5c14c1..0000000
--- a/fe/src/main/java/com/cloudera/impala/authorization/ImpalaInternalAdminUser.java
+++ /dev/null
@@ -1,36 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.authorization;
-
-/*
- * A singleton class that represents a special user type used for internal Impala
- * sessions (for example, populating the debug webpage Catalog view). This user has
- * all privileges on all objects in the server.
- */
-public class ImpalaInternalAdminUser extends User {
-  private final static ImpalaInternalAdminUser instance_ = new ImpalaInternalAdminUser();
-
-  private ImpalaInternalAdminUser() {
-    super("Impala Internal Admin User");
-  }
-
-  /*
-   * Returns an instance of the ImpalaInternalAdminUser.
-   */
-  public static ImpalaInternalAdminUser getInstance() { return instance_; }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/authorization/Privilege.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/authorization/Privilege.java b/fe/src/main/java/com/cloudera/impala/authorization/Privilege.java
deleted file mode 100644
index 9b44517..0000000
--- a/fe/src/main/java/com/cloudera/impala/authorization/Privilege.java
+++ /dev/null
@@ -1,70 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.authorization;
-
-import java.util.EnumSet;
-
-import org.apache.sentry.core.model.db.DBModelAction;
-
-/*
- * Maps an Impala Privilege to one or more Hive Access "Actions".
- */
-public enum Privilege {
-  ALL(DBModelAction.ALL, false),
-  ALTER(DBModelAction.ALL, false),
-  DROP(DBModelAction.ALL, false),
-  CREATE(DBModelAction.ALL, false),
-  INSERT(DBModelAction.INSERT, false),
-  SELECT(DBModelAction.SELECT, false),
-  // Privileges required to view metadata on a server object.
-  VIEW_METADATA(EnumSet.of(DBModelAction.INSERT, DBModelAction.SELECT), true),
-  // Special privilege that is used to determine if the user has any valid privileges
-  // on a target object.
-  ANY(EnumSet.allOf(DBModelAction.class), true),
-  ;
-
-  private final EnumSet<DBModelAction> actions;
-
-  // Determines whether to check if the user has ANY of the privileges defined in the
-  // actions list or whether to check if the user has ALL of the privileges in the
-  // actions list.
-  private final boolean anyOf_;
-
-  private Privilege(EnumSet<DBModelAction> actions, boolean anyOf) {
-    this.actions = actions;
-    this.anyOf_ = anyOf;
-  }
-
-  private Privilege(DBModelAction action, boolean anyOf) {
-    this(EnumSet.of(action), anyOf);
-  }
-
-  /*
-   * Returns the set of Hive Access Actions mapping to this Privilege.
-   */
-  public EnumSet<DBModelAction> getHiveActions() {
-    return actions;
-  }
-
-  /*
-   * Determines whether to check if the user has ANY of the privileges defined in the
-   * actions list or whether to check if the user has ALL of the privileges in the
-   * actions list.
-   */
-  public boolean getAnyOf() { return anyOf_; }
-}
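
The anyOf flag above changes how the mapped Hive actions are interpreted. The following
is a minimal sketch of that semantics, not Impala's actual authorization checker; the
'granted' set stands in for whatever actions the policy engine reports for the user:

  import java.util.EnumSet;
  import org.apache.sentry.core.model.db.DBModelAction;
  import com.cloudera.impala.authorization.Privilege;

  public class PrivilegeCheckSketch {
    static boolean satisfies(Privilege requested, EnumSet<DBModelAction> granted) {
      if (requested.getAnyOf()) {
        // VIEW_METADATA and ANY: one matching action is enough.
        for (DBModelAction action : requested.getHiveActions()) {
          if (granted.contains(action)) return true;
        }
        return false;
      }
      // SELECT, INSERT, etc.: every mapped action must be granted.
      return granted.containsAll(requested.getHiveActions());
    }

    public static void main(String[] args) {
      EnumSet<DBModelAction> granted = EnumSet.of(DBModelAction.SELECT);
      System.out.println(satisfies(Privilege.VIEW_METADATA, granted)); // true
      System.out.println(satisfies(Privilege.INSERT, granted));        // false
    }
  }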

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/authorization/PrivilegeRequest.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/authorization/PrivilegeRequest.java b/fe/src/main/java/com/cloudera/impala/authorization/PrivilegeRequest.java
deleted file mode 100644
index bacc4b2..0000000
--- a/fe/src/main/java/com/cloudera/impala/authorization/PrivilegeRequest.java
+++ /dev/null
@@ -1,77 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.authorization;
-
-import com.google.common.base.Preconditions;
-
-/*
- * Represents a privilege request in the context of an Authorizeable object. If no
- * Authorizeable object is provided, it represents a privilege request on the server.
- * For example, SELECT on table Foo in database Bar.
- */
-public class PrivilegeRequest {
-  private final Authorizeable authorizeable_;
-  private final Privilege privilege_;
-
-  public PrivilegeRequest(Authorizeable authorizeable, Privilege privilege) {
-    Preconditions.checkNotNull(authorizeable);
-    Preconditions.checkNotNull(privilege);
-    authorizeable_ = authorizeable;
-    privilege_ = privilege;
-  }
-
-  public PrivilegeRequest(Privilege privilege) {
-    Preconditions.checkNotNull(privilege);
-    authorizeable_ = null;
-    privilege_ = privilege;
-  }
-
-  /*
-   * Name of the Authorizeable, or "server" if the Authorizeable is null.
-   */
-  public String getName() {
-    return (authorizeable_ != null) ? authorizeable_.getName() : "server";
-  }
-
-  /*
-   * Requested privilege on the Authorizeable.
-   */
-  public Privilege getPrivilege() { return privilege_; }
-
-
-  /*
-   * Returns Authorizeable object. Null if the request is for server-level permission.
-   */
-  public Authorizeable getAuthorizeable() { return authorizeable_; }
-
-  @Override
-  public int hashCode() {
-    return (authorizeable_ == null ? 0 : authorizeable_.hashCode()) * 37 +
-        privilege_.hashCode();
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (!(o instanceof PrivilegeRequest)) return false;
-    if (authorizeable_ == null) {
-      return ((PrivilegeRequest) o).getPrivilege().equals(privilege_);
-    }
-    return ((PrivilegeRequest) o).getAuthorizeable().equals(authorizeable_) &&
-        ((PrivilegeRequest) o).getPrivilege().equals(privilege_);
-  }
-}
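
A short sketch of the two constructors above; "functional" and "alltypes" are
placeholder names:

  import com.cloudera.impala.authorization.AuthorizeableTable;
  import com.cloudera.impala.authorization.Privilege;
  import com.cloudera.impala.authorization.PrivilegeRequest;

  public class PrivilegeRequestSketch {
    public static void main(String[] args) {
      // No Authorizeable: the request targets the server itself.
      PrivilegeRequest serverReq = new PrivilegeRequest(Privilege.ALL);
      System.out.println(serverReq.getName());   // server

      // Object-level request on a table.
      PrivilegeRequest tableReq = new PrivilegeRequest(
          new AuthorizeableTable("functional", "alltypes"), Privilege.SELECT);
      System.out.println(tableReq.getName());    // functional.alltypes
    }
  }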

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/authorization/PrivilegeRequestBuilder.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/authorization/PrivilegeRequestBuilder.java b/fe/src/main/java/com/cloudera/impala/authorization/PrivilegeRequestBuilder.java
deleted file mode 100644
index 13dc909..0000000
--- a/fe/src/main/java/com/cloudera/impala/authorization/PrivilegeRequestBuilder.java
+++ /dev/null
@@ -1,119 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.authorization;
-
-import com.google.common.base.Preconditions;
-
-/**
- * Class that helps build PrivilegeRequest objects.
- * For example:
- * PrivilegeRequestBuilder builder = new PrivilegeRequestBuilder();
- * PrivilegeRequest request = builder.allOf(Privilege.SELECT).onTable("db", "tbl").toRequest();
- *
- * TODO: In the future, this class could be extended to provide the option to specify
- * multiple permissions. For example:
- * builder.allOf(SELECT, INSERT).onTable(..);
- * It could also be extended to support an "anyOf" to check if the user has any of the
- * permissions specified:
- * builder.anyOf(SELECT, INSERT).onTable(...);
- */
-public class PrivilegeRequestBuilder {
-  Authorizeable authorizeable_;
-  Privilege privilege_;
-
-  /**
-   * Sets the authorizeable object to be a column.
-   */
-  public PrivilegeRequestBuilder onColumn(String dbName, String tableName,
-      String columnName) {
-    authorizeable_ = new AuthorizeableColumn(dbName, tableName, columnName);
-    return this;
-  }
-
-  /**
-   * Sets the authorizeable object to be a table.
-   */
-  public PrivilegeRequestBuilder onTable(String dbName, String tableName) {
-    authorizeable_ = new AuthorizeableTable(dbName, tableName);
-    return this;
-  }
-
-  /**
-   * Sets the authorizeable object to be a database.
-   */
-  public PrivilegeRequestBuilder onDb(String dbName) {
-    authorizeable_ = new AuthorizeableDb(dbName);
-    return this;
-  }
-
-  /**
-   * Sets the authorizeable object to be a URI.
-   */
-  public PrivilegeRequestBuilder onURI(String uriName) {
-    authorizeable_ = new AuthorizeableUri(uriName);
-    return this;
-  }
-
-  /**
-   * Specifies permissions on any table in the given database.
-   */
-  public PrivilegeRequestBuilder onAnyTable(String dbName) {
-    return onTable(dbName, AuthorizeableTable.ANY_TABLE_NAME);
-  }
-
-  /**
-   * Specifies permissions on any column in the given table.
-   */
-  public PrivilegeRequestBuilder onAnyColumn(String dbName, String tableName) {
-    return onColumn(dbName, tableName, AuthorizeableColumn.ANY_COLUMN_NAME);
-  }
-
-  /**
-   * Specifies the privilege the user needs to have.
-   */
-  public PrivilegeRequestBuilder allOf(Privilege privilege) {
-    privilege_ = privilege;
-    return this;
-  }
-
-  /**
-   * Specifies that the user needs the "ALL" privilege.
-   */
-  public PrivilegeRequestBuilder all() {
-    privilege_ = Privilege.ALL;
-    return this;
-  }
-
-  /**
-   * Specifies that any privilege is sufficient.
-   */
-  public PrivilegeRequestBuilder any() {
-    privilege_ = Privilege.ANY;
-    return this;
-  }
-
-  /**
-   * Builds a PrivilegeRequest object based on the current Authorizeable object
-   * and privilege settings.
-   */
-  public PrivilegeRequest toRequest() {
-    Preconditions.checkNotNull(authorizeable_);
-    Preconditions.checkNotNull(privilege_);
-    return new PrivilegeRequest(authorizeable_, privilege_);
-  }
-}
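
A runnable version of the builder example from the class comment; the table name is a
placeholder:

  import com.cloudera.impala.authorization.Privilege;
  import com.cloudera.impala.authorization.PrivilegeRequest;
  import com.cloudera.impala.authorization.PrivilegeRequestBuilder;

  public class BuilderSketch {
    public static void main(String[] args) {
      // SELECT on a specific table.
      PrivilegeRequest select = new PrivilegeRequestBuilder()
          .allOf(Privilege.SELECT).onTable("functional", "alltypes").toRequest();

      // Any privilege on any column of that table (wildcard column name).
      PrivilegeRequest anyColumn = new PrivilegeRequestBuilder()
          .any().onAnyColumn("functional", "alltypes").toRequest();

      System.out.println(select.getName() + " / " + anyColumn.getName());
    }
  }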

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/authorization/SentryConfig.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/authorization/SentryConfig.java b/fe/src/main/java/com/cloudera/impala/authorization/SentryConfig.java
deleted file mode 100644
index 48300f4..0000000
--- a/fe/src/main/java/com/cloudera/impala/authorization/SentryConfig.java
+++ /dev/null
@@ -1,74 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.authorization;
-
-import java.io.File;
-import java.net.MalformedURLException;
-
-import org.apache.hadoop.conf.Configuration;
-
-import com.cloudera.impala.common.FileSystemUtil;
-import com.google.common.base.Strings;
-
-/**
- * Class used to load a sentry-site.xml configuration file.
- */
-public class SentryConfig {
-  // Absolute path to the sentry-site.xml configuration file.
-  private final String configFile_;
-
-  // The Sentry configuration. Valid only after calling loadConfig().
-  private final Configuration config_;
-
-  public SentryConfig(String configFilePath) {
-    configFile_ = configFilePath;
-    config_ = FileSystemUtil.getConfiguration();
-  }
-
-  /**
-   * Initializes the Sentry configuration.
-   */
-  public void loadConfig() {
-    if (Strings.isNullOrEmpty(configFile_)) {
-      throw new IllegalArgumentException("A valid path to a sentry-site.xml config " +
-          "file must be set using --sentry_config to enable authorization.");
-    }
-
-    File configFile = new File(configFile_);
-    if (!configFile.exists()) {
-      String configFilePath = "\"" + configFile_ + "\"";
-      throw new RuntimeException("Sentry configuration file does not exist: " +
-          configFilePath);
-    }
-
-    if (!configFile.canRead()) {
-      throw new RuntimeException("Cannot read Sentry configuration file: " +
-          configFile_);
-    }
-
-    // Load the config.
-    try {
-      config_.addResource(configFile.toURI().toURL());
-    } catch (MalformedURLException e) {
-      throw new RuntimeException("Invalid Sentry config file path: " + configFile_, e);
-    }
-  }
-
-  public Configuration getConfig() { return config_; }
-  public String getConfigFile() { return configFile_; }
-}
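
Typical use of the class above, assuming a placeholder path; in the daemons the path
comes from the --sentry_config startup flag:

  import org.apache.hadoop.conf.Configuration;
  import com.cloudera.impala.authorization.SentryConfig;

  public class SentryConfigSketch {
    public static void main(String[] args) {
      SentryConfig sentryConfig = new SentryConfig("/etc/impala/conf/sentry-site.xml");
      // Throws if the path is empty, the file is missing, or it cannot be read.
      sentryConfig.loadConfig();
      Configuration conf = sentryConfig.getConfig();
      System.out.println(conf.size());
    }
  }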

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/authorization/User.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/authorization/User.java b/fe/src/main/java/com/cloudera/impala/authorization/User.java
deleted file mode 100644
index a282e54..0000000
--- a/fe/src/main/java/com/cloudera/impala/authorization/User.java
+++ /dev/null
@@ -1,104 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.authorization;
-
-import com.google.common.base.Preconditions;
-import com.google.common.annotations.VisibleForTesting;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.common.RuntimeEnv;
-import com.cloudera.impala.service.BackendConfig;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTH_TO_LOCAL;
-import org.apache.hadoop.security.authentication.util.KerberosName;
-
-/*
- * Class that represents a User of an Impala session.
- */
-public class User {
-
-  static {
-    // If auth_to_local is enabled, we read the configuration hadoop.security.auth_to_local
-    // from core-site.xml and use it for principal to short name conversion. If it is not,
-    // we use the defaultRule ("RULE:[1:$1] RULE:[2:$1]"), which just extracts the user
-    // name from any principal of form a@REALM or a/b@REALM. If auth_to_local is enabled
-    // and hadoop.security.auth_to_local is not specified in the hadoop configs, we use
-    // the "DEFAULT" rule that just extracts the username from any principal in the
-    // cluster's local realm. For more details on principal to short name translation,
-    // refer to org.apache.hadoop.security.KerberosName.
-    final String defaultRule = "RULE:[1:$1] RULE:[2:$1]";
-    final Configuration conf = new Configuration();
-    if (BackendConfig.isAuthToLocalEnabled()) {
-      KerberosName.setRules(conf.get(HADOOP_SECURITY_AUTH_TO_LOCAL, "DEFAULT"));
-    } else {
-      // just extract the simple user name
-      KerberosName.setRules(defaultRule);
-    }
-  }
-
-  private final String name_;
-
-  private KerberosName kerberosName_;
-
-  public User(String name) {
-    Preconditions.checkNotNull(name);
-    name_ = name;
-    this.kerberosName_ = new KerberosName(name);
-  }
-
-  public String getName() { return name_; }
-
-  public String getShortName() throws InternalException {
-    try {
-      return kerberosName_.getShortName();
-    } catch (IOException e) {
-      throw new InternalException(
-          "Error calling getShortName() for user: " + getName(), e);
-    }
-  }
-
-  /*
-   * Returns the short name for the user after applying auth_to_local
-   * rules from string 'rules'. This is exposed for testing purposes only.
-   * Ideally these rules are populated from hdfs configuration files.
-   */
-  @VisibleForTesting
-  public String getShortNameForTesting(String rules) {
-    Preconditions.checkNotNull(rules);
-    Preconditions.checkState(RuntimeEnv.INSTANCE.isTestEnv());
-    String currentRules = KerberosName.getRules();
-    KerberosName.setRules(rules);
-    String shortName = null;
-    try {
-      shortName = getShortName();
-    } catch (InternalException e) {
-      e.printStackTrace();
-    }
-    // reset the rules
-    KerberosName.setRules(currentRules);
-    return shortName;
-  }
-
-  @VisibleForTesting
-  public static void setRulesForTesting(String rules) {
-    Preconditions.checkState(RuntimeEnv.INSTANCE.isTestEnv());
-    KerberosName.setRules(rules);
-  }
-}
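
A small sketch of principal-to-short-name mapping; the principal is a placeholder and
the result depends on the active auth_to_local rules:

  import com.cloudera.impala.authorization.User;
  import com.cloudera.impala.common.InternalException;

  public class UserSketch {
    public static void main(String[] args) throws InternalException {
      User user = new User("impala/host1.example.com@EXAMPLE.COM");
      System.out.println(user.getName());       // the full principal
      // With the default rules ("RULE:[1:$1] RULE:[2:$1]") this prints "impala".
      System.out.println(user.getShortName());
    }
  }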

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/catalog/AggregateFunction.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/catalog/AggregateFunction.java b/fe/src/main/java/com/cloudera/impala/catalog/AggregateFunction.java
deleted file mode 100644
index eb968fd..0000000
--- a/fe/src/main/java/com/cloudera/impala/catalog/AggregateFunction.java
+++ /dev/null
@@ -1,241 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.catalog;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import com.cloudera.impala.analysis.FunctionName;
-import com.cloudera.impala.analysis.HdfsUri;
-import com.cloudera.impala.thrift.TAggregateFunction;
-import com.cloudera.impala.thrift.TFunction;
-import com.cloudera.impala.thrift.TFunctionBinaryType;
-
-/**
- * Internal representation of an aggregate function.
- * TODO: Create separate AnalyticFunction class
- */
-public class AggregateFunction extends Function {
-  // Set if different from retType_, null otherwise.
-  private Type intermediateType_;
-
-  // The symbols inside the binary at location_ that implement this particular function.
-  // Each can be null if it is not required.
-  private String updateFnSymbol_;
-  private String initFnSymbol_;
-  private String serializeFnSymbol_;
-  private String mergeFnSymbol_;
-  private String getValueFnSymbol_;
-  private String removeFnSymbol_;
-  private String finalizeFnSymbol_;
-
-  private static String BE_BUILTINS_CLASS = "AggregateFunctions";
-
-  // If true, this aggregate function should ignore distinct.
-  // e.g. min(distinct col) == min(col).
-  // TODO: currently it is not possible for user functions to specify this. We should
-  // extend the create aggregate function stmt to allow additional metadata like this.
-  private boolean ignoresDistinct_;
-
-  // True if this function can appear within an analytic expr (fn() OVER(...)).
-  // TODO: Instead of manually setting this flag for all builtin aggregate functions
-  // we should identify this property from the function itself (e.g., based on which
-  // functions of the UDA API are implemented).
-  // Currently, there is no reliable way of doing that.
-  private boolean isAnalyticFn_;
-
-  // True if this function can be used for aggregation (without an OVER() clause).
-  private boolean isAggregateFn_;
-
-  // True if this function returns a non-null value on an empty input. It is used
-  // primarily during the rewrite of scalar subqueries.
-  // TODO: Instead of manually setting this flag, we should identify this
-  // property from the function itself (e.g. evaluating the function on an
-  // empty input in BE).
-  private boolean returnsNonNullOnEmpty_;
-
-  public AggregateFunction(FunctionName fnName, ArrayList<Type> argTypes, Type retType,
-      boolean hasVarArgs) {
-    super(fnName, argTypes, retType, hasVarArgs);
-  }
-
-  public AggregateFunction(FunctionName fnName, List<Type> argTypes,
-      Type retType, Type intermediateType,
-      HdfsUri location, String updateFnSymbol, String initFnSymbol,
-      String serializeFnSymbol, String mergeFnSymbol, String getValueFnSymbol,
-      String removeFnSymbol, String finalizeFnSymbol) {
-    super(fnName, argTypes, retType, false);
-    setLocation(location);
-    intermediateType_ = (intermediateType.equals(retType)) ? null : intermediateType;
-    updateFnSymbol_ = updateFnSymbol;
-    initFnSymbol_ = initFnSymbol;
-    serializeFnSymbol_ = serializeFnSymbol;
-    mergeFnSymbol_ = mergeFnSymbol;
-    getValueFnSymbol_ = getValueFnSymbol;
-    removeFnSymbol_ = removeFnSymbol;
-    finalizeFnSymbol_ = finalizeFnSymbol;
-    ignoresDistinct_ = false;
-    isAnalyticFn_ = false;
-    isAggregateFn_ = true;
-    returnsNonNullOnEmpty_ = false;
-  }
-
-  public static AggregateFunction createForTesting(FunctionName fnName,
-      List<Type> argTypes, Type retType, Type intermediateType,
-      HdfsUri location, String updateFnSymbol, String initFnSymbol,
-      String serializeFnSymbol, String mergeFnSymbol, String getValueFnSymbol,
-      String removeFnSymbol, String finalizeFnSymbol,
-      TFunctionBinaryType fnType) {
-    AggregateFunction fn = new AggregateFunction(fnName, argTypes, retType,
-        intermediateType, location, updateFnSymbol, initFnSymbol,
-        serializeFnSymbol, mergeFnSymbol, getValueFnSymbol, removeFnSymbol,
-        finalizeFnSymbol);
-    fn.setBinaryType(fnType);
-    return fn;
-  }
-
-  public static AggregateFunction createBuiltin(Db db, String name,
-      List<Type> argTypes, Type retType, Type intermediateType,
-      String initFnSymbol, String updateFnSymbol, String mergeFnSymbol,
-      String serializeFnSymbol, String finalizeFnSymbol, boolean ignoresDistinct,
-      boolean isAnalyticFn, boolean returnsNonNullOnEmpty) {
-    return createBuiltin(db, name, argTypes, retType, intermediateType, initFnSymbol,
-        updateFnSymbol, mergeFnSymbol, serializeFnSymbol, null, null, finalizeFnSymbol,
-        ignoresDistinct, isAnalyticFn, returnsNonNullOnEmpty);
-  }
-
-  public static AggregateFunction createBuiltin(Db db, String name,
-      List<Type> argTypes, Type retType, Type intermediateType,
-      String initFnSymbol, String updateFnSymbol, String mergeFnSymbol,
-      String serializeFnSymbol, String getValueFnSymbol, String removeFnSymbol,
-      String finalizeFnSymbol, boolean ignoresDistinct, boolean isAnalyticFn,
-      boolean returnsNonNullOnEmpty) {
-    AggregateFunction fn = new AggregateFunction(new FunctionName(db.getName(), name),
-        argTypes, retType, intermediateType, null, updateFnSymbol, initFnSymbol,
-        serializeFnSymbol, mergeFnSymbol, getValueFnSymbol, removeFnSymbol,
-        finalizeFnSymbol);
-    fn.setBinaryType(TFunctionBinaryType.BUILTIN);
-    fn.ignoresDistinct_ = ignoresDistinct;
-    fn.isAnalyticFn_ = isAnalyticFn;
-    fn.isAggregateFn_ = true;
-    fn.returnsNonNullOnEmpty_ = returnsNonNullOnEmpty;
-    fn.setIsPersistent(true);
-    return fn;
-  }
-
-  public static AggregateFunction createAnalyticBuiltin(Db db, String name,
-      List<Type> argTypes, Type retType, Type intermediateType) {
-    return createAnalyticBuiltin(db, name, argTypes, retType, intermediateType, null,
-        null, null, null, null, true);
-  }
-
-  public static AggregateFunction createAnalyticBuiltin(Db db, String name,
-      List<Type> argTypes, Type retType, Type intermediateType,
-      String initFnSymbol, String updateFnSymbol, String removeFnSymbol,
-      String getValueFnSymbol, String finalizeFnSymbol) {
-    return createAnalyticBuiltin(db, name, argTypes, retType, intermediateType,
-        initFnSymbol, updateFnSymbol, removeFnSymbol, getValueFnSymbol, finalizeFnSymbol,
-        true);
-  }
-
-  public static AggregateFunction createAnalyticBuiltin(Db db, String name,
-      List<Type> argTypes, Type retType, Type intermediateType,
-      String initFnSymbol, String updateFnSymbol, String removeFnSymbol,
-      String getValueFnSymbol, String finalizeFnSymbol, boolean isUserVisible) {
-    AggregateFunction fn = new AggregateFunction(new FunctionName(db.getName(), name),
-        argTypes, retType, intermediateType, null, updateFnSymbol, initFnSymbol,
-        null, null, getValueFnSymbol, removeFnSymbol, finalizeFnSymbol);
-    fn.setBinaryType(TFunctionBinaryType.BUILTIN);
-    fn.ignoresDistinct_ = false;
-    fn.isAnalyticFn_ = true;
-    fn.isAggregateFn_ = false;
-    fn.returnsNonNullOnEmpty_ = false;
-    fn.setUserVisible(isUserVisible);
-    fn.setIsPersistent(true);
-    return fn;
-  }
-
-  public String getUpdateFnSymbol() { return updateFnSymbol_; }
-  public String getInitFnSymbol() { return initFnSymbol_; }
-  public String getSerializeFnSymbol() { return serializeFnSymbol_; }
-  public String getMergeFnSymbol() { return mergeFnSymbol_; }
-  public String getGetValueFnSymbol() { return getValueFnSymbol_; }
-  public String getRemoveFnSymbol() { return removeFnSymbol_; }
-  public String getFinalizeFnSymbol() { return finalizeFnSymbol_; }
-  public boolean ignoresDistinct() { return ignoresDistinct_; }
-  public boolean isAnalyticFn() { return isAnalyticFn_; }
-  public boolean isAggregateFn() { return isAggregateFn_; }
-  public boolean returnsNonNullOnEmpty() { return returnsNonNullOnEmpty_; }
-
-  /**
-   * Returns the intermediate type of this aggregate function or null
-   * if it is identical to the return type.
-   */
-  public Type getIntermediateType() { return intermediateType_; }
-  public void setUpdateFnSymbol(String fn) { updateFnSymbol_ = fn; }
-  public void setInitFnSymbol(String fn) { initFnSymbol_ = fn; }
-  public void setSerializeFnSymbol(String fn) { serializeFnSymbol_ = fn; }
-  public void setMergeFnSymbol(String fn) { mergeFnSymbol_ = fn; }
-  public void setGetValueFnSymbol(String fn) { getValueFnSymbol_ = fn; }
-  public void setRemoveFnSymbol(String fn) { removeFnSymbol_ = fn; }
-  public void setFinalizeFnSymbol(String fn) { finalizeFnSymbol_ = fn; }
-  public void setIntermediateType(Type t) { intermediateType_ = t; }
-
-  @Override
-  public String toSql(boolean ifNotExists) {
-    StringBuilder sb = new StringBuilder("CREATE AGGREGATE FUNCTION ");
-    if (ifNotExists) sb.append("IF NOT EXISTS ");
-    sb.append(dbName() + "." + signatureString() + "\n")
-      .append(" RETURNS " + getReturnType() + "\n");
-    if (getIntermediateType() != null) {
-      sb.append(" INTERMEDIATE " + getIntermediateType() + "\n");
-    }
-    sb.append(" LOCATION '" + getLocation() + "'\n")
-      .append(" UPDATE_FN='" + getUpdateFnSymbol() + "'\n")
-      .append(" INIT_FN='" + getInitFnSymbol() + "'\n")
-      .append(" MERGE_FN='" + getMergeFnSymbol() + "'\n");
-    if (getSerializeFnSymbol() != null) {
-      sb.append(" SERIALIZE_FN='" + getSerializeFnSymbol() + "'\n");
-    }
-    if (getFinalizeFnSymbol() != null) {
-      sb.append(" FINALIZE_FN='" + getFinalizeFnSymbol() + "'\n");
-    }
-    return sb.toString();
-  }
-
-  @Override
-  public TFunction toThrift() {
-    TFunction fn = super.toThrift();
-    TAggregateFunction agg_fn = new TAggregateFunction();
-    agg_fn.setUpdate_fn_symbol(updateFnSymbol_);
-    agg_fn.setInit_fn_symbol(initFnSymbol_);
-    if (serializeFnSymbol_ != null) agg_fn.setSerialize_fn_symbol(serializeFnSymbol_);
-    agg_fn.setMerge_fn_symbol(mergeFnSymbol_);
-    if (getValueFnSymbol_  != null) agg_fn.setGet_value_fn_symbol(getValueFnSymbol_);
-    if (removeFnSymbol_  != null) agg_fn.setRemove_fn_symbol(removeFnSymbol_);
-    if (finalizeFnSymbol_  != null) agg_fn.setFinalize_fn_symbol(finalizeFnSymbol_);
-    if (intermediateType_ != null) {
-      agg_fn.setIntermediate_type(intermediateType_.toThrift());
-    } else {
-      agg_fn.setIntermediate_type(getReturnType().toThrift());
-    }
-    agg_fn.setIgnores_distinct(ignoresDistinct_);
-    fn.setAggregate_fn(agg_fn);
-    return fn;
-  }
-}
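
For reference, the toSql() implementation above reconstructs DDL of roughly this shape
for a user-defined aggregate; every name, path, and symbol below is a placeholder:

  CREATE AGGREGATE FUNCTION udf_db.my_sum(INT)
   RETURNS BIGINT
   LOCATION 'hdfs:///user/impala/udas/libmyudas.so'
   UPDATE_FN='MySumUpdate'
   INIT_FN='MySumInit'
   MERGE_FN='MySumMerge'

The INTERMEDIATE, SERIALIZE_FN, and FINALIZE_FN clauses are emitted only when the
corresponding type or symbol is set.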

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/catalog/ArrayType.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/catalog/ArrayType.java b/fe/src/main/java/com/cloudera/impala/catalog/ArrayType.java
deleted file mode 100644
index 25edde2..0000000
--- a/fe/src/main/java/com/cloudera/impala/catalog/ArrayType.java
+++ /dev/null
@@ -1,71 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.catalog;
-
-import org.apache.commons.lang3.StringUtils;
-
-import com.cloudera.impala.thrift.TColumnType;
-import com.cloudera.impala.thrift.TTypeNode;
-import com.cloudera.impala.thrift.TTypeNodeType;
-import com.google.common.base.Preconditions;
-
-/**
- * Describes an ARRAY type.
- */
-public class ArrayType extends Type {
-  private final Type itemType_;
-
-  public ArrayType(Type itemType) {
-    itemType_ = itemType;
-  }
-
-  public Type getItemType() { return itemType_; }
-
-  @Override
-  public String toSql(int depth) {
-    if (depth >= MAX_NESTING_DEPTH) return "ARRAY<...>";
-    return String.format("ARRAY<%s>", itemType_.toSql(depth + 1));
-  }
-
-  @Override
-  public boolean equals(Object other) {
-    if (!(other instanceof ArrayType)) return false;
-    ArrayType otherArrayType = (ArrayType) other;
-    return otherArrayType.itemType_.equals(itemType_);
-  }
-
-  @Override
-  public void toThrift(TColumnType container) {
-    TTypeNode node = new TTypeNode();
-    container.types.add(node);
-    Preconditions.checkNotNull(itemType_);
-    node.setType(TTypeNodeType.ARRAY);
-    itemType_.toThrift(container);
-  }
-
-  @Override
-  protected String prettyPrint(int lpad) {
-    String leftPadding = StringUtils.repeat(' ', lpad);
-    if (itemType_.isScalarType()) return leftPadding + toSql();
-    // Pass in the padding to make sure nested fields are aligned properly,
-    // even if we then strip the top-level padding.
-    String structStr = itemType_.prettyPrint(lpad);
-    structStr = structStr.substring(lpad);
-    return String.format("%sARRAY<%s>", leftPadding, structStr);
-  }
-}
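
A quick sketch of the SQL rendering above; it assumes the ScalarType constant Type.INT
from the same catalog package:

  import com.cloudera.impala.catalog.ArrayType;
  import com.cloudera.impala.catalog.Type;

  public class ArrayTypeSketch {
    public static void main(String[] args) {
      ArrayType nested = new ArrayType(new ArrayType(Type.INT));
      // Nesting past MAX_NESTING_DEPTH would render as "ARRAY<...>".
      System.out.println(nested.toSql(0));  // ARRAY<ARRAY<INT>>
    }
  }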

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/catalog/AuthorizationException.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/catalog/AuthorizationException.java b/fe/src/main/java/com/cloudera/impala/catalog/AuthorizationException.java
deleted file mode 100644
index ab97c83..0000000
--- a/fe/src/main/java/com/cloudera/impala/catalog/AuthorizationException.java
+++ /dev/null
@@ -1,32 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.catalog;
-
-
-/**
- * Thrown for authorization errors encountered when accessing Catalog objects.
- */
-public class AuthorizationException extends CatalogException {
-  public AuthorizationException(String msg, Throwable cause) {
-    super(msg, cause);
-  }
-
-  public AuthorizationException(String msg) {
-    super(msg);
-  }
-}
\ No newline at end of file


[40/61] [partial] incubator-impala git commit: IMPALA-3786: Replace "cloudera" with "apache" (part 1)

Posted by kw...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/InsertStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/InsertStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/InsertStmt.java
deleted file mode 100644
index c5965db..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/InsertStmt.java
+++ /dev/null
@@ -1,697 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Set;
-
-import com.cloudera.impala.planner.TableSink;
-import com.google.common.collect.ImmutableList;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.authorization.PrivilegeRequestBuilder;
-import com.cloudera.impala.catalog.Column;
-import com.cloudera.impala.catalog.HBaseTable;
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.catalog.KuduTable;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.catalog.View;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.FileSystemUtil;
-import com.cloudera.impala.planner.DataSink;
-import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Sets;
-
-/**
- * Representation of a single insert statement, including the select statement
- * whose results are to be inserted.
- */
-public class InsertStmt extends StatementBase {
-  private final static Logger LOG = LoggerFactory.getLogger(InsertStmt.class);
-
-  // Target table name as seen by the parser
-  private final TableName originalTableName_;
-
-  // Differentiates between INSERT INTO and INSERT OVERWRITE.
-  private final boolean overwrite_;
-
-  // List of column:value elements from the PARTITION (...) clause.
-  // Set to null if no partition was given.
-  private final List<PartitionKeyValue> partitionKeyValues_;
-
-  // User-supplied hints to control hash partitioning before the table sink in the plan.
-  private final List<String> planHints_;
-
-  // False if the original insert statement had a query statement, true if we need to
-  // auto-generate one (for insert into tbl()) during analysis.
-  private final boolean needsGeneratedQueryStatement_;
-
-  // The column permutation is specified by writing INSERT INTO tbl(col3, col1, col2...)
-  //
-  // It is a mapping from select-list expr index to (non-partition) output column. If
-  // null, will be set to the default permutation of all non-partition columns in Hive
-  // order.
-  //
-  // A column is said to be 'mentioned' if it occurs either in the column permutation, or
-  // the PARTITION clause. If columnPermutation is null, all non-partition columns are
-  // considered mentioned.
-  //
-  // Between them, the columnPermutation and the set of partitionKeyValues must mention
-  // every partition column in the target table exactly once. Other columns, if not
-  // explicitly mentioned, will be assigned NULL values. Partition columns are not
-  // defaulted to NULL by design, and are not just for NULL-valued partition slots.
-  //
-  // Dynamic partition keys may occur in either the permutation or the PARTITION
-  // clause. Partition columns with static values may only be mentioned in the PARTITION
-  // clause, where the static value is specified.
-  private final List<String> columnPermutation_;
-
-  /////////////////////////////////////////
-  // BEGIN: Members that need to be reset()
-
-  // List of inline views that may be referenced in queryStmt.
-  private final WithClause withClause_;
-
-  // Target table into which to insert. May be qualified by analyze()
-  private TableName targetTableName_;
-
-  // Select or union whose results are to be inserted. If null, will be set after
-  // analysis.
-  private QueryStmt queryStmt_;
-
-  // Set in analyze(). Contains metadata of target table to determine type of sink.
-  private Table table_;
-
-  // Set in analyze(). Exprs corresponding to the partitionKeyValues,
-  private List<Expr> partitionKeyExprs_ = Lists.newArrayList();
-
-  // Indicates whether this insert stmt has a shuffle or noshuffle plan hint.
-  // Both flags may be false, but at most one of them may be true.
-  // Shuffle forces data repartitioning before the data sink, and noshuffle
-  // prevents it. Set in analyze() based on planHints_.
-  private boolean hasShuffleHint_ = false;
-  private boolean hasNoShuffleHint_ = false;
-
-  // Output expressions that produce the final results to write to the target table. May
-  // include casts, and NullLiterals where an output column isn't explicitly mentioned.
-  // Set in prepareExpressions(). The i'th expr produces the i'th column of the target
-  // table.
-  private ArrayList<Expr> resultExprs_ = Lists.newArrayList();
-
-  // END: Members that need to be reset()
-  /////////////////////////////////////////
-
-  // For tables with primary keys, indicates if duplicate key errors are ignored.
-  private final boolean ignoreDuplicates_;
-
-  public InsertStmt(WithClause withClause, TableName targetTable, boolean overwrite,
-      List<PartitionKeyValue> partitionKeyValues, List<String> planHints,
-      QueryStmt queryStmt, List<String> columnPermutation, boolean ignoreDuplicates) {
-    withClause_ = withClause;
-    targetTableName_ = targetTable;
-    originalTableName_ = targetTableName_;
-    overwrite_ = overwrite;
-    partitionKeyValues_ = partitionKeyValues;
-    planHints_ = planHints;
-    queryStmt_ = queryStmt;
-    needsGeneratedQueryStatement_ = (queryStmt == null);
-    columnPermutation_ = columnPermutation;
-    table_ = null;
-    ignoreDuplicates_ = ignoreDuplicates;
-  }
-
-  /**
-   * C'tor used in clone().
-   */
-  private InsertStmt(InsertStmt other) {
-    super(other);
-    withClause_ = other.withClause_ != null ? other.withClause_.clone() : null;
-    targetTableName_ = other.targetTableName_;
-    originalTableName_ = other.originalTableName_;
-    overwrite_ = other.overwrite_;
-    partitionKeyValues_ = other.partitionKeyValues_;
-    planHints_ = other.planHints_;
-    queryStmt_ = other.queryStmt_ != null ? other.queryStmt_.clone() : null;
-    needsGeneratedQueryStatement_ = other.needsGeneratedQueryStatement_;
-    columnPermutation_ = other.columnPermutation_;
-    table_ = other.table_;
-    ignoreDuplicates_ = other.ignoreDuplicates_;
-  }
-
-  @Override
-  public void reset() {
-    super.reset();
-    if (withClause_ != null) withClause_.reset();
-    targetTableName_ = originalTableName_;
-    queryStmt_.reset();
-    table_ = null;
-    partitionKeyExprs_.clear();
-    hasShuffleHint_ = false;
-    hasNoShuffleHint_ = false;
-    resultExprs_.clear();
-  }
-
-  @Override
-  public InsertStmt clone() { return new InsertStmt(this); }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    if (isAnalyzed()) return;
-    super.analyze(analyzer);
-    try {
-      if (withClause_ != null) withClause_.analyze(analyzer);
-    } catch (AnalysisException e) {
-      // Ignore AnalysisExceptions if tables are missing to ensure the maximum number
-      // of missing tables can be collected before failing analyze().
-      if (analyzer.getMissingTbls().isEmpty()) throw e;
-    }
-
-    List<Expr> selectListExprs = null;
-    if (!needsGeneratedQueryStatement_) {
-      try {
-        // Use a child analyzer for the query stmt to properly scope WITH-clause
-        // views and to ignore irrelevant ORDER BYs.
-        Analyzer queryStmtAnalyzer = new Analyzer(analyzer);
-        queryStmt_.analyze(queryStmtAnalyzer);
-        // Subqueries need to be rewritten by the StmtRewriter first.
-        if (analyzer.containsSubquery()) return;
-        // Use getResultExprs() and not getBaseTblResultExprs() here because the final
-        // substitution with TupleIsNullPredicate() wrapping happens in planning.
-        selectListExprs = Expr.cloneList(queryStmt_.getResultExprs());
-      } catch (AnalysisException e) {
-        if (analyzer.getMissingTbls().isEmpty()) throw e;
-      }
-    } else {
-      selectListExprs = Lists.newArrayList();
-    }
-
-    // Set target table and perform table-type specific analysis and auth checking.
-    // Also checks if the target table is missing.
-    setTargetTable(analyzer);
-
-    // Abort analysis if there are any missing tables beyond this point.
-    if (!analyzer.getMissingTbls().isEmpty()) {
-      throw new AnalysisException("Found missing tables. Aborting analysis.");
-    }
-
-    boolean isHBaseTable = (table_ instanceof HBaseTable);
-    int numClusteringCols = isHBaseTable ? 0 : table_.getNumClusteringCols();
-
-    // Analysis of the INSERT statement from this point is basically the act of matching
-    // the set of output columns (which come from a column permutation, perhaps
-    // implicitly, and the PARTITION clause) to the set of input columns (which come from
-    // the select-list and any statically-valued columns in the PARTITION clause).
-    //
-    // First, we compute the set of mentioned columns, and reject statements that refer to
-    // non-existent columns, or duplicates (we must check both the column permutation, and
-    // the set of partition keys). Next, we check that all partition columns are
-    // mentioned. During this process we build the map from select-list expr index to
-    // column in the targeted table.
-    //
-    // Then we check that the select-list contains exactly the right number of expressions
-    // for all mentioned columns which are not statically-valued partition columns (which
-    // get their expressions from partitionKeyValues).
-    //
-    // Finally, prepareExpressions analyzes the expressions themselves, and confirms that
-    // they are type-compatible with the target columns. Where columns are not mentioned
-    // (and by this point, we know that missing columns are not partition columns),
-    // prepareExpressions assigns them a NULL literal expression.
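-    //
-    // Illustrative example (hypothetical table and query): for a table t(a, b)
-    // partitioned by (p), the statement
-    //   INSERT INTO t (b, a) PARTITION (p='2016') SELECT x, y FROM src
-    // maps select-list expr 0 (x) to column b, expr 1 (y) to column a, while the
-    // static value '2016' supplies partition column p without consuming a
-    // select-list expr.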
-
-    // A null permutation clause is the same as listing all non-partition columns in
-    // order.
-    List<String> analysisColumnPermutation = columnPermutation_;
-    if (analysisColumnPermutation == null) {
-      analysisColumnPermutation = Lists.newArrayList();
-      ArrayList<Column> tableColumns = table_.getColumns();
-      for (int i = numClusteringCols; i < tableColumns.size(); ++i) {
-        analysisColumnPermutation.add(tableColumns.get(i).getName());
-      }
-    }
-
-    // selectExprTargetColumns maps from select expression index to a column in the target
-    // table. It will eventually include all mentioned columns that aren't static-valued
-    // partition columns.
-    ArrayList<Column> selectExprTargetColumns = Lists.newArrayList();
-
-    // Tracks the name of all columns encountered in either the permutation clause or the
-    // partition clause to detect duplicates.
-    Set<String> mentionedColumnNames = Sets.newHashSet();
-    for (String columnName: analysisColumnPermutation) {
-      Column column = table_.getColumn(columnName);
-      if (column == null) {
-        throw new AnalysisException(
-            "Unknown column '" + columnName + "' in column permutation");
-      }
-
-      if (!mentionedColumnNames.add(columnName)) {
-        throw new AnalysisException(
-            "Duplicate column '" + columnName + "' in column permutation");
-      }
-      selectExprTargetColumns.add(column);
-    }
-
-    int numStaticPartitionExprs = 0;
-    if (partitionKeyValues_ != null) {
-      for (PartitionKeyValue pkv: partitionKeyValues_) {
-        Column column = table_.getColumn(pkv.getColName());
-        if (column == null) {
-          throw new AnalysisException("Unknown column '" + pkv.getColName() +
-                                      "' in partition clause");
-        }
-
-        if (column.getPosition() >= numClusteringCols) {
-          throw new AnalysisException(
-              "Column '" + pkv.getColName() + "' is not a partition column");
-        }
-
-        if (!mentionedColumnNames.add(pkv.getColName())) {
-          throw new AnalysisException(
-              "Duplicate column '" + pkv.getColName() + "' in partition clause");
-        }
-        if (!pkv.isDynamic()) {
-          numStaticPartitionExprs++;
-        } else {
-          selectExprTargetColumns.add(column);
-        }
-      }
-    }
-
-    // Checks that exactly all columns in the target table are assigned an expr.
-    checkColumnCoverage(selectExprTargetColumns, mentionedColumnNames,
-        selectListExprs.size(), numStaticPartitionExprs);
-
-    // Make sure static partition key values only contain const exprs.
-    if (partitionKeyValues_ != null) {
-      for (PartitionKeyValue kv: partitionKeyValues_) {
-        kv.analyze(analyzer);
-      }
-    }
-
-    // Populate partitionKeyExprs from partitionKeyValues and selectExprTargetColumns
-    prepareExpressions(selectExprTargetColumns, selectListExprs, table_, analyzer);
-    // Analyze plan hints at the end to prefer reporting other error messages first
-    // (e.g., the PARTITION clause is not applicable to unpartitioned and HBase tables).
-    analyzePlanHints(analyzer);
-  }
-
-  /**
-   * Sets table_ based on targetTableName_ and performs table-type specific analysis:
-   * - Partition clause is invalid for unpartitioned Hdfs tables and HBase tables
-   * - Overwrite is invalid for HBase tables
-   * - Check INSERT privileges as well as write access to Hdfs paths
-   * - Cannot insert into a view
-   * Adds table_ to the analyzer's descriptor table if analysis succeeds.
-   */
-  private void setTargetTable(Analyzer analyzer) throws AnalysisException {
-    // If the table has not yet been set, load it from the Catalog. This allows
-    // callers to set a table to analyze that may not actually be created in the Catalog.
-    // One example use case is CREATE TABLE AS SELECT which must run analysis on the
-    // INSERT before the table has actually been created.
-    if (table_ == null) {
-      if (!targetTableName_.isFullyQualified()) {
-        targetTableName_ =
-            new TableName(analyzer.getDefaultDb(), targetTableName_.getTbl());
-      }
-      table_ = analyzer.getTable(targetTableName_, Privilege.INSERT);
-    } else {
-      targetTableName_ = new TableName(table_.getDb().getName(), table_.getName());
-      PrivilegeRequestBuilder pb = new PrivilegeRequestBuilder();
-      analyzer.registerPrivReq(pb.onTable(table_.getDb().getName(), table_.getName())
-          .allOf(Privilege.INSERT).toRequest());
-    }
-
-    // We do not support inserting into views.
-    if (table_ instanceof View) {
-      throw new AnalysisException(
-          String.format("Impala does not support inserting into views: %s",
-          table_.getFullName()));
-    }
-
-    for (Column c: table_.getColumns()) {
-      if (!c.getType().isSupported()) {
-        throw new AnalysisException(String.format("Unable to INSERT into target table " +
-            "(%s) because the column '%s' has an unsupported type '%s'.",
-            targetTableName_, c.getName(), c.getType().toSql()));
-      }
-    }
-
-    boolean isHBaseTable = (table_ instanceof HBaseTable);
-    int numClusteringCols = isHBaseTable ? 0 : table_.getNumClusteringCols();
-
-    if (partitionKeyValues_ != null && numClusteringCols == 0) {
-      if (isHBaseTable) {
-        throw new AnalysisException("PARTITION clause is not valid for INSERT into " +
-            "HBase tables. '" + targetTableName_ + "' is an HBase table");
-
-      } else {
-        // Unpartitioned table, but INSERT has PARTITION clause
-        throw new AnalysisException("PARTITION clause is only valid for INSERT into " +
-            "partitioned table. '" + targetTableName_ + "' is not partitioned");
-      }
-    }
-
-    if (table_ instanceof HdfsTable) {
-      HdfsTable hdfsTable = (HdfsTable) table_;
-      if (!hdfsTable.hasWriteAccess()) {
-        throw new AnalysisException(String.format("Unable to INSERT into target table " +
-            "(%s) because Impala does not have WRITE access to at least one HDFS path" +
-            ": %s", targetTableName_, hdfsTable.getFirstLocationWithoutWriteAccess()));
-      }
-      StringBuilder error = new StringBuilder();
-      hdfsTable.parseSkipHeaderLineCount(error);
-      if (error.length() > 0) throw new AnalysisException(error.toString());
-      try {
-        if (!FileSystemUtil.isImpalaWritableFilesystem(hdfsTable.getLocation())) {
-          throw new AnalysisException(String.format("Unable to INSERT into target " +
-              "table (%s) because %s is not a supported filesystem.", targetTableName_,
-              hdfsTable.getLocation()));
-        }
-      } catch (IOException e) {
-        throw new AnalysisException(String.format("Unable to INSERT into target " +
-            "table (%s): %s.", targetTableName_, e.getMessage()), e);
-      }
-      for (int colIdx = 0; colIdx < numClusteringCols; ++colIdx) {
-        Column col = hdfsTable.getColumns().get(colIdx);
-        // Hive has a number of issues handling BOOLEAN partition columns (see HIVE-6590).
-        // Instead of working around the Hive bugs, INSERT is disabled for BOOLEAN
-        // partitions in Impala. Once the Hive JIRA is resolved, we can remove this
-        // analysis check.
-        if (col.getType() == Type.BOOLEAN) {
-          throw new AnalysisException(String.format("INSERT into table with BOOLEAN " +
-              "partition column (%s) is not supported: %s", col.getName(),
-              targetTableName_));
-        }
-      }
-    }
-
-    if (table_ instanceof KuduTable) {
-      if (overwrite_) {
-        throw new AnalysisException("INSERT OVERWRITE not supported for Kudu tables.");
-      }
-      if (partitionKeyValues_ != null && !partitionKeyValues_.isEmpty()) {
-        throw new AnalysisException(
-            "Partition specifications are not supported for Kudu tables.");
-      }
-    }
-
-    if (isHBaseTable && overwrite_) {
-      throw new AnalysisException("HBase doesn't have a way to perform INSERT OVERWRITE");
-    }
-
-    // Add target table to descriptor table.
-    analyzer.getDescTbl().addReferencedTable(table_);
-  }
-
-  /**
-   * Checks that the column permutation + select list + static partition exprs +
-   * dynamic partition exprs collectively cover exactly all columns in the target table
-   * (not more or fewer).
-   */
-  private void checkColumnCoverage(ArrayList<Column> selectExprTargetColumns,
-      Set<String> mentionedColumnNames, int numSelectListExprs,
-      int numStaticPartitionExprs) throws AnalysisException {
-    boolean isHBaseTable = (table_ instanceof HBaseTable);
-    int numClusteringCols = isHBaseTable ? 0 : table_.getNumClusteringCols();
-    // Check that all columns are mentioned by the permutation and partition clauses
-    if (selectExprTargetColumns.size() + numStaticPartitionExprs !=
-        table_.getColumns().size()) {
-      // We've already ruled out too many columns in the permutation and partition clauses
-      // by checking that there are no duplicates and that every column mentioned actually
-      // exists. So not all columns are mentioned in the query. If the unmentioned columns
-      // include partition columns, this is an error.
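-      // For example (hypothetical schema), inserting into t(a, b) partitioned by (p)
-      // with "INSERT INTO t (a, b) SELECT ..." never mentions p, so p is reported
-      // below as a missing partition column.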
-      List<String> missingColumnNames = Lists.newArrayList();
-      for (Column column: table_.getColumns()) {
-        if (!mentionedColumnNames.contains(column.getName())) {
-          // HBase tables have a single row-key column which is always in position 0. It
-          // must be mentioned, since it is invalid to set it to NULL (which would
-          // otherwise happen by default).
-          if (isHBaseTable && column.getPosition() == 0) {
-            throw new AnalysisException("Row-key column '" + column.getName() +
-                "' must be explicitly mentioned in column permutation.");
-          }
-          if (column.getPosition() < numClusteringCols) {
-            missingColumnNames.add(column.getName());
-          }
-        }
-      }
-
-      if (!missingColumnNames.isEmpty()) {
-        throw new AnalysisException(
-            "Not enough partition columns mentioned in query. Missing columns are: " +
-            Joiner.on(", ").join(missingColumnNames));
-      }
-    }
-
-    // Expect the selectListExpr to have entries for every target column
-    if (selectExprTargetColumns.size() != numSelectListExprs) {
-      String comparator =
-          (selectExprTargetColumns.size() < numSelectListExprs) ? "fewer" : "more";
-      String partitionClause =
-          (partitionKeyValues_ == null) ? "returns" : "and PARTITION clause return";
-
-      // If there was no column permutation provided, the error is that the select-list
-      // has the wrong number of expressions compared to the number of columns in the
-      // table. If there was a column permutation, then the mismatch is between the
-      // select-list and the permutation itself.
-      if (columnPermutation_ == null) {
-        int totalColumnsMentioned = numSelectListExprs + numStaticPartitionExprs;
-        throw new AnalysisException(String.format(
-            "Target table '%s' has %s columns (%s) than the SELECT / VALUES clause %s" +
-            " (%s)", table_.getFullName(), comparator,
-            table_.getColumns().size(), partitionClause, totalColumnsMentioned));
-      } else {
-        String partitionPrefix =
-            (partitionKeyValues_ == null) ? "mentions" : "and PARTITION clause mention";
-        throw new AnalysisException(String.format(
-            "Column permutation %s %s columns (%s) than " +
-            "the SELECT / VALUES clause %s (%s)", partitionPrefix, comparator,
-            selectExprTargetColumns.size(), partitionClause, numSelectListExprs));
-      }
-    }
-  }
-
-  /**
-   * Performs three final parts of the analysis:
-   * 1. Checks type compatibility between all expressions and their targets
-   *
-   * 2. Populates partitionKeyExprs with type-compatible expressions, in Hive
-   * partition-column order, for all partition columns
-   *
-   * 3. Populates resultExprs_ with type-compatible expressions, in Hive column order,
-   * for all expressions in the select-list. Unmentioned columns are assigned NULL literal
-   * expressions.
-   *
-   * If necessary, adds casts to the expressions to make them compatible with the type of
-   * the corresponding column.
-   *
-   * @throws AnalysisException
-   *           If an expression is not compatible with its target column
-   */
-  private void prepareExpressions(List<Column> selectExprTargetColumns,
-      List<Expr> selectListExprs, Table tbl, Analyzer analyzer)
-      throws AnalysisException {
-    // Temporary lists of partition key exprs and names in an arbitrary order.
-    List<Expr> tmpPartitionKeyExprs = new ArrayList<Expr>();
-    List<String> tmpPartitionKeyNames = new ArrayList<String>();
-
-    int numClusteringCols = (tbl instanceof HBaseTable) ? 0 : tbl.getNumClusteringCols();
-
-    // Check dynamic partition columns for type compatibility.
-    for (int i = 0; i < selectListExprs.size(); ++i) {
-      Column targetColumn = selectExprTargetColumns.get(i);
-      Expr compatibleExpr = checkTypeCompatibility(
-          targetTableName_.toString(), targetColumn, selectListExprs.get(i));
-      if (targetColumn.getPosition() < numClusteringCols) {
-        // This is a dynamic clustering column
-        tmpPartitionKeyExprs.add(compatibleExpr);
-        tmpPartitionKeyNames.add(targetColumn.getName());
-      }
-      selectListExprs.set(i, compatibleExpr);
-    }
-
-    // Check static partition columns, dynamic entries in partitionKeyValues will already
-    // be in selectExprTargetColumns and therefore are ignored in this loop
-    if (partitionKeyValues_ != null) {
-      for (PartitionKeyValue pkv: partitionKeyValues_) {
-        if (pkv.isStatic()) {
-          // tableColumn is guaranteed to be non-null after the earlier analysis checks
-          Column tableColumn = table_.getColumn(pkv.getColName());
-          Expr compatibleExpr = checkTypeCompatibility(
-              targetTableName_.toString(), tableColumn, pkv.getValue());
-          tmpPartitionKeyExprs.add(compatibleExpr);
-          tmpPartitionKeyNames.add(pkv.getColName());
-        }
-      }
-    }
-
-    // Reorder the partition key exprs and names to be consistent with the target table
-    // declaration.  We need those exprs in the original order to create the corresponding
-    // Hdfs folder structure correctly.
-    for (Column c: table_.getColumns()) {
-      for (int j = 0; j < tmpPartitionKeyNames.size(); ++j) {
-        if (c.getName().equals(tmpPartitionKeyNames.get(j))) {
-          partitionKeyExprs_.add(tmpPartitionKeyExprs.get(j));
-          break;
-        }
-      }
-    }
-
-    Preconditions.checkState(partitionKeyExprs_.size() == numClusteringCols);
-    // Make sure we have stats for partitionKeyExprs
-    for (Expr expr: partitionKeyExprs_) {
-      expr.analyze(analyzer);
-    }
-
-    // Finally, 'undo' the permutation so that the selectListExprs are in Hive column
-    // order, and add NULL expressions to all missing columns.
-    for (Column tblColumn: table_.getColumnsInHiveOrder()) {
-      boolean matchFound = false;
-      for (int i = 0; i < selectListExprs.size(); ++i) {
-        if (selectExprTargetColumns.get(i).getName().equals(tblColumn.getName())) {
-          resultExprs_.add(selectListExprs.get(i));
-          matchFound = true;
-          break;
-        }
-      }
-      // If no match is found, either the column is a clustering column with a static
-      // value, or it was unmentioned and therefore should have a NULL select-list
-      // expression.
-      if (!matchFound) {
-        if (tblColumn.getPosition() >= numClusteringCols) {
-          // Unmentioned non-clustering columns get NULL literals with the appropriate
-          // target type because Parquet cannot handle NULL_TYPE (IMPALA-617).
-          resultExprs_.add(NullLiteral.create(tblColumn.getType()));
-        }
-      }
-    }
-    // TODO: Check that HBase row-key columns are not NULL? See IMPALA-406
-    if (needsGeneratedQueryStatement_) {
-      // Build a query statement that returns NULL for every column
-      List<SelectListItem> selectListItems = Lists.newArrayList();
-      for(Expr e: resultExprs_) {
-        selectListItems.add(new SelectListItem(e, null));
-      }
-      SelectList selectList = new SelectList(selectListItems);
-      queryStmt_ = new SelectStmt(selectList, null, null, null, null, null, null);
-      queryStmt_.analyze(analyzer);
-    }
-  }
-
-  private void analyzePlanHints(Analyzer analyzer) throws AnalysisException {
-    if (planHints_ == null) return;
-    if (!planHints_.isEmpty() && table_ instanceof HBaseTable) {
-      throw new AnalysisException("INSERT hints are only supported for inserting into " +
-          "Hdfs tables.");
-    }
-    for (String hint: planHints_) {
-      if (hint.equalsIgnoreCase("SHUFFLE")) {
-        if (hasNoShuffleHint_) {
-          throw new AnalysisException("Conflicting INSERT hint: " + hint);
-        }
-        hasShuffleHint_ = true;
-        analyzer.setHasPlanHints();
-      } else if (hint.equalsIgnoreCase("NOSHUFFLE")) {
-        if (hasShuffleHint_) {
-          throw new AnalysisException("Conflicting INSERT hint: " + hint);
-        }
-        hasNoShuffleHint_ = true;
-        analyzer.setHasPlanHints();
-      } else {
-        analyzer.addWarning("INSERT hint not recognized: " + hint);
-      }
-    }
-    // Both flags may be false or one of them may be true, but not both.
-    Preconditions.checkState((!hasShuffleHint_ && !hasNoShuffleHint_)
-        || (hasShuffleHint_ ^ hasNoShuffleHint_));
-  }
-
-  public List<String> getPlanHints() { return planHints_; }
-  public TableName getTargetTableName() { return targetTableName_; }
-  public Table getTargetTable() { return table_; }
-  public void setTargetTable(Table table) { this.table_ = table; }
-  public boolean isOverwrite() { return overwrite_; }
-
-  /**
-   * Only valid after analysis
-   */
-  public QueryStmt getQueryStmt() { return queryStmt_; }
-  public void setQueryStmt(QueryStmt stmt) { queryStmt_ = stmt; }
-  public List<Expr> getPartitionKeyExprs() { return partitionKeyExprs_; }
-  public boolean hasShuffleHint() { return hasShuffleHint_; }
-  public boolean hasNoShuffleHint() { return hasNoShuffleHint_; }
-  public ArrayList<Expr> getResultExprs() { return resultExprs_; }
-
-  public DataSink createDataSink() {
-    // analyze() must have been called before.
-    Preconditions.checkState(table_ != null);
-    return TableSink.create(table_, TableSink.Op.INSERT, partitionKeyExprs_,
-        ImmutableList.<Integer>of(), overwrite_, ignoreDuplicates_);
-  }
-
-  /**
-   * Substitutes the result expressions and the partition key expressions with smap.
-   * Preserves the original types of those expressions during the substitution.
-   */
-  public void substituteResultExprs(ExprSubstitutionMap smap, Analyzer analyzer) {
-    resultExprs_ = Expr.substituteList(resultExprs_, smap, analyzer, true);
-    partitionKeyExprs_ = Expr.substituteList(partitionKeyExprs_, smap, analyzer, true);
-  }
-
-  @Override
-  public String toSql() {
-    StringBuilder strBuilder = new StringBuilder();
-
-    if (withClause_ != null) strBuilder.append(withClause_.toSql() + " ");
-
-    strBuilder.append("INSERT ");
-    if (overwrite_) {
-      strBuilder.append("OVERWRITE ");
-    } else {
-      if (ignoreDuplicates_) strBuilder.append("IGNORE ");
-      strBuilder.append("INTO ");
-    }
-    strBuilder.append("TABLE " + originalTableName_);
-    if (columnPermutation_ != null) {
-      strBuilder.append("(");
-      strBuilder.append(Joiner.on(", ").join(columnPermutation_));
-      strBuilder.append(")");
-    }
-    if (partitionKeyValues_ != null) {
-      List<String> values = Lists.newArrayList();
-      for (PartitionKeyValue pkv: partitionKeyValues_) {
-        values.add(pkv.getColName() +
-            (pkv.getValue() != null ? ("=" + pkv.getValue().toSql()) : ""));
-      }
-      strBuilder.append(" PARTITION (" + Joiner.on(", ").join(values) + ")");
-    }
-    if (planHints_ != null) {
-      strBuilder.append(" " + ToSqlUtils.getPlanHintsSql(planHints_));
-    }
-    if (!needsGeneratedQueryStatement_) {
-      strBuilder.append(" " + queryStmt_.toSql());
-    }
-    return strBuilder.toString();
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/IsNotEmptyPredicate.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/IsNotEmptyPredicate.java b/fe/src/main/java/com/cloudera/impala/analysis/IsNotEmptyPredicate.java
deleted file mode 100644
index fd07a32..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/IsNotEmptyPredicate.java
+++ /dev/null
@@ -1,65 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TExprNode;
-import com.cloudera.impala.thrift.TExprNodeType;
-import com.google.common.base.Preconditions;
-
-/**
- * Predicate that checks whether a collection is empty or not.
- * This predicate is not user-accessible from SQL, and may be
- * generated as a performance optimization for certain queries.
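- * For example, a plan that unnests a nested collection may be guarded with
- * !empty(<collection>) so that rows whose collection is empty can be skipped early
- * (illustrative; the exact plan shape depends on the planner).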
- * TODO: Pass this Predicate as a TExprNodeType.FUNCTION_CALL
- * to the BE just like the rest of our Predicates. This is not yet
- * done to avoid invasive changes required in FE/BE to deal with
- * resolution of functions with complex-types arguments,
- */
-public class IsNotEmptyPredicate extends Predicate {
-
-  public IsNotEmptyPredicate(Expr collectionExpr) {
-    super();
-    Preconditions.checkNotNull(collectionExpr);
-    children_.add(collectionExpr);
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    if (isAnalyzed_) return;
-    super.analyze(analyzer);
-    if (!getChild(0).getType().isCollectionType()) {
-      throw new AnalysisException("Operand must be a collection type: "
-          + getChild(0).toSql() + " is of type " + getChild(0).getType());
-    }
-    // Avoid influencing cardinality estimates.
-    selectivity_ = 1.0;
-    if (getChild(0).hasCost()) evalCost_ = getChild(0).getCost() + IS_NOT_EMPTY_COST;
-  }
-
-  @Override
-  public String toSqlImpl() { return "!empty(" + getChild(0).toSql() + ")"; }
-
-  @Override
-  protected void toThrift(TExprNode msg) {
-    msg.node_type = TExprNodeType.IS_NOT_EMPTY_PRED;
-  }
-
-  @Override
-  public Expr clone() { return new IsNotEmptyPredicate(getChild(0).clone()); }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/IsNullPredicate.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/IsNullPredicate.java b/fe/src/main/java/com/cloudera/impala/analysis/IsNullPredicate.java
deleted file mode 100644
index 6a75b5f..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/IsNullPredicate.java
+++ /dev/null
@@ -1,186 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import com.cloudera.impala.catalog.Db;
-import com.cloudera.impala.catalog.Function;
-import com.cloudera.impala.catalog.Function.CompareMode;
-import com.cloudera.impala.catalog.ScalarFunction;
-import com.cloudera.impala.catalog.ScalarType;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.Reference;
-import com.cloudera.impala.thrift.TExprNode;
-import com.cloudera.impala.thrift.TExprNodeType;
-import com.google.common.base.Objects;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-public class IsNullPredicate extends Predicate {
-  private final boolean isNotNull_;
-
-  private static final String IS_NULL = "is_null_pred";
-  private static final String IS_NOT_NULL = "is_not_null_pred";
-
-  public IsNullPredicate(Expr e, boolean isNotNull) {
-    super();
-    this.isNotNull_ = isNotNull;
-    Preconditions.checkNotNull(e);
-    children_.add(e);
-  }
-
-  /**
-   * Copy c'tor used in clone().
-   */
-  protected IsNullPredicate(IsNullPredicate other) {
-    super(other);
-    isNotNull_ = other.isNotNull_;
-  }
-
-  public boolean isNotNull() { return isNotNull_; }
-
-  public static void initBuiltins(Db db) {
-    for (Type t: Type.getSupportedTypes()) {
-      if (t.isNull()) continue;
-      String isNullSymbol;
-      if (t.isBoolean()) {
-        isNullSymbol = "_ZN6impala15IsNullPredicate6IsNullIN10impala_udf10BooleanValE" +
-            "EES3_PNS2_15FunctionContextERKT_";
-      } else {
-        String udfType = Function.getUdfType(t);
-        isNullSymbol = "_ZN6impala15IsNullPredicate6IsNullIN10impala_udf" +
-            udfType.length() + udfType +
-            "EEENS2_10BooleanValEPNS2_15FunctionContextERKT_";
-      }
-      db.addBuiltin(ScalarFunction.createBuiltinOperator(
-          IS_NULL, isNullSymbol, Lists.newArrayList(t), Type.BOOLEAN));
-
-      String isNotNullSymbol = isNullSymbol.replace("6IsNull", "9IsNotNull");
-      db.addBuiltin(ScalarFunction.createBuiltinOperator(
-          IS_NOT_NULL, isNotNullSymbol, Lists.newArrayList(t), Type.BOOLEAN));
-    }
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (!super.equals(obj)) return false;
-    return ((IsNullPredicate) obj).isNotNull_ == isNotNull_;
-  }
-
-  @Override
-  public String toSqlImpl() {
-    return getChild(0).toSql() + (isNotNull_ ? " IS NOT NULL" : " IS NULL");
-  }
-
-  @Override
-  public String debugString() {
-    return Objects.toStringHelper(this)
-        .add("notNull", isNotNull_)
-        .addValue(super.debugString())
-        .toString();
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    if (isAnalyzed_) return;
-    super.analyze(analyzer);
-
-    if (contains(Subquery.class)) {
-      if (getChild(0) instanceof ExistsPredicate) {
-        // Replace the EXISTS subquery with a BoolLiteral as it can never return
-        // a null value.
-        setChild(0, new BoolLiteral(true));
-        getChild(0).analyze(analyzer);
-      } else if (!getChild(0).contains(Expr.IS_SCALAR_SUBQUERY)) {
-        // We only support scalar subqueries in an IS NULL predicate because
-        // they can be rewritten into a join.
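-        // For example, "(SELECT min(c) FROM t) IS NULL" contains a scalar subquery
-        // and is allowed; non-scalar subqueries in this position are rejected below.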
-        // TODO: Add support for InPredicates and BinaryPredicates with
-        // subqueries when we implement independent subquery evaluation.
-        // TODO: Handle arbitrary UDA/Udfs
-        throw new AnalysisException("Unsupported IS NULL predicate that contains " +
-            "a subquery: " + toSqlImpl());
-      }
-    }
-
-    // Make sure the BE never sees TYPE_NULL
-    if (getChild(0).getType().isNull()) {
-      uncheckedCastChild(ScalarType.BOOLEAN, 0);
-    }
-
-    if (getChild(0).getType().isComplexType()) {
-      String errorMsg = (isNotNull_ ? "IS NOT NULL" : "IS NULL") +
-         " predicate does not support complex types: ";
-      throw new AnalysisException(errorMsg + toSqlImpl());
-    }
-
-    if (isNotNull_) {
-      fn_ = getBuiltinFunction(
-          analyzer, IS_NOT_NULL, collectChildReturnTypes(), CompareMode.IS_IDENTICAL);
-    } else {
-      fn_ = getBuiltinFunction(
-          analyzer, IS_NULL, collectChildReturnTypes(), CompareMode.IS_IDENTICAL);
-    }
-    if (getChild(0).hasCost()) evalCost_ = getChild(0).getCost() + IS_NULL_COST;
-
-    // determine selectivity
-    // TODO: increase this to make sure we don't end up favoring broadcast joins
-    // due to underestimated cardinalities?
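-    // Illustrative example: with table stats of 1000 rows and column stats of 100
-    // NULLs for the slot, "col IS NULL" gets selectivity 100/1000 = 0.1 and
-    // "col IS NOT NULL" gets 0.9.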
-    Reference<SlotRef> slotRefRef = new Reference<SlotRef>();
-    if (isSingleColumnPredicate(slotRefRef, null)) {
-      SlotDescriptor slotDesc = slotRefRef.getRef().getDesc();
-      if (!slotDesc.getStats().hasNulls()) return;
-      Table table = slotDesc.getParent().getTable();
-      if (table != null && table.getNumRows() > 0) {
-        long numRows = table.getNumRows();
-        if (isNotNull_) {
-          selectivity_ =
-              (double) (numRows - slotDesc.getStats().getNumNulls()) / (double) numRows;
-        } else {
-          selectivity_ = (double) slotDesc.getStats().getNumNulls() / (double) numRows;
-        }
-        selectivity_ = Math.max(0.0, Math.min(1.0, selectivity_));
-      }
-    }
-  }
-
-  @Override
-  protected void toThrift(TExprNode msg) {
-    msg.node_type = TExprNodeType.FUNCTION_CALL;
-  }
-
-  /*
-   * If predicate is of the form "<SlotRef> IS [NOT] NULL", returns the
-   * SlotRef.
-   */
-  @Override
-  public SlotRef getBoundSlot() {
-    return getChild(0).unwrapSlotRef(true);
-  }
-
-  /**
-   * Negates an IsNullPredicate.
-   */
-  @Override
-  public Expr negate() {
-    return new IsNullPredicate(getChild(0), !isNotNull_);
-  }
-
-  @Override
-  public Expr clone() { return new IsNullPredicate(this); }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/JoinOperator.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/JoinOperator.java b/fe/src/main/java/com/cloudera/impala/analysis/JoinOperator.java
deleted file mode 100644
index d50861b..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/JoinOperator.java
+++ /dev/null
@@ -1,112 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import com.cloudera.impala.thrift.TJoinOp;
-
-public enum JoinOperator {
-  INNER_JOIN("INNER JOIN", TJoinOp.INNER_JOIN),
-  LEFT_OUTER_JOIN("LEFT OUTER JOIN", TJoinOp.LEFT_OUTER_JOIN),
-  LEFT_SEMI_JOIN("LEFT SEMI JOIN", TJoinOp.LEFT_SEMI_JOIN),
-  LEFT_ANTI_JOIN("LEFT ANTI JOIN", TJoinOp.LEFT_ANTI_JOIN),
-  RIGHT_OUTER_JOIN("RIGHT OUTER JOIN", TJoinOp.RIGHT_OUTER_JOIN),
-  RIGHT_SEMI_JOIN("RIGHT SEMI JOIN", TJoinOp.RIGHT_SEMI_JOIN),
-  RIGHT_ANTI_JOIN("RIGHT ANTI JOIN", TJoinOp.RIGHT_ANTI_JOIN),
-  FULL_OUTER_JOIN("FULL OUTER JOIN", TJoinOp.FULL_OUTER_JOIN),
-  CROSS_JOIN("CROSS JOIN", TJoinOp.CROSS_JOIN),
-  // Variant of the LEFT ANTI JOIN that is used for the rewrite of
-  // NOT IN subqueries. It can have a single equality join conjunct
-  // that returns TRUE when the rhs is NULL.
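-  // For example, "SELECT * FROM t1 WHERE t1.a NOT IN (SELECT a FROM t2)" is
-  // rewritten into a join of this type between t1 and the subquery.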
-  NULL_AWARE_LEFT_ANTI_JOIN("NULL AWARE LEFT ANTI JOIN",
-      TJoinOp.NULL_AWARE_LEFT_ANTI_JOIN);
-
-  private final String description_;
-  private final TJoinOp thriftJoinOp_;
-
-  private JoinOperator(String description, TJoinOp thriftJoinOp) {
-    this.description_ = description;
-    this.thriftJoinOp_ = thriftJoinOp;
-  }
-
-  @Override
-  public String toString() {
-    return description_;
-  }
-
-  public TJoinOp toThrift() {
-    return thriftJoinOp_;
-  }
-
-  public boolean isInnerJoin() {
-    return this == INNER_JOIN;
-  }
-
-  public boolean isLeftOuterJoin() { return this == LEFT_OUTER_JOIN; }
-  public boolean isRightOuterJoin() { return this == RIGHT_OUTER_JOIN; }
-
-  public boolean isOuterJoin() {
-    return this == LEFT_OUTER_JOIN
-        || this == RIGHT_OUTER_JOIN
-        || this == FULL_OUTER_JOIN;
-  }
-
-  public boolean isSemiJoin() {
-    return this == JoinOperator.LEFT_SEMI_JOIN || this == JoinOperator.LEFT_ANTI_JOIN ||
-        this == JoinOperator.RIGHT_SEMI_JOIN || this == JoinOperator.RIGHT_ANTI_JOIN ||
-        this == JoinOperator.NULL_AWARE_LEFT_ANTI_JOIN;
-  }
-
-  public boolean isLeftSemiJoin() {
-    return this == JoinOperator.LEFT_SEMI_JOIN || this == JoinOperator.LEFT_ANTI_JOIN ||
-        this == JoinOperator.NULL_AWARE_LEFT_ANTI_JOIN;
-  }
-
-  public boolean isRightSemiJoin() {
-    return this == JoinOperator.RIGHT_SEMI_JOIN || this == JoinOperator.RIGHT_ANTI_JOIN;
-  }
-
-  public boolean isCrossJoin() {
-    return this == JoinOperator.CROSS_JOIN;
-  }
-
-  public boolean isFullOuterJoin() {
-    return this == JoinOperator.FULL_OUTER_JOIN;
-  }
-
-  public boolean isNullAwareLeftAntiJoin() {
-    return this == JoinOperator.NULL_AWARE_LEFT_ANTI_JOIN;
-  }
-
-  public boolean isAntiJoin() {
-    return this == JoinOperator.LEFT_ANTI_JOIN || this == JoinOperator.RIGHT_ANTI_JOIN ||
-        this == JoinOperator.NULL_AWARE_LEFT_ANTI_JOIN;
-  }
-
-  public JoinOperator invert() {
-    switch (this) {
-      case LEFT_OUTER_JOIN: return RIGHT_OUTER_JOIN;
-      case RIGHT_OUTER_JOIN: return LEFT_OUTER_JOIN;
-      case LEFT_SEMI_JOIN: return RIGHT_SEMI_JOIN;
-      case RIGHT_SEMI_JOIN: return LEFT_SEMI_JOIN;
-      case LEFT_ANTI_JOIN: return RIGHT_ANTI_JOIN;
-      case RIGHT_ANTI_JOIN: return LEFT_ANTI_JOIN;
-      case NULL_AWARE_LEFT_ANTI_JOIN: throw new IllegalStateException("Not implemented");
-      default: return this;
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/LikePredicate.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/LikePredicate.java b/fe/src/main/java/com/cloudera/impala/analysis/LikePredicate.java
deleted file mode 100644
index cb006bf..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/LikePredicate.java
+++ /dev/null
@@ -1,169 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.regex.Pattern;
-import java.util.regex.PatternSyntaxException;
-
-import com.cloudera.impala.catalog.Db;
-import com.cloudera.impala.catalog.Function.CompareMode;
-import com.cloudera.impala.catalog.ScalarFunction;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TExprNode;
-import com.cloudera.impala.thrift.TExprNodeType;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-public class LikePredicate extends Predicate {
-  enum Operator {
-    LIKE("LIKE"),
-    ILIKE("ILIKE"),
-    RLIKE("RLIKE"),
-    REGEXP("REGEXP"),
-    IREGEXP("IREGEXP");
-
-    private final String description_;
-
-    private Operator(String description) {
-      this.description_ = description;
-    }
-
-    @Override
-    public String toString() {
-      return description_;
-    }
-  }
-
-  public static void initBuiltins(Db db) {
-    db.addBuiltin(ScalarFunction.createBuiltin(
-        Operator.LIKE.name(), Lists.<Type>newArrayList(Type.STRING, Type.STRING),
-        false, Type.BOOLEAN, "_ZN6impala13LikePredicate4LikeEPN10impala_udf15FunctionContextERKNS1_9StringValES6_",
-        "_ZN6impala13LikePredicate11LikePrepareEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE",
-        "_ZN6impala13LikePredicate9LikeCloseEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE", true));
-    db.addBuiltin(ScalarFunction.createBuiltin(
-        Operator.ILIKE.name(), Lists.<Type>newArrayList(Type.STRING, Type.STRING),
-        false, Type.BOOLEAN, "_ZN6impala13LikePredicate4LikeEPN10impala_udf15FunctionContextERKNS1_9StringValES6_",
-        "_ZN6impala13LikePredicate12ILikePrepareEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE",
-        "_ZN6impala13LikePredicate9LikeCloseEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE", true));
-    db.addBuiltin(ScalarFunction.createBuiltin(
-        Operator.RLIKE.name(), Lists.<Type>newArrayList(Type.STRING, Type.STRING),
-        false, Type.BOOLEAN, "_ZN6impala13LikePredicate5RegexEPN10impala_udf15FunctionContextERKNS1_9StringValES6_",
-        "_ZN6impala13LikePredicate12RegexPrepareEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE",
-        "_ZN6impala13LikePredicate10RegexCloseEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE", true));
-    db.addBuiltin(ScalarFunction.createBuiltin(
-        Operator.REGEXP.name(), Lists.<Type>newArrayList(Type.STRING, Type.STRING),
-        false, Type.BOOLEAN, "_ZN6impala13LikePredicate5RegexEPN10impala_udf15FunctionContextERKNS1_9StringValES6_",
-        "_ZN6impala13LikePredicate12RegexPrepareEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE",
-        "_ZN6impala13LikePredicate10RegexCloseEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE", true));
-    db.addBuiltin(ScalarFunction.createBuiltin(
-        Operator.IREGEXP.name(), Lists.<Type>newArrayList(Type.STRING, Type.STRING),
-        false, Type.BOOLEAN, "_ZN6impala13LikePredicate5RegexEPN10impala_udf15FunctionContextERKNS1_9StringValES6_",
-        "_ZN6impala13LikePredicate13IRegexPrepareEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE",
-        "_ZN6impala13LikePredicate10RegexCloseEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE", true));
-  }
-
-  private final Operator op_;
-
-  public LikePredicate(Operator op, Expr e1, Expr e2) {
-    super();
-    this.op_ = op;
-    Preconditions.checkNotNull(e1);
-    children_.add(e1);
-    Preconditions.checkNotNull(e2);
-    children_.add(e2);
-  }
-
-  /**
-   * Copy c'tor used in clone().
-   */
-  public LikePredicate(LikePredicate other) {
-    super(other);
-    op_ = other.op_;
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (!super.equals(obj)) return false;
-    return ((LikePredicate) obj).op_ == op_;
-  }
-
-  @Override
-  public String toSqlImpl() {
-    return getChild(0).toSql() + " " + op_.toString() + " " + getChild(1).toSql();
-  }
-
-  @Override
-  protected void toThrift(TExprNode msg) {
-    msg.node_type = TExprNodeType.FUNCTION_CALL;
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    if (isAnalyzed_) return;
-    super.analyze(analyzer);
-    if (!getChild(0).getType().isStringType() && !getChild(0).getType().isNull()) {
-      throw new AnalysisException(
-          "left operand of " + op_.toString() + " must be of type STRING: " + toSql());
-    }
-    if (!getChild(1).getType().isStringType() && !getChild(1).getType().isNull()) {
-      throw new AnalysisException(
-          "right operand of " + op_.toString() + " must be of type STRING: " + toSql());
-    }
-
-    fn_ = getBuiltinFunction(analyzer, op_.toString(), collectChildReturnTypes(),
-        CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
-    Preconditions.checkState(fn_ != null);
-    Preconditions.checkState(fn_.getReturnType().isBoolean());
-
-    if (getChild(1).isLiteral() && !getChild(1).isNullLiteral()
-        && (op_ == Operator.RLIKE || op_ == Operator.REGEXP || op_ == Operator.IREGEXP)) {
-      // let's make sure the pattern works
-      // TODO: this checks that it's a Java-supported regex, but the syntax supported
-      // by the backend is Posix; add a call to the backend to check the re syntax
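-      // For example, a malformed pattern such as "abc[" fails to compile here and
-      // is rejected with an AnalysisException.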
-      try {
-        Pattern.compile(((StringLiteral) getChild(1)).getValue());
-      } catch (PatternSyntaxException e) {
-        throw new AnalysisException(
-            "invalid regular expression in '" + this.toSql() + "'");
-      }
-    }
-    castForFunctionCall(false);
-
-    if (hasChildCosts()) {
-      if (getChild(1).isLiteral() && !getChild(1).isNullLiteral() &&
-          Pattern.matches("[%_]*[^%_]*[%_]*", ((StringLiteral) getChild(1)).getValue())) {
-        // This pattern only has wildcards as leading or trailing character,
-        // so it is linear.
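-        // For example, '%abc', 'abc%' and '%abc%' take this branch, while a pattern
-        // with interior wildcards such as 'a%b%c' falls through to the else branch.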
-        evalCost_ = getChildCosts() +
-            (float) (getAvgStringLength(getChild(0)) + getAvgStringLength(getChild(1)) *
-            BINARY_PREDICATE_COST) + LIKE_COST;
-      } else {
-        // This pattern is more expensive, so calculate its cost as quadratic.
-        evalCost_ = getChildCosts() +
-            (float) (getAvgStringLength(getChild(0)) * getAvgStringLength(getChild(1)) *
-            BINARY_PREDICATE_COST) + LIKE_COST;
-      }
-    }
-  }
-
-  @Override
-  public Expr clone() { return new LikePredicate(this); }
-
-  public Operator getOp() { return op_; }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/LimitElement.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/LimitElement.java b/fe/src/main/java/com/cloudera/impala/analysis/LimitElement.java
deleted file mode 100644
index 4de9501..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/LimitElement.java
+++ /dev/null
@@ -1,183 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.service.FeSupport;
-import com.cloudera.impala.thrift.TColumnValue;
-import com.google.common.base.Preconditions;
-
-/**
- * Combination of limit and offset expressions.
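- * For example, "LIMIT 10 OFFSET 20" is represented by a LimitElement whose limit
- * expression evaluates to 10 and whose offset expression evaluates to 20.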
- */
-class LimitElement {
-  /////////////////////////////////////////
-  // BEGIN: Members that need to be reset()
-
-  private final Expr limitExpr_;
-  private final Expr offsetExpr_;
-  private long limit_;
-  private long offset_;
-  private boolean isAnalyzed_;
-
-  // END: Members that need to be reset()
-  /////////////////////////////////////////
-
-  /**
-   * Constructs the LimitElement.
-   * @param limitExpr The limit expression. May be null if there is no LIMIT clause.
-   * @param offsetExpr The offset expression. May be null if there is no OFFSET clause.
-   */
-  public LimitElement(Expr limitExpr, Expr offsetExpr) {
-    this.limitExpr_ = limitExpr;
-    this.offsetExpr_ = offsetExpr;
-    isAnalyzed_ = false;
-    limit_ = -1;
-    offset_ = 0;
-  }
-
-  /**
-   * Copy c'tor used in clone().
-   */
-  protected LimitElement(LimitElement other) {
-    limitExpr_ = (other.limitExpr_ != null) ? other.limitExpr_.clone() : null;
-    offsetExpr_ = (other.offsetExpr_ != null) ? other.offsetExpr_.clone() : null;
-    limit_ = other.limit_;
-    offset_ = other.offset_;
-    isAnalyzed_ = other.isAnalyzed_;
-  }
-
-  public Expr getLimitExpr() { return limitExpr_; }
-  public Expr getOffsetExpr() { return offsetExpr_; }
-
-  /**
-   * Returns the integer limit, evaluated from the limit expression. Must call analyze()
-   * first. If no limit was set, then -1 is returned.
-   */
-  public long getLimit() {
-    Preconditions.checkState(isAnalyzed_);
-    return limit_;
-  }
-
-  public boolean hasLimit() {
-    Preconditions.checkState(isAnalyzed_);
-    return limit_ != -1;
-  }
-
-  /**
-   * Returns the integer offset, evaluated from the offset expression. Must call
-   * analyze() first. If no offsetExpr exists, then 0 (the default offset) is returned.
-   */
-  public long getOffset() {
-    Preconditions.checkState(isAnalyzed_);
-    return offset_;
-  }
-
-  public String toSql() {
-    StringBuilder sb = new StringBuilder();
-    if (limitExpr_ != null) {
-      sb.append(" LIMIT ");
-      sb.append(limitExpr_.toSql());
-    }
-    // Don't add the offset if it is the default value. However, we do print it if it
-    // hasn't been analyzed yet because we need to output the expression used in errors.
-    if (offsetExpr_ != null && (offset_ != 0 || !isAnalyzed_)) {
-      sb.append(" OFFSET ");
-      sb.append(offsetExpr_.toSql());
-    }
-    return sb.toString();
-  }
-
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    isAnalyzed_ = true;
-    if (limitExpr_ != null) {
-      if (!limitExpr_.isConstant()) {
-        throw new AnalysisException("LIMIT expression must be a constant expression: " +
-            limitExpr_.toSql());
-      }
-
-      limitExpr_.analyze(analyzer);
-      if (!limitExpr_.getType().isIntegerType()) {
-        throw new AnalysisException("LIMIT expression must be an integer type but is '" +
-            limitExpr_.getType() + "': " + limitExpr_.toSql());
-      }
-      limit_ = evalIntegerExpr(analyzer, limitExpr_, "LIMIT");
-    }
-    if (limit_ == 0) analyzer.setHasEmptyResultSet();
-
-    if (offsetExpr_ != null) {
-      if (!offsetExpr_.isConstant()) {
-        throw new AnalysisException("OFFSET expression must be a constant expression: " +
-            offsetExpr_.toSql());
-      }
-
-      offsetExpr_.analyze(analyzer);
-      if (!offsetExpr_.getType().isIntegerType()) {
-        throw new AnalysisException("OFFSET expression must be an integer type but " +
-            "is '" + offsetExpr_.getType() + "': " + offsetExpr_.toSql());
-      }
-      offset_ = evalIntegerExpr(analyzer, offsetExpr_, "OFFSET");
-    }
-  }
-
-  /**
-   * Evaluates an expression to a non-negative integral value, returned as a long. Throws
-   * if the expression cannot be evaluated, if the value evaluates to null, or if the
-   * result is negative. The 'name' parameter is used in exception messages, e.g.
-   * "LIMIT expression evaluates to NULL".
-   */
-  private static long evalIntegerExpr(Analyzer analyzer, Expr expr, String name)
-      throws AnalysisException {
-    TColumnValue val = null;
-    try {
-      val = FeSupport.EvalConstExpr(expr, analyzer.getQueryCtx());
-    } catch (InternalException e) {
-      throw new AnalysisException("Failed to evaluate expr: " + expr.toSql(), e);
-    }
-    long value;
-    if (val.isSetLong_val()) {
-      value = val.getLong_val();
-    } else if (val.isSetInt_val()) {
-      value = val.getInt_val();
-    } else if (val.isSetShort_val()) {
-      value = val.getShort_val();
-    } else if (val.isSetByte_val()) {
-      value = val.getByte_val();
-    } else {
-      throw new AnalysisException(name + " expression evaluates to NULL: " +
-          expr.toSql());
-    }
-    if (value < 0) {
-      throw new AnalysisException(name + " must be a non-negative integer: " +
-          expr.toSql() + " = " + value);
-    }
-    return value;
-  }
-
-  @Override
-  public LimitElement clone() { return new LimitElement(this); }
-
-  public void reset() {
-    isAnalyzed_ = false;
-    limit_ = -1;
-    offset_ = 0;
-    if (limitExpr_ != null) limitExpr_.reset();
-    if (offsetExpr_ != null) offsetExpr_.reset();
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/LiteralExpr.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/LiteralExpr.java b/fe/src/main/java/com/cloudera/impala/analysis/LiteralExpr.java
deleted file mode 100644
index f5eedbb..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/LiteralExpr.java
+++ /dev/null
@@ -1,242 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.math.BigDecimal;
-import java.math.BigInteger;
-
-import com.cloudera.impala.catalog.ScalarType;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.common.NotImplementedException;
-import com.cloudera.impala.service.FeSupport;
-import com.cloudera.impala.thrift.TColumnValue;
-import com.cloudera.impala.thrift.TExprNode;
-import com.cloudera.impala.thrift.TQueryCtx;
-import com.google.common.base.Preconditions;
-
-/**
- * Representation of a literal expression. Literals are comparable to allow
- * ordering of HdfsPartitions whose partition-key values are represented as literals.
- */
-public abstract class LiteralExpr extends Expr implements Comparable<LiteralExpr> {
-
-  public LiteralExpr() {
-    numDistinctValues_ = 1;
-  }
-
-  /**
-   * Copy c'tor used in clone().
-   */
-  protected LiteralExpr(LiteralExpr other) {
-    super(other);
-  }
-
-  /**
-   * Returns an analyzed literal of 'type'. Returns null for types that do not have a
-   * LiteralExpr subclass, e.g. TIMESTAMP.
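-   * For example, create("123", Type.BIGINT) returns a NumericLiteral cast to BIGINT,
-   * while any TIMESTAMP value returns null.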
-   */
-  public static LiteralExpr create(String value, Type type) throws AnalysisException {
-    Preconditions.checkArgument(type.isValid());
-    LiteralExpr e = null;
-    switch (type.getPrimitiveType()) {
-      case NULL_TYPE:
-        e = new NullLiteral();
-        break;
-      case BOOLEAN:
-        e = new BoolLiteral(value);
-        break;
-      case TINYINT:
-      case SMALLINT:
-      case INT:
-      case BIGINT:
-      case FLOAT:
-      case DOUBLE:
-      case DECIMAL:
-        e = new NumericLiteral(value, type);
-        break;
-      case STRING:
-      case VARCHAR:
-      case CHAR:
-        e = new StringLiteral(value);
-        break;
-      case DATE:
-      case DATETIME:
-      case TIMESTAMP:
-        // TODO: we support TIMESTAMP but there is no way to specify it in SQL.
-        return null;
-      default:
-        Preconditions.checkState(false,
-            String.format("Literals of type '%s' not supported.", type.toSql()));
-    }
-    e.analyze(null);
-    // Need to cast since we cannot infer the type from the value. E.g., the value
-    // may be parsed as a TINYINT when a BIGINT is needed.
-    return (LiteralExpr) e.uncheckedCastTo(type);
-  }
-
-  /**
-   * Returns an analyzed literal from the thrift object.
-   */
-  public static LiteralExpr fromThrift(TExprNode exprNode, Type colType) {
-    try {
-      LiteralExpr result = null;
-      switch (exprNode.node_type) {
-        case FLOAT_LITERAL:
-          result = LiteralExpr.create(
-              Double.toString(exprNode.float_literal.value), colType);
-          break;
-        case DECIMAL_LITERAL:
-          byte[] bytes = exprNode.decimal_literal.getValue();
-          BigDecimal val = new BigDecimal(new BigInteger(bytes));
-          ScalarType decimalType = (ScalarType) colType;
-          // We store the decimal as the unscaled bytes. Need to adjust for the scale.
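-          // For example, an unscaled value of 12345 with scale 2 represents 123.45.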
-          val = val.movePointLeft(decimalType.decimalScale());
-          result = new NumericLiteral(val, colType);
-          break;
-        case INT_LITERAL:
-          result = LiteralExpr.create(
-              Long.toString(exprNode.int_literal.value), colType);
-          break;
-        case STRING_LITERAL:
-          result = LiteralExpr.create(exprNode.string_literal.value, colType);
-          break;
-        case BOOL_LITERAL:
-          result =  LiteralExpr.create(
-              Boolean.toString(exprNode.bool_literal.value), colType);
-          break;
-        case NULL_LITERAL:
-          return NullLiteral.create(colType);
-        default:
-          throw new UnsupportedOperationException("Unsupported partition key type: " +
-              exprNode.node_type);
-      }
-      Preconditions.checkNotNull(result);
-      result.analyze(null);
-      return result;
-    } catch (Exception e) {
-      throw new IllegalStateException("Error creating LiteralExpr: ", e);
-    }
-  }
-
-  // Returns the string representation of the literal's value. Used when passing
-  // literal values to the metastore rather than to Impala backends. This is similar to
-  // the toSql() method, but does not perform any formatting of the string values. Neither
-  // method unescapes string values.
-  public abstract String getStringValue();
-
-  // Swaps the sign of numeric literals.
-  // Throws for non-numeric literals.
-  public void swapSign() throws NotImplementedException {
-    throw new NotImplementedException("swapSign() only implemented for numeric" +
-        "literals");
-  }
-
-  /**
-   * Evaluates the given constant expr and returns its result as a LiteralExpr.
-   * Assumes expr has been analyzed. Returns constExpr if it is already a LiteralExpr.
-   * Returns null for types that do not have a LiteralExpr subclass, e.g. TIMESTAMP.
-   * TODO: Support non-scalar types.
-   */
-  public static LiteralExpr create(Expr constExpr, TQueryCtx queryCtx)
-      throws AnalysisException {
-    Preconditions.checkState(constExpr.isConstant());
-    Preconditions.checkState(constExpr.getType().isValid());
-    if (constExpr instanceof LiteralExpr) return (LiteralExpr) constExpr;
-
-    TColumnValue val = null;
-    try {
-      val = FeSupport.EvalConstExpr(constExpr, queryCtx);
-    } catch (InternalException e) {
-      throw new AnalysisException(String.format("Failed to evaluate expr '%s'",
-          constExpr.toSql()), e);
-    }
-
-    LiteralExpr result = null;
-    switch (constExpr.getType().getPrimitiveType()) {
-      case NULL_TYPE:
-        result = new NullLiteral();
-        break;
-      case BOOLEAN:
-        if (val.isBool_val()) result = new BoolLiteral(val.bool_val);
-        break;
-      case TINYINT:
-        if (val.isSetByte_val()) {
-          result = new NumericLiteral(BigDecimal.valueOf(val.byte_val));
-        }
-        break;
-      case SMALLINT:
-        if (val.isSetShort_val()) {
-          result = new NumericLiteral(BigDecimal.valueOf(val.short_val));
-        }
-        break;
-      case INT:
-        if (val.isSetInt_val()) {
-          result = new NumericLiteral(BigDecimal.valueOf(val.int_val));
-        }
-        break;
-      case BIGINT:
-        if (val.isSetLong_val()) {
-          result = new NumericLiteral(BigDecimal.valueOf(val.long_val));
-        }
-        break;
-      case FLOAT:
-      case DOUBLE:
-        if (val.isSetDouble_val()) {
-          result =
-              new NumericLiteral(new BigDecimal(val.double_val), constExpr.getType());
-        }
-        break;
-      case DECIMAL:
-        if (val.isSetString_val()) {
-          result =
-              new NumericLiteral(new BigDecimal(val.string_val), constExpr.getType());
-        }
-        break;
-      case STRING:
-      case VARCHAR:
-      case CHAR:
-        if (val.isSetString_val()) result = new StringLiteral(val.string_val);
-        break;
-      case DATE:
-      case DATETIME:
-      case TIMESTAMP:
-        return null;
-      default:
-        Preconditions.checkState(false,
-            String.format("Literals of type '%s' not supported.",
-                constExpr.getType().toSql()));
-    }
-    // None of the fields in the thrift struct were set indicating a NULL.
-    if (result == null) result = new NullLiteral();
-
-    result.analyze(null);
-    return (LiteralExpr)result;
-  }
-
-  // Order NullLiterals based on the SQL ORDER BY default behavior: NULLS LAST.
-  @Override
-  public int compareTo(LiteralExpr other) {
-    if (this instanceof NullLiteral && other instanceof NullLiteral) return 0;
-    if (this instanceof NullLiteral) return -1;
-    if (other instanceof NullLiteral) return 1;
-    if (getClass() != other.getClass()) return -1;
-    return 0;
-  }
-}
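
A minimal usage sketch of the LiteralExpr factory removed above (not part of the
patch; the catalog Type constants and the surrounding analyzer context are assumed):

    // Build a typed literal from a partition-key string; create() parses the
    // value and casts it to the requested type.
    LiteralExpr partValue = LiteralExpr.create("42", Type.BIGINT);
    // create() returns null for types with no literal form (DATE, DATETIME,
    // TIMESTAMP), so callers must check the result before using it.
    if (partValue == null) {
      throw new AnalysisException("Type has no literal representation");
    }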

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/LoadDataStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/LoadDataStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/LoadDataStmt.java
deleted file mode 100644
index 109d70a..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/LoadDataStmt.java
+++ /dev/null
@@ -1,241 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.s3a.S3AFileSystem;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.catalog.HdfsFileFormat;
-import com.cloudera.impala.catalog.HdfsPartition;
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.FileSystemUtil;
-import com.cloudera.impala.thrift.ImpalaInternalServiceConstants;
-import com.cloudera.impala.thrift.TLoadDataReq;
-import com.cloudera.impala.thrift.TTableName;
-import com.cloudera.impala.util.TAccessLevelUtil;
-import com.cloudera.impala.util.FsPermissionChecker;
-import com.google.common.base.Preconditions;
-
-/*
- * Represents a LOAD DATA statement for moving data into an existing table:
- * LOAD DATA INPATH 'filepath' [OVERWRITE] INTO TABLE <table name>
- * [PARTITION (partcol1=val1, partcol2=val2 ...)]
- *
- * The LOAD DATA operation supports loading (moving) a single file or all files in a
- * given source directory to a table or partition location. If OVERWRITE is true, all
- * existing files in the destination will be removed before moving the new data in.
- * If OVERWRITE is false, existing files will be preserved. If there are any file name
- * conflicts, the new files will be uniquified by inserting a UUID into the file name
- * (preserving the extension).
- * Loading hidden files is not supported and any hidden files in the source or
- * destination are preserved, even if OVERWRITE is true.
- */
-public class LoadDataStmt extends StatementBase {
-  private final TableName tableName_;
-  private final HdfsUri sourceDataPath_;
-  private final PartitionSpec partitionSpec_;
-  private final boolean overwrite_;
-
-  // Set during analysis
-  private String dbName_;
-
-  public LoadDataStmt(TableName tableName, HdfsUri sourceDataPath, boolean overwrite,
-      PartitionSpec partitionSpec) {
-    Preconditions.checkNotNull(tableName);
-    Preconditions.checkNotNull(sourceDataPath);
-    this.tableName_ = tableName;
-    this.sourceDataPath_ = sourceDataPath;
-    this.overwrite_ = overwrite;
-    this.partitionSpec_ = partitionSpec;
-  }
-
-  public String getTbl() {
-    return tableName_.getTbl();
-  }
-
-  public String getDb() {
-    Preconditions.checkNotNull(dbName_);
-    return dbName_;
-  }
-
-  /*
-   * Print SQL syntax corresponding to this node.
-   * @see com.cloudera.impala.parser.ParseNode#toSql()
-   */
-  @Override
-  public String toSql() {
-    StringBuilder sb = new StringBuilder("LOAD DATA INPATH '");
-    sb.append(sourceDataPath_ + "' ");
-    if (overwrite_) sb.append("OVERWRITE ");
-    sb.append("INTO TABLE " + tableName_.toString());
-    if (partitionSpec_ != null) sb.append(" " + partitionSpec_.toSql());
-    return sb.toString();
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    dbName_ = analyzer.getTargetDbName(tableName_);
-    Table table = analyzer.getTable(tableName_, Privilege.INSERT);
-    if (!(table instanceof HdfsTable)) {
-      throw new AnalysisException("LOAD DATA only supported for HDFS tables: " +
-          dbName_ + "." + getTbl());
-    }
-
-    // Analyze the partition spec, if one was specified.
-    if (partitionSpec_ != null) {
-      partitionSpec_.setTableName(tableName_);
-      partitionSpec_.setPartitionShouldExist();
-      partitionSpec_.setPrivilegeRequirement(Privilege.INSERT);
-      partitionSpec_.analyze(analyzer);
-    } else {
-      if (table.getMetaStoreTable().getPartitionKeysSize() > 0) {
-        throw new AnalysisException("Table is partitioned but no partition spec was " +
-            "specified: " + dbName_ + "." + getTbl());
-      }
-    }
-    analyzePaths(analyzer, (HdfsTable) table);
-  }
-
-  /**
-   * Check to see if Impala has the necessary permissions to access the source and dest
-   * paths for this LOAD statement (which maps onto a sequence of file move operations,
-   * with the requisite permission requirements), and check to see if all files to be
-   * moved are in a format that Impala understands. Errors are raised as AnalysisExceptions.
-   */
-  private void analyzePaths(Analyzer analyzer, HdfsTable hdfsTable)
-      throws AnalysisException {
-    // The user must have permission to access the source location. Since the files will
-    // be moved from this location, the user needs to have all permission.
-    sourceDataPath_.analyze(analyzer, Privilege.ALL);
-
-    // Catch all exceptions thrown by accessing files, and rethrow as AnalysisExceptions.
-    try {
-      Path source = sourceDataPath_.getPath();
-      FileSystem fs = source.getFileSystem(FileSystemUtil.getConfiguration());
-      if (!(fs instanceof DistributedFileSystem) && !(fs instanceof S3AFileSystem)) {
-        throw new AnalysisException(String.format("INPATH location '%s' " +
-            "must point to an HDFS or S3A filesystem.", sourceDataPath_));
-      }
-      if (!fs.exists(source)) {
-        throw new AnalysisException(String.format(
-            "INPATH location '%s' does not exist.", sourceDataPath_));
-      }
-
-      // If the source file is a directory, we must be able to read from and write to
-      // it. If the source file is a file, we must be able to read from it, and write to
-      // its parent directory (in order to delete the file as part of the move operation).
-      FsPermissionChecker checker = FsPermissionChecker.getInstance();
-
-      if (fs.isDirectory(source)) {
-        if (FileSystemUtil.getTotalNumVisibleFiles(source) == 0) {
-          throw new AnalysisException(String.format(
-              "INPATH location '%s' contains no visible files.", sourceDataPath_));
-        }
-        if (FileSystemUtil.containsVisibleSubdirectory(source)) {
-          throw new AnalysisException(String.format(
-              "INPATH location '%s' cannot contain non-hidden subdirectories.",
-              sourceDataPath_));
-        }
-        if (!checker.getPermissions(fs, source).checkPermissions(
-            FsAction.READ_WRITE)) {
-          throw new AnalysisException(String.format("Unable to LOAD DATA from %s " +
-              "because Impala does not have READ and WRITE permissions on this directory",
-              source));
-        }
-      } else {
-        // INPATH names a file.
-        if (FileSystemUtil.isHiddenFile(source.getName())) {
-          throw new AnalysisException(String.format(
-              "INPATH location '%s' points to a hidden file.", source));
-        }
-
-        if (!checker.getPermissions(fs, source.getParent()).checkPermissions(
-            FsAction.WRITE)) {
-          throw new AnalysisException(String.format("Unable to LOAD DATA from %s " +
-              "because Impala does not have WRITE permissions on its parent " +
-              "directory %s", source, source.getParent()));
-        }
-
-        if (!checker.getPermissions(fs, source).checkPermissions(
-            FsAction.READ)) {
-          throw new AnalysisException(String.format("Unable to LOAD DATA from %s " +
-              "because Impala does not have READ permissions on this file", source));
-        }
-      }
-
-      String noWriteAccessErrorMsg = String.format("Unable to LOAD DATA into " +
-          "target table (%s) because Impala does not have WRITE access to HDFS " +
-          "location: ", hdfsTable.getFullName());
-
-      HdfsPartition partition;
-      String location;
-      if (partitionSpec_ != null) {
-        partition = hdfsTable.getPartition(partitionSpec_.getPartitionSpecKeyValues());
-        location = partition.getLocation();
-        if (!TAccessLevelUtil.impliesWriteAccess(partition.getAccessLevel())) {
-          throw new AnalysisException(noWriteAccessErrorMsg + location);
-        }
-      } else {
-        // "default" partition
-        partition = hdfsTable.getPartitionMap().get(
-            ImpalaInternalServiceConstants.DEFAULT_PARTITION_ID);
-        location = hdfsTable.getLocation();
-        if (!hdfsTable.hasWriteAccess()) {
-          throw new AnalysisException(noWriteAccessErrorMsg + hdfsTable.getLocation());
-        }
-      }
-      Preconditions.checkNotNull(partition);
-
-      // Verify the files being loaded are supported.
-      for (FileStatus fStatus: fs.listStatus(source)) {
-        if (fs.isDirectory(fStatus.getPath())) continue;
-        StringBuilder errorMsg = new StringBuilder();
-        HdfsFileFormat fileFormat = partition.getInputFormatDescriptor().getFileFormat();
-        if (!fileFormat.isFileCompressionTypeSupported(fStatus.getPath().toString(),
-          errorMsg)) {
-          throw new AnalysisException(errorMsg.toString());
-        }
-      }
-    } catch (FileNotFoundException e) {
-      throw new AnalysisException("File not found: " + e.getMessage(), e);
-    } catch (IOException e) {
-      throw new AnalysisException("Error accessing filesystem: " + e.getMessage(), e);
-    }
-  }
-
-  public TLoadDataReq toThrift() {
-    TLoadDataReq loadDataReq = new TLoadDataReq();
-    loadDataReq.setTable_name(new TTableName(getDb(), getTbl()));
-    loadDataReq.setSource_path(sourceDataPath_.toString());
-    loadDataReq.setOverwrite(overwrite_);
-    if (partitionSpec_ != null) {
-      loadDataReq.setPartition_spec(partitionSpec_.toThrift());
-    }
-    return loadDataReq;
-  }
-}
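
For orientation, a hedged sketch of how the statement above is constructed and
rendered (not from this commit; the TableName and HdfsUri constructor shapes and
the path are illustrative assumptions):

    // Represents: LOAD DATA INPATH '/staging/new_data' INTO TABLE db.t
    LoadDataStmt stmt = new LoadDataStmt(
        new TableName("db", "t"),          // assumed (db, table) constructor
        new HdfsUri("/staging/new_data"),  // assumed single-string constructor
        /* overwrite */ false,
        /* partitionSpec */ null);
    // analyze(analyzer) must run before toThrift(); toSql() round-trips the syntax.
    String sql = stmt.toSql();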


http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/catalog/HdfsFileFormat.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/catalog/HdfsFileFormat.java b/fe/src/main/java/com/cloudera/impala/catalog/HdfsFileFormat.java
deleted file mode 100644
index 475a8d7..0000000
--- a/fe/src/main/java/com/cloudera/impala/catalog/HdfsFileFormat.java
+++ /dev/null
@@ -1,261 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.catalog;
-
-import java.util.List;
-import java.util.Map;
-
-import com.cloudera.impala.thrift.THdfsFileFormat;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Lists;
-
-/**
- * Supported HDFS file formats. Every file format specifies:
- * 1) the input format class
- * 2) the output format class
- * 3) the serialization library class
- * 4) whether scanning complex types from it is supported
- *
- * Important note: Always keep this consistent with the classes used in Hive.
- */
-public enum HdfsFileFormat {
-  RC_FILE("org.apache.hadoop.hive.ql.io.RCFileInputFormat",
-      "org.apache.hadoop.hive.ql.io.RCFileOutputFormat",
-      "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe",
-      false, true),
-  TEXT("org.apache.hadoop.mapred.TextInputFormat",
-      "org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat",
-      "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe",
-      false, false),
-  LZO_TEXT("com.hadoop.mapred.DeprecatedLzoTextInputFormat",
-      "org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat",
-      "", false, false),
-  SEQUENCE_FILE("org.apache.hadoop.mapred.SequenceFileInputFormat",
-      "org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat",
-      "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe", false,
-      true),
-  AVRO("org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat",
-      "org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat",
-      "org.apache.hadoop.hive.serde2.avro.AvroSerDe",
-      false, false),
-  PARQUET("org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat",
-      "org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat",
-      "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe",
-      true, true);
-
-  private final String inputFormat_;
-  private final String outputFormat_;
-  private final String serializationLib_;
-
-  // Indicates whether we support scanning complex types for this file format.
-  private final boolean isComplexTypesSupported_;
-
-  // Indicates whether the file format can skip complex columns in scans and just
-  // materialize scalar typed columns. Ignored if isComplexTypesSupported_ is true.
-  // TODO: Remove this once we support complex types for all file formats.
-  private final boolean canSkipColumnTypes_;
-
-  HdfsFileFormat(String inputFormat, String outputFormat, String serializationLib,
-      boolean isComplexTypesSupported, boolean canSkipColumnTypes) {
-    inputFormat_ = inputFormat;
-    outputFormat_ = outputFormat;
-    serializationLib_ = serializationLib;
-    isComplexTypesSupported_ = isComplexTypesSupported;
-    canSkipColumnTypes_ = canSkipColumnTypes;
-  }
-
-  public String inputFormat() { return inputFormat_; }
-  public String outputFormat() { return outputFormat_; }
-  public String serializationLib() { return serializationLib_; }
-
-  // Impala supports legacy Parquet input formats and treats them internally as the most
-  // modern Parquet input format.
-  private static final String[] PARQUET_LEGACY_INPUT_FORMATS = {
-      "com.cloudera.impala.hive.serde.ParquetInputFormat",
-      "parquet.hive.DeprecatedParquetInputFormat",
-      "parquet.hive.MapredParquetInputFormat"
-  };
-
-  private static final Map<String, HdfsFileFormat> VALID_INPUT_FORMATS =
-      ImmutableMap.<String, HdfsFileFormat>builder()
-          .put(RC_FILE.inputFormat(), RC_FILE)
-          .put(TEXT.inputFormat(), TEXT)
-          .put(LZO_TEXT.inputFormat(), TEXT)
-          .put(SEQUENCE_FILE.inputFormat(), SEQUENCE_FILE)
-          .put(AVRO.inputFormat(), AVRO)
-          .put(PARQUET.inputFormat(), PARQUET)
-          .put(PARQUET_LEGACY_INPUT_FORMATS[0], PARQUET)
-          .put(PARQUET_LEGACY_INPUT_FORMATS[1], PARQUET)
-          .put(PARQUET_LEGACY_INPUT_FORMATS[2], PARQUET)
-          .build();
-
-  /**
-   * Returns true if the string describes an input format class that we support.
-   */
-  public static boolean isHdfsInputFormatClass(String inputFormatClass) {
-    return VALID_INPUT_FORMATS.containsKey(inputFormatClass);
-  }
-
-  /**
-   * Returns the file format associated with the input format class, or null if
-   * the input format class is not supported.
-   */
-  public static HdfsFileFormat fromHdfsInputFormatClass(String inputFormatClass) {
-    Preconditions.checkNotNull(inputFormatClass);
-    return VALID_INPUT_FORMATS.get(inputFormatClass);
-  }
-
-  /**
-   * Returns the corresponding enum for a SerDe class name. If classname is not one
-   * of our supported formats, throws an IllegalArgumentException like Enum.valueOf
-   */
-  public static HdfsFileFormat fromJavaClassName(String className) {
-    Preconditions.checkNotNull(className);
-    if (isHdfsInputFormatClass(className)) return VALID_INPUT_FORMATS.get(className);
-    throw new IllegalArgumentException(className);
-  }
-
-  public static HdfsFileFormat fromThrift(THdfsFileFormat thriftFormat) {
-    switch (thriftFormat) {
-      case RC_FILE: return HdfsFileFormat.RC_FILE;
-      case TEXT: return HdfsFileFormat.TEXT;
-      case SEQUENCE_FILE: return HdfsFileFormat.SEQUENCE_FILE;
-      case AVRO: return HdfsFileFormat.AVRO;
-      case PARQUET: return HdfsFileFormat.PARQUET;
-      default:
-        throw new RuntimeException("Unknown THdfsFileFormat: "
-            + thriftFormat + " - should never happen!");
-    }
-  }
-
-  public THdfsFileFormat toThrift() {
-    switch (this) {
-      case RC_FILE: return THdfsFileFormat.RC_FILE;
-      case TEXT: return THdfsFileFormat.TEXT;
-      case SEQUENCE_FILE: return THdfsFileFormat.SEQUENCE_FILE;
-      case AVRO: return THdfsFileFormat.AVRO;
-      case PARQUET: return THdfsFileFormat.PARQUET;
-      default:
-        throw new RuntimeException("Unknown HdfsFormat: "
-            + this + " - should never happen!");
-    }
-  }
-
-  public String toSql(HdfsCompression compressionType) {
-    switch (this) {
-      case RC_FILE: return "RCFILE";
-      case TEXT:
-        if (compressionType == HdfsCompression.LZO ||
-            compressionType == HdfsCompression.LZO_INDEX) {
-          // TODO: Update this when we can write LZO text.
-          // It is not currently possible to create a table with LZO compressed text files
-          // in Impala, but this is valid in Hive.
-          return String.format("INPUTFORMAT '%s' OUTPUTFORMAT '%s'",
-              LZO_TEXT.inputFormat(), LZO_TEXT.outputFormat());
-        }
-        return "TEXTFILE";
-      case SEQUENCE_FILE: return "SEQUENCEFILE";
-      case AVRO: return "AVRO";
-      case PARQUET: return "PARQUET";
-      default:
-        throw new RuntimeException("Unknown HdfsFormat: "
-            + this + " - should never happen!");
-    }
-  }
-
-  /*
-   * Checks whether a file is supported in Impala based on the file extension.
-   * Returns true if the file format is supported. If the file format is not
-   * supported, then it returns false and 'errorMsg' contains details on the
-   * incompatibility.
-   *
-   * Impala supports LZO, GZIP, SNAPPY and BZIP2 on text files for partitions that have
-   * been declared in the metastore as TEXT. LZO files can have their own input format.
-   * For now, raise an error on any other type.
-   */
-  public boolean isFileCompressionTypeSupported(String fileName,
-      StringBuilder errorMsg) {
-    // Check to see if the file has a compression suffix.
-    // TODO: Add LZ4
-    HdfsCompression compressionType = HdfsCompression.fromFileName(fileName);
-    switch (compressionType) {
-      case LZO:
-      case LZO_INDEX:
-        // Index files are read by the LZO scanner directly.
-      case GZIP:
-      case SNAPPY:
-      case BZIP2:
-      case NONE:
-        return true;
-      case DEFLATE:
-        // TODO: Ensure that text/deflate works correctly
-        if (this == TEXT) {
-          errorMsg.append("Expected compressed text file with {.lzo,.gzip,.snappy,.bz2} "
-              + "suffix: " + fileName);
-          return false;
-        } else {
-          return true;
-        }
-      default:
-        errorMsg.append("Unknown compression suffix: " + fileName);
-        return false;
-    }
-  }
-
-  /**
-   * Returns true if this file format with the given compression format is splittable.
-   */
-  public boolean isSplittable(HdfsCompression compression) {
-    switch (this) {
-      case TEXT:
-        return compression == HdfsCompression.NONE;
-      case RC_FILE:
-      case SEQUENCE_FILE:
-      case AVRO:
-      case PARQUET:
-        return true;
-      default:
-        throw new RuntimeException("Unknown HdfsFormat: "
-            + this + " - should never happen!");
-    }
-  }
-
-  /**
-   * Returns true if Impala supports scanning complex-typed columns
-   * from a table/partition with this file format.
-   */
-  public boolean isComplexTypesSupported() { return isComplexTypesSupported_; }
-
-  /**
-   * Returns true if this file format can skip complex typed columns and materialize
-   * only scalar typed columns.
-   */
-  public boolean canSkipComplexTypes() { return canSkipColumnTypes_; }
-
-  /**
-   * Returns a list with all formats for which isComplexTypesSupported() is true.
-   */
-  public static List<HdfsFileFormat> complexTypesFormats() {
-    List<HdfsFileFormat> result = Lists.newArrayList();
-    for (HdfsFileFormat f: values()) {
-      if (f.isComplexTypesSupported()) result.add(f);
-    }
-    return result;
-  }
-}
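
A short sketch combining the lookup and capability checks defined above (not part
of the patch; it assumes HdfsCompression.fromFileName() maps a ".gz" suffix to GZIP):

    // Map a metastore input-format class back to the enum; the legacy Parquet
    // classes in PARQUET_LEGACY_INPUT_FORMATS resolve to PARQUET the same way.
    HdfsFileFormat fmt = HdfsFileFormat.fromHdfsInputFormatClass(
        "org.apache.hadoop.mapred.TextInputFormat");  // -> TEXT
    StringBuilder errorMsg = new StringBuilder();
    // GZIP-compressed text is readable, but only uncompressed text is splittable.
    boolean readable = fmt.isFileCompressionTypeSupported("part-000.gz", errorMsg);
    boolean splittable = fmt.isSplittable(HdfsCompression.GZIP);  // false for TEXT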

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/catalog/HdfsPartition.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/catalog/HdfsPartition.java b/fe/src/main/java/com/cloudera/impala/catalog/HdfsPartition.java
deleted file mode 100644
index f408468..0000000
--- a/fe/src/main/java/com/cloudera/impala/catalog/HdfsPartition.java
+++ /dev/null
@@ -1,791 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.catalog;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.atomic.AtomicLong;
-
-import org.apache.commons.lang.ArrayUtils;
-import org.apache.hadoop.fs.Path;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.analysis.LiteralExpr;
-import com.cloudera.impala.analysis.NullLiteral;
-import com.cloudera.impala.analysis.PartitionKeyValue;
-import com.cloudera.impala.analysis.ToSqlUtils;
-import com.cloudera.impala.common.FileSystemUtil;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.thrift.ImpalaInternalServiceConstants;
-import com.cloudera.impala.thrift.TAccessLevel;
-import com.cloudera.impala.thrift.TExpr;
-import com.cloudera.impala.thrift.TExprNode;
-import com.cloudera.impala.thrift.THdfsCompression;
-import com.cloudera.impala.thrift.THdfsFileBlock;
-import com.cloudera.impala.thrift.THdfsFileDesc;
-import com.cloudera.impala.thrift.THdfsPartition;
-import com.cloudera.impala.thrift.TNetworkAddress;
-import com.cloudera.impala.thrift.TPartitionStats;
-import com.cloudera.impala.thrift.TTableStats;
-import com.cloudera.impala.util.HdfsCachingUtil;
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Joiner;
-import com.google.common.base.Objects;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Predicate;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-
-/**
- * Query-relevant information for one table partition. Partitions are comparable
- * based on their partition-key values. The comparison orders partitions in ascending
- * order with NULLs sorting last. The ordering is useful for displaying partitions
- * in SHOW statements.
- */
-public class HdfsPartition implements Comparable<HdfsPartition> {
-  /**
-   * Metadata for a single file in this partition.
-   * TODO: Do we even need this class? Just get rid of it and use the Thrift version?
-   */
-  static public class FileDescriptor implements Comparable<FileDescriptor> {
-    private final THdfsFileDesc fileDescriptor_;
-
-    public String getFileName() { return fileDescriptor_.getFile_name(); }
-    public long getFileLength() { return fileDescriptor_.getLength(); }
-    public THdfsCompression getFileCompression() {
-      return fileDescriptor_.getCompression();
-    }
-    public long getModificationTime() {
-      return fileDescriptor_.getLast_modification_time();
-    }
-    public List<THdfsFileBlock> getFileBlocks() {
-      return fileDescriptor_.getFile_blocks();
-    }
-
-    public THdfsFileDesc toThrift() { return fileDescriptor_; }
-
-    public FileDescriptor(String fileName, long fileLength, long modificationTime) {
-      Preconditions.checkNotNull(fileName);
-      Preconditions.checkArgument(fileLength >= 0);
-      fileDescriptor_ = new THdfsFileDesc();
-      fileDescriptor_.setFile_name(fileName);
-      fileDescriptor_.setLength(fileLength);
-      fileDescriptor_.setLast_modification_time(modificationTime);
-      fileDescriptor_.setCompression(
-          HdfsCompression.fromFileName(fileName).toThrift());
-      List<THdfsFileBlock> emptyFileBlockList = Lists.newArrayList();
-      fileDescriptor_.setFile_blocks(emptyFileBlockList);
-    }
-
-    private FileDescriptor(THdfsFileDesc fileDesc) {
-      this(fileDesc.getFile_name(), fileDesc.length, fileDesc.last_modification_time);
-      for (THdfsFileBlock block: fileDesc.getFile_blocks()) {
-        fileDescriptor_.addToFile_blocks(block);
-      }
-    }
-
-    public void addFileBlock(FileBlock blockMd) {
-      fileDescriptor_.addToFile_blocks(blockMd.toThrift());
-    }
-
-    public static FileDescriptor fromThrift(THdfsFileDesc desc) {
-      return new FileDescriptor(desc);
-    }
-
-    @Override
-    public String toString() {
-      return Objects.toStringHelper(this)
-          .add("FileName", getFileName())
-          .add("Length", getFileLength()).toString();
-    }
-
-    /**
-     * Orders file descriptors lexicographically by file name.
-     */
-    @Override
-    public int compareTo(FileDescriptor otherFd) {
-      return getFileName().compareTo(otherFd.getFileName());
-    }
-  }
-
-  /**
-   * Represents metadata of a single block replica.
-   */
-  public static class BlockReplica {
-    private final boolean isCached_;
-    private final int hostIdx_;
-
-    /**
-     * Creates a BlockReplica given a host ID/index and a flag specifying whether this
-     * replica is cached. Host IDs are assigned when loading the block metadata in
-     * HdfsTable.
-     */
-    public BlockReplica(int hostIdx, boolean isCached) {
-      hostIdx_ = hostIdx;
-      isCached_ = isCached;
-    }
-
-    /**
-     * Parses the location (an ip address:port string) of the replica and returns a
-     * TNetworkAddress with this information, or null if parsing fails.
-     */
-    public static TNetworkAddress parseLocation(String location) {
-      Preconditions.checkNotNull(location);
-      String[] ip_port = location.split(":");
-      if (ip_port.length != 2) return null;
-      try {
-        return new TNetworkAddress(ip_port[0], Integer.parseInt(ip_port[1]));
-      } catch (NumberFormatException e) {
-        return null;
-      }
-    }
-
-    public boolean isCached() { return isCached_; }
-    public int getHostIdx() { return hostIdx_; }
-  }
-
-  /**
-   * File Block metadata
-   */
-  public static class FileBlock {
-    private final THdfsFileBlock fileBlock_;
-    private boolean isCached_; // Set to true if there is at least one cached replica.
-
-    private FileBlock(THdfsFileBlock fileBlock) {
-      fileBlock_ = fileBlock;
-      isCached_ = false;
-      for (boolean isCached: fileBlock.getIs_replica_cached()) {
-        isCached_ |= isCached;
-      }
-    }
-
-    /**
-     * Construct a FileBlock given the start offset (in bytes) of the file associated
-     * with this block, the length of the block (in bytes), and a list of BlockReplicas.
-     * Does not fill diskIds.
-     */
-    public FileBlock(long offset, long blockLength,
-        List<BlockReplica> replicaHostIdxs) {
-      Preconditions.checkNotNull(replicaHostIdxs);
-      fileBlock_ = new THdfsFileBlock();
-      fileBlock_.setOffset(offset);
-      fileBlock_.setLength(blockLength);
-
-      fileBlock_.setReplica_host_idxs(new ArrayList<Integer>(replicaHostIdxs.size()));
-      fileBlock_.setIs_replica_cached(new ArrayList<Boolean>(replicaHostIdxs.size()));
-      isCached_ = false;
-      for (BlockReplica replica: replicaHostIdxs) {
-        fileBlock_.addToReplica_host_idxs(replica.getHostIdx());
-        fileBlock_.addToIs_replica_cached(replica.isCached());
-        isCached_ |= replica.isCached();
-      }
-    }
-
-    public long getOffset() { return fileBlock_.getOffset(); }
-    public long getLength() { return fileBlock_.getLength(); }
-    // Returns true if there is at least one cached replica.
-    public boolean isCached() { return isCached_; }
-    public List<Integer> getReplicaHostIdxs() {
-      return fileBlock_.getReplica_host_idxs();
-    }
-
-    /**
-     * Populates the given THdfsFileBlock's list of disk ids with the given disk id
-     * values. The number of disk ids must match the number of network addresses
-     * set in the file block.
-     */
-    public static void setDiskIds(int[] diskIds, THdfsFileBlock fileBlock) {
-      Preconditions.checkArgument(
-          diskIds.length == fileBlock.getReplica_host_idxs().size());
-      fileBlock.setDisk_ids(Arrays.asList(ArrayUtils.toObject(diskIds)));
-    }
-
-    /**
-     * Return the disk id of the block in BlockLocation.getNames()[hostIndex]; -1 if
-     * disk id is not supported.
-     */
-    public int getDiskId(int hostIndex) {
-      if (fileBlock_.disk_ids == null) return -1;
-      return fileBlock_.getDisk_ids().get(hostIndex);
-    }
-
-    public boolean isCached(int hostIndex) {
-      return fileBlock_.getIs_replica_cached().get(hostIndex);
-    }
-
-    public THdfsFileBlock toThrift() { return fileBlock_; }
-
-    public static FileBlock fromThrift(THdfsFileBlock thriftFileBlock) {
-      return new FileBlock(thriftFileBlock);
-    }
-
-    @Override
-    public String toString() {
-      return Objects.toStringHelper(this)
-          .add("offset", fileBlock_.offset)
-          .add("length", fileBlock_.length)
-          .add("#disks", fileBlock_.getDisk_idsSize())
-          .toString();
-    }
-  }
-
-  private final HdfsTable table_;
-  private final List<LiteralExpr> partitionKeyValues_;
-  // estimated number of rows in partition; -1: unknown
-  private long numRows_ = -1;
-  private static AtomicLong partitionIdCounter_ = new AtomicLong();
-
-  // A unique ID for each partition, used to identify a partition in the thrift
-  // representation of a table.
-  private final long id_;
-
-  /*
-   * Note: Although you can write multiple formats to a single partition (by changing
-   * the format before each write), Hive won't let you read that data and neither should
-   * we. We should therefore treat mixing formats inside one partition as user error.
-   * It's easy to add per-file metadata to FileDescriptor if this changes.
-   */
-  private final HdfsStorageDescriptor fileFormatDescriptor_;
-  private List<FileDescriptor> fileDescriptors_;
-  private HdfsPartitionLocationCompressor.Location location_;
-  private final static Logger LOG = LoggerFactory.getLogger(HdfsPartition.class);
-  private boolean isDirty_ = false;
-  // True if this partition is marked as cached. Does not necessarily mean the data is
-  // cached.
-  private boolean isMarkedCached_ = false;
-  private final TAccessLevel accessLevel_;
-
-  // (k,v) pairs of parameters for this partition, stored in the HMS. Used by Impala to
-  // store intermediate state for statistics computations.
-  private Map<String, String> hmsParameters_;
-
-  public HdfsStorageDescriptor getInputFormatDescriptor() {
-    return fileFormatDescriptor_;
-  }
-
-  public boolean isDefaultPartition() {
-    return id_ == ImpalaInternalServiceConstants.DEFAULT_PARTITION_ID;
-  }
-
-  /**
-   * Returns true if the partition resides at a location which can be cached (e.g. HDFS).
-   */
-  public boolean isCacheable() {
-    return FileSystemUtil.isPathCacheable(new Path(getLocation()));
-  }
-
-  /**
-   * Return a partition name formed by concatenating partition keys and their values,
-   * compatible with the way Hive names partitions. Reuses Hive's
-   * org.apache.hadoop.hive.common.FileUtils.makePartName() function to build the name
-   * string because there are a number of special cases for how partition names are URL
-   * escaped.
-   * TODO: Consider storing the PartitionKeyValue in HdfsPartition. It would simplify
-   * this code and would be useful in other places, such as fromThrift().
-   */
-  public String getPartitionName() {
-    List<String> partitionCols = Lists.newArrayList();
-    for (int i = 0; i < getTable().getNumClusteringCols(); ++i) {
-      partitionCols.add(getTable().getColumns().get(i).getName());
-    }
-
-    return org.apache.hadoop.hive.common.FileUtils.makePartName(
-        partitionCols, getPartitionValuesAsStrings(true));
-  }
-
-  /**
-   * Returns a list of partition values as strings. If mapNullsToHiveKey is true, any NULL
-   * value is returned as the table's default null partition key string value, otherwise
-   * it is returned as 'NULL'.
-   */
-  public List<String> getPartitionValuesAsStrings(boolean mapNullsToHiveKey) {
-    List<String> ret = Lists.newArrayList();
-    for (LiteralExpr partValue: getPartitionValues()) {
-      if (mapNullsToHiveKey) {
-        ret.add(PartitionKeyValue.getPartitionKeyValueString(
-                partValue, getTable().getNullPartitionKeyValue()));
-      } else {
-        ret.add(partValue.getStringValue());
-      }
-    }
-    return ret;
-  }
-
-  /**
-   * Utility method which returns a string of conjuncts of equality exprs to exactly
-   * select this partition (e.g. ((month=2009) AND (year=2012))).
-   * TODO: Remove this when the TODO elsewhere in this file to save and expose the
-   * list of TPartitionKeyValues has been resolved.
-   */
-  public String getConjunctSql() {
-    List<String> partColSql = Lists.newArrayList();
-    for (Column partCol: getTable().getClusteringColumns()) {
-      partColSql.add(ToSqlUtils.getIdentSql(partCol.getName()));
-    }
-
-    List<String> conjuncts = Lists.newArrayList();
-    for (int i = 0; i < partColSql.size(); ++i) {
-      LiteralExpr partVal = getPartitionValues().get(i);
-      String partValSql = partVal.toSql();
-      if (partVal instanceof NullLiteral || partValSql.isEmpty()) {
-        conjuncts.add(partColSql.get(i) + " IS NULL");
-      } else {
-        conjuncts.add(partColSql.get(i) + "=" + partValSql);
-      }
-    }
-    return "(" + Joiner.on(" AND " ).join(conjuncts) + ")";
-  }
-
-  /**
-   * Returns a string of the form part_key1=value1/part_key2=value2...
-   */
-  public String getValuesAsString() {
-    StringBuilder partDescription = new StringBuilder();
-    for (int i = 0; i < getTable().getNumClusteringCols(); ++i) {
-      String columnName = getTable().getColumns().get(i).getName();
-      String value = PartitionKeyValue.getPartitionKeyValueString(
-          getPartitionValues().get(i),
-          getTable().getNullPartitionKeyValue());
-      partDescription.append(columnName + "=" + value);
-      if (i != getTable().getNumClusteringCols() - 1) partDescription.append("/");
-    }
-    return partDescription.toString();
-  }
-
-  /**
-   * Returns the storage location (HDFS path) of this partition. Should only be called
-   * for partitioned tables.
-   */
-  public String getLocation() {
-    return (location_ != null) ? location_.toString() : null;
-  }
-  public long getId() { return id_; }
-  public HdfsTable getTable() { return table_; }
-  public void setNumRows(long numRows) { numRows_ = numRows; }
-  public long getNumRows() { return numRows_; }
-  public boolean isMarkedCached() { return isMarkedCached_; }
-  void markCached() { isMarkedCached_ = true; }
-
-  /**
-   * Updates the file format of this partition and sets the corresponding input/output
-   * format classes.
-   */
-  public void setFileFormat(HdfsFileFormat fileFormat) {
-    fileFormatDescriptor_.setFileFormat(fileFormat);
-    cachedMsPartitionDescriptor_.sdInputFormat = fileFormat.inputFormat();
-    cachedMsPartitionDescriptor_.sdOutputFormat = fileFormat.outputFormat();
-    cachedMsPartitionDescriptor_.sdSerdeInfo.setSerializationLib(
-        fileFormatDescriptor_.getFileFormat().serializationLib());
-  }
-
-  public HdfsFileFormat getFileFormat() {
-    return fileFormatDescriptor_.getFileFormat();
-  }
-
-  public void setLocation(String place) {
-    location_ = table_.getPartitionLocationCompressor().new Location(place);
-  }
-
-  public org.apache.hadoop.hive.metastore.api.SerDeInfo getSerdeInfo() {
-    return cachedMsPartitionDescriptor_.sdSerdeInfo;
-  }
-
-  // May return null if no per-partition stats were recorded, or if the per-partition
-  // stats could not be deserialised from the parameter map.
-  public TPartitionStats getPartitionStats() {
-    try {
-      return PartitionStatsUtil.partStatsFromParameters(hmsParameters_);
-    } catch (ImpalaException e) {
-      LOG.warn("Could not deserialise incremental stats state for " + getPartitionName() +
-          ", consider DROP INCREMENTAL STATS ... PARTITION ... and recomputing " +
-          "incremental stats for this table.");
-      return null;
-    }
-  }
-
-  public boolean hasIncrementalStats() {
-    TPartitionStats partStats = getPartitionStats();
-    return partStats != null && partStats.intermediate_col_stats != null;
-  }
-
-  /**
-   * Returns the HDFS permissions Impala has to this partition's directory - READ_ONLY,
-   * READ_WRITE, etc.
-   */
-  public TAccessLevel getAccessLevel() { return accessLevel_; }
-
-  /**
-   * Returns the HMS parameter with key 'key' if it exists, otherwise returns null.
-   */
-   public String getParameter(String key) {
-     return hmsParameters_.get(key);
-   }
-
-   public Map<String, String> getParameters() { return hmsParameters_; }
-
-   public void putToParameters(String k, String v) { hmsParameters_.put(k, v); }
-
-  /**
-   * Marks this partition's metadata as "dirty" indicating that changes have been
-   * made and this partition's metadata should not be reused during the next
-   * incremental metadata refresh.
-   */
-  public void markDirty() { isDirty_ = true; }
-  public boolean isDirty() { return isDirty_; }
-
-  /**
-   * Returns an immutable list of partition key expressions
-   */
-  public List<LiteralExpr> getPartitionValues() { return partitionKeyValues_; }
-  public LiteralExpr getPartitionValue(int i) { return partitionKeyValues_.get(i); }
-  public List<HdfsPartition.FileDescriptor> getFileDescriptors() {
-    return fileDescriptors_;
-  }
-  public void setFileDescriptors(List<FileDescriptor> descriptors) {
-    fileDescriptors_ = descriptors;
-  }
-  public long getNumFileDescriptors() {
-    return fileDescriptors_ == null ? 0 : fileDescriptors_.size();
-  }
-
-  public boolean hasFileDescriptors() { return !fileDescriptors_.isEmpty(); }
-
-  // Struct-style class for caching all the information we need to reconstruct an
-  // HMS-compatible Partition object, for use in RPCs to the metastore. We do this rather
-  // than cache the Thrift partition object itself as the latter can be large - thanks
-  // mostly to the inclusion of the full FieldSchema list. This class is read-only - if
-  // any field can be mutated by Impala it should belong to HdfsPartition itself (see
-  // HdfsPartition.location_ for an example).
-  //
-  // TODO: Cache this descriptor in HdfsTable so that identical descriptors are shared
-  // between HdfsPartition instances.
-  // TODO: sdInputFormat and sdOutputFormat can be mutated by Impala when the file format
-  // of a partition changes; move these fields to HdfsPartition.
-  private static class CachedHmsPartitionDescriptor {
-    public String sdInputFormat;
-    public String sdOutputFormat;
-    public final boolean sdCompressed;
-    public final int sdNumBuckets;
-    public final org.apache.hadoop.hive.metastore.api.SerDeInfo sdSerdeInfo;
-    public final List<String> sdBucketCols;
-    public final List<org.apache.hadoop.hive.metastore.api.Order> sdSortCols;
-    public final Map<String, String> sdParameters;
-    public final int msCreateTime;
-    public final int msLastAccessTime;
-
-    public CachedHmsPartitionDescriptor(
-        org.apache.hadoop.hive.metastore.api.Partition msPartition) {
-      org.apache.hadoop.hive.metastore.api.StorageDescriptor sd = null;
-      if (msPartition != null) {
-        sd = msPartition.getSd();
-        msCreateTime = msPartition.getCreateTime();
-        msLastAccessTime = msPartition.getLastAccessTime();
-      } else {
-        msCreateTime = msLastAccessTime = 0;
-      }
-      if (sd != null) {
-        sdInputFormat = sd.getInputFormat();
-        sdOutputFormat = sd.getOutputFormat();
-        sdCompressed = sd.isCompressed();
-        sdNumBuckets = sd.getNumBuckets();
-        sdSerdeInfo = sd.getSerdeInfo();
-        sdBucketCols = ImmutableList.copyOf(sd.getBucketCols());
-        sdSortCols = ImmutableList.copyOf(sd.getSortCols());
-        sdParameters = ImmutableMap.copyOf(sd.getParameters());
-      } else {
-        sdInputFormat = "";
-        sdOutputFormat = "";
-        sdCompressed = false;
-        sdNumBuckets = 0;
-        sdSerdeInfo = null;
-        sdBucketCols = ImmutableList.of();
-        sdSortCols = ImmutableList.of();
-        sdParameters = ImmutableMap.of();
-      }
-    }
-  }
-
-  private final CachedHmsPartitionDescriptor cachedMsPartitionDescriptor_;
-
-  public CachedHmsPartitionDescriptor getCachedMsPartitionDescriptor() {
-    return cachedMsPartitionDescriptor_;
-  }
-
-  /**
-   * Returns a Hive-compatible partition object that may be used in calls to the
-   * metastore.
-   */
-  public org.apache.hadoop.hive.metastore.api.Partition toHmsPartition() {
-    if (cachedMsPartitionDescriptor_ == null) return null;
-    Preconditions.checkNotNull(table_.getNonPartitionFieldSchemas());
-    // Update the serde library class based on the currently used file format.
-    org.apache.hadoop.hive.metastore.api.StorageDescriptor storageDescriptor =
-        new org.apache.hadoop.hive.metastore.api.StorageDescriptor(
-            table_.getNonPartitionFieldSchemas(),
-            getLocation(),
-            cachedMsPartitionDescriptor_.sdInputFormat,
-            cachedMsPartitionDescriptor_.sdOutputFormat,
-            cachedMsPartitionDescriptor_.sdCompressed,
-            cachedMsPartitionDescriptor_.sdNumBuckets,
-            cachedMsPartitionDescriptor_.sdSerdeInfo,
-            cachedMsPartitionDescriptor_.sdBucketCols,
-            cachedMsPartitionDescriptor_.sdSortCols,
-            cachedMsPartitionDescriptor_.sdParameters);
-    org.apache.hadoop.hive.metastore.api.Partition partition =
-        new org.apache.hadoop.hive.metastore.api.Partition(
-            getPartitionValuesAsStrings(true), getTable().getDb().getName(),
-            getTable().getName(), cachedMsPartitionDescriptor_.msCreateTime,
-            cachedMsPartitionDescriptor_.msLastAccessTime, storageDescriptor,
-            getParameters());
-    return partition;
-  }
-
-  private HdfsPartition(HdfsTable table,
-      org.apache.hadoop.hive.metastore.api.Partition msPartition,
-      List<LiteralExpr> partitionKeyValues,
-      HdfsStorageDescriptor fileFormatDescriptor,
-      Collection<HdfsPartition.FileDescriptor> fileDescriptors, long id,
-      HdfsPartitionLocationCompressor.Location location, TAccessLevel accessLevel) {
-    table_ = table;
-    if (msPartition == null) {
-      cachedMsPartitionDescriptor_ = null;
-    } else {
-      cachedMsPartitionDescriptor_ = new CachedHmsPartitionDescriptor(msPartition);
-    }
-    location_ = location;
-    partitionKeyValues_ = ImmutableList.copyOf(partitionKeyValues);
-    fileDescriptors_ = ImmutableList.copyOf(fileDescriptors);
-    fileFormatDescriptor_ = fileFormatDescriptor;
-    id_ = id;
-    accessLevel_ = accessLevel;
-    if (msPartition != null && msPartition.getParameters() != null) {
-      isMarkedCached_ = HdfsCachingUtil.getCacheDirectiveId(
-          msPartition.getParameters()) != null;
-      hmsParameters_ = msPartition.getParameters();
-    } else {
-      hmsParameters_ = Maps.newHashMap();
-    }
-
-    // TODO: instead of raising an exception, we should consider marking this partition
-   * invalid and moving on, so that table loading won't fail and the user can query other
-    // partitions.
-    for (FileDescriptor fileDescriptor: fileDescriptors_) {
-      StringBuilder errorMsg = new StringBuilder();
-      if (!getInputFormatDescriptor().getFileFormat().isFileCompressionTypeSupported(
-          fileDescriptor.getFileName(), errorMsg)) {
-        throw new RuntimeException(errorMsg.toString());
-      }
-    }
-  }
-
-  public HdfsPartition(HdfsTable table,
-      org.apache.hadoop.hive.metastore.api.Partition msPartition,
-      List<LiteralExpr> partitionKeyValues,
-      HdfsStorageDescriptor fileFormatDescriptor,
-      Collection<HdfsPartition.FileDescriptor> fileDescriptors,
-      TAccessLevel accessLevel) {
-    this(table, msPartition, partitionKeyValues, fileFormatDescriptor, fileDescriptors,
-        partitionIdCounter_.getAndIncrement(),
-        table.getPartitionLocationCompressor().new Location(msPartition != null
-                ? msPartition.getSd().getLocation()
-                : table.getLocation()),
-        accessLevel);
-  }
-
-  public static HdfsPartition defaultPartition(
-      HdfsTable table, HdfsStorageDescriptor storageDescriptor) {
-    List<LiteralExpr> emptyExprList = Lists.newArrayList();
-    List<FileDescriptor> emptyFileDescriptorList = Lists.newArrayList();
-    return new HdfsPartition(table, null, emptyExprList,
-        storageDescriptor, emptyFileDescriptorList,
-        ImpalaInternalServiceConstants.DEFAULT_PARTITION_ID, null,
-        TAccessLevel.READ_WRITE);
-  }
-
-  /**
-   * Return the size (in bytes) of all the files inside this partition
-   */
-  public long getSize() {
-    long result = 0;
-    for (HdfsPartition.FileDescriptor fileDescriptor: fileDescriptors_) {
-      result += fileDescriptor.getFileLength();
-    }
-    return result;
-  }
-
-  @Override
-  public String toString() {
-    return Objects.toStringHelper(this)
-      .add("fileDescriptors", fileDescriptors_)
-      .toString();
-  }
-
-  private static Predicate<String> isIncrementalStatsKey = new Predicate<String>() {
-    @Override
-    public boolean apply(String key) {
-      return !(key.startsWith(PartitionStatsUtil.INCREMENTAL_STATS_NUM_CHUNKS)
-          || key.startsWith(PartitionStatsUtil.INCREMENTAL_STATS_CHUNK_PREFIX));
-    }
-  };
-
-  /**
-   * Returns hmsParameters_ after filtering out all the partition
-   * incremental stats information.
-   */
-  private Map<String, String> getFilteredHmsParameters() {
-    return Maps.filterKeys(hmsParameters_, isIncrementalStatsKey);
-  }
-
-  public static HdfsPartition fromThrift(HdfsTable table,
-      long id, THdfsPartition thriftPartition) {
-    HdfsStorageDescriptor storageDesc = new HdfsStorageDescriptor(table.getName(),
-        HdfsFileFormat.fromThrift(thriftPartition.getFileFormat()),
-        thriftPartition.lineDelim,
-        thriftPartition.fieldDelim,
-        thriftPartition.collectionDelim,
-        thriftPartition.mapKeyDelim,
-        thriftPartition.escapeChar,
-        (byte) '"', // TODO: We should probably add quoteChar to THdfsPartition.
-        thriftPartition.blockSize);
-
-    List<LiteralExpr> literalExpr = Lists.newArrayList();
-    if (id != ImpalaInternalServiceConstants.DEFAULT_PARTITION_ID) {
-      List<Column> clusterCols = Lists.newArrayList();
-      for (int i = 0; i < table.getNumClusteringCols(); ++i) {
-        clusterCols.add(table.getColumns().get(i));
-      }
-
-      List<TExprNode> exprNodes = Lists.newArrayList();
-      for (TExpr expr: thriftPartition.getPartitionKeyExprs()) {
-        for (TExprNode node: expr.getNodes()) {
-          exprNodes.add(node);
-        }
-      }
-      Preconditions.checkState(clusterCols.size() == exprNodes.size(),
-          String.format("Number of partition columns (%d) does not match number " +
-              "of partition key expressions (%d)",
-              clusterCols.size(), exprNodes.size()));
-
-      for (int i = 0; i < exprNodes.size(); ++i) {
-        literalExpr.add(LiteralExpr.fromThrift(
-            exprNodes.get(i), clusterCols.get(i).getType()));
-      }
-    }
-
-    List<HdfsPartition.FileDescriptor> fileDescriptors = Lists.newArrayList();
-    if (thriftPartition.isSetFile_desc()) {
-      for (THdfsFileDesc desc: thriftPartition.getFile_desc()) {
-        fileDescriptors.add(HdfsPartition.FileDescriptor.fromThrift(desc));
-      }
-    }
-
-    TAccessLevel accessLevel = thriftPartition.isSetAccess_level() ?
-        thriftPartition.getAccess_level() : TAccessLevel.READ_WRITE;
-    HdfsPartitionLocationCompressor.Location location = thriftPartition.isSetLocation()
-        ? table.getPartitionLocationCompressor().new Location(
-              thriftPartition.getLocation())
-        : null;
-    HdfsPartition partition = new HdfsPartition(table, null, literalExpr, storageDesc,
-        fileDescriptors, id, location, accessLevel);
-    if (thriftPartition.isSetStats()) {
-      partition.setNumRows(thriftPartition.getStats().getNum_rows());
-    }
-    if (thriftPartition.isSetIs_marked_cached()) {
-      partition.isMarkedCached_ = thriftPartition.isIs_marked_cached();
-    }
-
-    if (thriftPartition.isSetHms_parameters()) {
-      partition.hmsParameters_ = thriftPartition.getHms_parameters();
-    } else {
-      partition.hmsParameters_ = Maps.newHashMap();
-    }
-
-    return partition;
-  }
-
-  /**
-   * Checks that this partition's metadata is well formed. This does not necessarily
-   * mean the partition is supported by Impala.
-   * Throws a CatalogException if there are any errors in the partition metadata.
-   */
-  public void checkWellFormed() throws CatalogException {
-    try {
-      // Validate all the partition key/values to ensure you can convert them toThrift()
-      Expr.treesToThrift(getPartitionValues());
-    } catch (Exception e) {
-      throw new CatalogException("Partition (" + getPartitionName() +
-          ") has invalid partition column values: ", e);
-    }
-  }
-
-  public THdfsPartition toThrift(boolean includeFileDesc,
-      boolean includeIncrementalStats) {
-    List<TExpr> thriftExprs = Expr.treesToThrift(getPartitionValues());
-
-    THdfsPartition thriftHdfsPart = new THdfsPartition(
-        fileFormatDescriptor_.getLineDelim(),
-        fileFormatDescriptor_.getFieldDelim(),
-        fileFormatDescriptor_.getCollectionDelim(),
-        fileFormatDescriptor_.getMapKeyDelim(),
-        fileFormatDescriptor_.getEscapeChar(),
-        fileFormatDescriptor_.getFileFormat().toThrift(), thriftExprs,
-        fileFormatDescriptor_.getBlockSize());
-    if (location_ != null) thriftHdfsPart.setLocation(location_.toThrift());
-    thriftHdfsPart.setStats(new TTableStats(numRows_));
-    thriftHdfsPart.setAccess_level(accessLevel_);
-    thriftHdfsPart.setIs_marked_cached(isMarkedCached_);
-    thriftHdfsPart.setId(getId());
-    thriftHdfsPart.setHms_parameters(
-        includeIncrementalStats ? hmsParameters_ : getFilteredHmsParameters());
-    if (includeFileDesc) {
-      // Add block location information
-      for (FileDescriptor fd: fileDescriptors_) {
-        thriftHdfsPart.addToFile_desc(fd.toThrift());
-      }
-    }
-
-    return thriftHdfsPart;
-  }
-
-  /**
-   * Comparison method to allow ordering of HdfsPartitions by their partition-key values.
-   */
-  @Override
-  public int compareTo(HdfsPartition o) {
-    return comparePartitionKeyValues(partitionKeyValues_, o.getPartitionValues());
-  }
-
-  @VisibleForTesting
-  public static int comparePartitionKeyValues(List<LiteralExpr> lhs,
-      List<LiteralExpr> rhs) {
-    int sizeDiff = lhs.size() - rhs.size();
-    if (sizeDiff != 0) return sizeDiff;
-    for(int i = 0; i < lhs.size(); ++i) {
-      int cmp = lhs.get(i).compareTo(rhs.get(i));
-      if (cmp != 0) return cmp;
-    }
-    return 0;
-  }
-}
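
A note on the ordering above: comparePartitionKeyValues() sorts partitions first by
the number of partition-key values and then element by element. The following is a
minimal stand-alone sketch of the same comparison (the class and method names are
illustrative only, not part of Impala):

    import java.util.Arrays;
    import java.util.List;

    // Shorter key lists sort first; equal-length lists are compared element by element.
    final class PartitionKeyOrderSketch {
      static <T extends Comparable<T>> int compareKeyValues(List<T> lhs, List<T> rhs) {
        int sizeDiff = lhs.size() - rhs.size();
        if (sizeDiff != 0) return sizeDiff;
        for (int i = 0; i < lhs.size(); ++i) {
          int cmp = lhs.get(i).compareTo(rhs.get(i));
          if (cmp != 0) return cmp;
        }
        return 0;
      }

      public static void main(String[] args) {
        // Prints "true": partition (year=2016, month=01) orders before (year=2016, month=02).
        System.out.println(
            compareKeyValues(Arrays.asList("2016", "01"), Arrays.asList("2016", "02")) < 0);
      }
    }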

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/catalog/HdfsPartitionLocationCompressor.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/catalog/HdfsPartitionLocationCompressor.java b/fe/src/main/java/com/cloudera/impala/catalog/HdfsPartitionLocationCompressor.java
deleted file mode 100644
index b72b846..0000000
--- a/fe/src/main/java/com/cloudera/impala/catalog/HdfsPartitionLocationCompressor.java
+++ /dev/null
@@ -1,153 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.catalog;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-
-import com.cloudera.impala.common.Pair;
-import com.cloudera.impala.thrift.THdfsPartitionLocation;
-import com.cloudera.impala.util.ListMap;
-import com.google.common.base.Preconditions;
-
-/**
- * Utility class for storing HdfsPartition locations in a compressed format.  Each
- * instance of this class is owned by a single HdfsTable instance.
- *
- * This class is not thread-safe by itself since it is only modified when the lock on an
- * HdfsTable object is held.
- *
- * TODO: Generalize this to compress other sets of Strings that are likely to share common
- * prefixes, like table locations.
- *
- */
-class HdfsPartitionLocationCompressor {
-  int numClusteringColumns_;
-
-  // A bi-directional map between partition location prefixes and their compressed
-  // representation, an int.
-  final private ListMap<String> prefixMap_ = new ListMap<String>();
-
-  public HdfsPartitionLocationCompressor(int numClusteringColumns) {
-    numClusteringColumns_ = numClusteringColumns;
-  }
-
-  // Construct an HdfsPartitionLocationCompressor with a pre-filled bidirectional
-  // prefix map (prefixMap_).
-  public HdfsPartitionLocationCompressor(
-      int numClusteringColumns, ArrayList<String> prefixes) {
-    numClusteringColumns_ = numClusteringColumns;
-    prefixMap_.populate(prefixes);
-  }
-
-  public void setClusteringColumns(int numClusteringColumns) {
-    numClusteringColumns_ = numClusteringColumns;
-  }
-
-  public List<String> getPrefixes() {
-    return prefixMap_.getList();
-  }
-
-  // One direction of the map: returns the prefix associated with an index, or "" if the
-  // index is -1. Indexes less than -1 or greater than prefixMap_.size()-1 are invalid
-  // and cause an IndexOutOfBoundsException to be thrown.
-  private String indexToPrefix(int i) {
-    // Uncompressed locations are represented by -1:
-    if (i == -1) return "";
-    Preconditions.checkElementIndex(i, prefixMap_.size());
-    return prefixMap_.getEntry(i);
-  }
-
-  // Compress a location prefix, adding it to the bidirectional prefix map (prefixMap_)
-  // if it is not already present.
-  private int prefixToIndex(String s) {
-    return prefixMap_.getIndex(s);
-  }
-
-  // A surrogate for THdfsPartitionLocation, which represents a partition's location
-  // relative to its parent table's list of partition prefixes.
-  public class Location {
-    // 'prefix_index_' represents the portion of the partition's location that comes before
-    // the last N directories, where N is the number of partitioning columns.
-    // 'prefix_index_' is an index into
-    // HdfsPartitionLocationCompressor.this.prefixMap_. 'suffix_' is the rest of the
-    // partition location.
-    //
-    // TODO: Since each partition stores the literal values for the partitioning columns,
-    // we could also elide the column names and values from suffix_ when a partition is in
-    // the canonical location "/partitioning_column_name_1=value_1/..."
-    private final int prefix_index_;
-    private final String suffix_;
-
-    public Location(String location) {
-      Preconditions.checkNotNull(location);
-      Pair<String,String> locationParts = decompose(location);
-      prefix_index_ =
-          HdfsPartitionLocationCompressor.this.prefixToIndex(locationParts.first);
-      suffix_ = locationParts.second;
-    }
-
-    public Location(THdfsPartitionLocation thrift) {
-      Preconditions.checkNotNull(thrift);
-      prefix_index_ = thrift.prefix_index;
-      suffix_ = thrift.getSuffix();
-    }
-
-    public THdfsPartitionLocation toThrift() {
-      return new THdfsPartitionLocation(prefix_index_, suffix_);
-    }
-
-    @Override
-    public String toString() {
-      return HdfsPartitionLocationCompressor.this.indexToPrefix(prefix_index_) + suffix_;
-    }
-
-    @Override
-    public int hashCode() { return toString().hashCode(); }
-
-    @Override
-    public boolean equals(Object obj) {
-      return (obj instanceof Location) && toString().equals(obj.toString());
-    }
-
-    // Decompose a location string by removing its last N directories, where N is the
-    // number of clustering columns. The result is a Pair<String,String> where the first
-    // String is the prefix and the second is the suffix. (In other words, their
-    // concatenation equals the input.) If the input does not have at least N '/'
-    // characters, the prefix is empty and the suffix is the entire input.
-    private Pair<String,String> decompose(String s) {
-      Preconditions.checkNotNull(s);
-      int numClusteringColumns =
-          HdfsPartitionLocationCompressor.this.numClusteringColumns_;
-      if (numClusteringColumns == 0) return new Pair<String,String>(s, "");
-      // Iterate backwards over the input until we have passed 'numClusteringColumns'
-      // directories. What is left is the prefix.
-      int i = s.length() - 1;
-      // If the string ends in '/', iterating past it does not pass a clustering column.
-      if (i >= 0 && s.charAt(i) == '/') --i;
-      for (; numClusteringColumns > 0 && i >= 0; --i) {
-        if (s.charAt(i) == '/') --numClusteringColumns;
-      }
-      // If we successfully removed all the partition directories, s.charAt(i+1) is '/'
-      // and we can include it in the prefix.
-      if (0 == numClusteringColumns) ++i;
-      return new Pair<String,String>(s.substring(0, i + 1), s.substring(i + 1));
-    }
-  }
-}
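
For readers skimming the deleted class above: Location.decompose() splits a partition
location into a shared prefix (everything before the last N partition directories,
where N is the number of clustering columns) and a per-partition suffix, so that many
partitions can share one stored copy of the prefix. A self-contained sketch of that
split with a worked example (the class name is illustrative, not part of Impala):

    // Walk backwards over the last N '/'-separated directories; what remains is the prefix.
    final class LocationSplitSketch {
      static String[] decompose(String location, int numClusteringCols) {
        if (numClusteringCols == 0) return new String[] {location, ""};
        int i = location.length() - 1;
        // A trailing '/' does not count as passing a partition directory.
        if (i >= 0 && location.charAt(i) == '/') --i;
        for (; numClusteringCols > 0 && i >= 0; --i) {
          if (location.charAt(i) == '/') --numClusteringCols;
        }
        // If all partition directories were skipped, keep the preceding '/' in the prefix.
        if (numClusteringCols == 0) ++i;
        return new String[] {location.substring(0, i + 1), location.substring(i + 1)};
      }

      public static void main(String[] args) {
        // With two clustering columns, "/warehouse/t/year=2016/month=9" splits into
        // "/warehouse/t/" and "year=2016/month=9".
        String[] parts = decompose("/warehouse/t/year=2016/month=9", 2);
        System.out.println(parts[0] + " | " + parts[1]);
      }
    }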

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/catalog/HdfsStorageDescriptor.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/catalog/HdfsStorageDescriptor.java b/fe/src/main/java/com/cloudera/impala/catalog/HdfsStorageDescriptor.java
deleted file mode 100644
index f018ce3..0000000
--- a/fe/src/main/java/com/cloudera/impala/catalog/HdfsStorageDescriptor.java
+++ /dev/null
@@ -1,240 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.catalog;
-
-import java.util.List;
-import java.util.Map;
-
-import org.apache.hadoop.hive.metastore.api.SerDeInfo;
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
-import org.apache.hadoop.hive.serde.serdeConstants;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Maps;
-
-/**
- * Represents the file format metadata for files stored in a table or partition.
- */
-public class HdfsStorageDescriptor {
-  public static final char DEFAULT_LINE_DELIM = '\n';
-  // hive by default uses ctrl-a as field delim
-  public static final char DEFAULT_FIELD_DELIM = '\u0001';
-  // hive by default has no escape char
-  public static final char DEFAULT_ESCAPE_CHAR = '\u0000';
-
-  // Serde parameters that are recognized by table writers.
-  private static final String BLOCK_SIZE = "blocksize";
-  private static final String COMPRESSION = "compression";
-
-  // Important: don't change the ordering of these keys - if e.g. COLLECTION_DELIM is
-  // not found, the value of FIELD_DELIM is used, so FIELD_DELIM must be found first.
-  // Package visible for testing.
-  final static List<String> DELIMITER_KEYS = ImmutableList.of(
-      serdeConstants.LINE_DELIM, serdeConstants.FIELD_DELIM,
-      serdeConstants.COLLECTION_DELIM, serdeConstants.MAPKEY_DELIM,
-      serdeConstants.ESCAPE_CHAR, serdeConstants.QUOTE_CHAR);
-
-  // The Parquet serde shows up multiple times as the location of the implementation
-  // has changed between Impala versions.
-  final static List<String> COMPATIBLE_SERDES = ImmutableList.of(
-      "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe", // (seq / text / parquet)
-      "org.apache.hadoop.hive.serde2.avro.AvroSerDe", // (avro)
-      "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe", // (rc)
-      "parquet.hive.serde.ParquetHiveSerDe", // (parquet - legacy)
-      // TODO: Verify the following Parquet SerDe works with Impala and add
-      // support for the new input/output format classes. See CDH-17085.
-      "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe"); // (parquet)
-
-  private final static Logger LOG = LoggerFactory.getLogger(HdfsStorageDescriptor.class);
-
-  private HdfsFileFormat fileFormat_;
-  private final byte lineDelim_;
-  private final byte fieldDelim_;
-  private final byte collectionDelim_;
-  private final byte mapKeyDelim_;
-  private final byte escapeChar_;
-  private final byte quoteChar_;
-  private final int blockSize_;
-
-  public void setFileFormat(HdfsFileFormat fileFormat) {
-    fileFormat_ = fileFormat;
-  }
-
-  /**
-   * Returns a map from delimiter key to a single delimiter character,
-   * filling in defaults if explicit values are not found in the supplied
-   * serde descriptor.
-   *
-   * @throws InvalidStorageDescriptorException - if an invalid delimiter is found
-   */
-  private static Map<String, Byte> extractDelimiters(SerDeInfo serdeInfo)
-      throws InvalidStorageDescriptorException {
-    // The metastore may return null for delimiter parameters,
-    // which means we need to use a default instead.
-    // We tried long and hard to find default values for delimiters in Hive,
-    // but could not find them.
-    Map<String, Byte> delimMap = Maps.newHashMap();
-
-    for (String delimKey: DELIMITER_KEYS) {
-      String delimValue = serdeInfo.getParameters().get(delimKey);
-      if (delimValue == null) {
-        if (delimKey.equals(serdeConstants.FIELD_DELIM)) {
-          delimMap.put(delimKey, (byte) DEFAULT_FIELD_DELIM);
-        } else if (delimKey.equals(serdeConstants.ESCAPE_CHAR)) {
-          delimMap.put(delimKey, (byte) DEFAULT_ESCAPE_CHAR);
-        } else if (delimKey.equals(serdeConstants.LINE_DELIM)) {
-          delimMap.put(delimKey, (byte) DEFAULT_LINE_DELIM);
-        } else {
-          delimMap.put(delimKey, delimMap.get(serdeConstants.FIELD_DELIM));
-        }
-      } else {
-        Byte delimByteValue = parseDelim(delimValue);
-        if (delimByteValue == null) {
-          throw new InvalidStorageDescriptorException("Invalid delimiter: '" +
-              delimValue + "'. Delimiter must be specified as a single character or " +
-              "as a decimal value in the range [-128:127]");
-        }
-        delimMap.put(delimKey, delimByteValue);
-      }
-    }
-    return delimMap;
-  }
-
-  /**
-   * Parses a delimiter in a similar way as Hive, with some additional error checking.
-   * A delimiter must fit in a single byte and can be specified in the following
-   * formats, as far as I can tell (there isn't documentation):
-   * - A single ASCII or unicode character (ex. '|')
-   * - An escape character in octal format (ex. \001. Stored in the metastore as a
-   *   unicode character: \u0001).
-   * - A signed decimal integer in the range [-128:127]. Used to support delimiters
-   *   for ASCII character values between 128-255 (-2 maps to ASCII 254).
-   *
-   * The delimiter is first parsed as a decimal number. If the parsing succeeds AND
-   * the resulting value fits in a signed byte, the byte value of the parsed int is
-   * returned. Otherwise, if the string has a single char, the byte value of this
-   * char is returned.
-   * If the delimiter is invalid, null will be returned.
-   */
-  public static Byte parseDelim(String delimVal) {
-    Preconditions.checkNotNull(delimVal);
-    try {
-      // In the future we could support delimiters specified in hex format, but we would
-      // need support from the Hive side.
-      return Byte.parseByte(delimVal);
-    } catch (NumberFormatException e) {
-      if (delimVal.length() == 1) return (byte) delimVal.charAt(0);
-    }
-    return null;
-  }
-
-  public HdfsStorageDescriptor(String tblName, HdfsFileFormat fileFormat, byte lineDelim,
-      byte fieldDelim, byte collectionDelim, byte mapKeyDelim, byte escapeChar,
-      byte quoteChar, int blockSize) {
-    this.fileFormat_ = fileFormat;
-    this.lineDelim_ = lineDelim;
-    this.fieldDelim_ = fieldDelim;
-    this.collectionDelim_ = collectionDelim;
-    this.mapKeyDelim_ = mapKeyDelim;
-    this.quoteChar_ = quoteChar;
-    this.blockSize_ = blockSize;
-
-    // You can set the escape character as a tuple or row delim.  Empirically,
-    // this is ignored by hive.
-    if (escapeChar == fieldDelim ||
-        escapeChar == lineDelim ||
-        escapeChar == collectionDelim) {
-      // TODO: we should output the table name here but it's hard to get to now.
-      this.escapeChar_ = DEFAULT_ESCAPE_CHAR;
-      LOG.warn("Escape character for table, " + tblName + " is set to "
-          + "the same character as one of the delimiters.  Ignoring escape character.");
-    } else {
-      this.escapeChar_ = escapeChar;
-    }
-  }
-
-  /**
-   * Thrown when constructing an HdfsStorageDescriptor from an invalid/unsupported
-   * metastore storage descriptor.
-   * TODO: Get rid of this class.
-   */
-  public static class InvalidStorageDescriptorException extends CatalogException {
-    // Mandatory since Exception implements Serializable
-    private static final long serialVersionUID = -555234913768134760L;
-    public InvalidStorageDescriptorException(String s) { super(s); }
-    public InvalidStorageDescriptorException(Exception ex) {
-      super(ex.getMessage(), ex);
-    }
-    public InvalidStorageDescriptorException(String msg, Throwable cause) {
-      super(msg, cause);
-    }
-  }
-
-  /**
-   * Constructs a new HdfsStorageDescriptor from a StorageDescriptor retrieved from the
-   * metastore.
-   *
-   * @throws InvalidStorageDescriptorException - if the storage descriptor has invalid
-   * delimiters, an unsupported SerDe, or an unknown file format.
-   */
-  public static HdfsStorageDescriptor fromStorageDescriptor(String tblName,
-      StorageDescriptor sd)
-      throws InvalidStorageDescriptorException {
-    Map<String, Byte> delimMap = extractDelimiters(sd.getSerdeInfo());
-    if (!COMPATIBLE_SERDES.contains(sd.getSerdeInfo().getSerializationLib())) {
-      throw new InvalidStorageDescriptorException(String.format("Impala does not " +
-          "support tables of this type. REASON: SerDe library '%s' is not " +
-          "supported.", sd.getSerdeInfo().getSerializationLib()));
-    }
-    // Extract the blocksize and compression specification from the SerDe parameters,
-    // if present.
-    Map<String, String> parameters = sd.getSerdeInfo().getParameters();
-    int blockSize = 0;
-    String blockValue = parameters.get(BLOCK_SIZE);
-    if (blockValue != null) {
-      blockSize = Integer.parseInt(blockValue);
-    }
-
-    try {
-      return new HdfsStorageDescriptor(tblName,
-          HdfsFileFormat.fromJavaClassName(sd.getInputFormat()),
-          delimMap.get(serdeConstants.LINE_DELIM),
-          delimMap.get(serdeConstants.FIELD_DELIM),
-          delimMap.get(serdeConstants.COLLECTION_DELIM),
-          delimMap.get(serdeConstants.MAPKEY_DELIM),
-          delimMap.get(serdeConstants.ESCAPE_CHAR),
-          delimMap.get(serdeConstants.QUOTE_CHAR),
-          blockSize);
-    } catch (IllegalArgumentException ex) {
-      // Thrown by fromJavaClassName
-      throw new InvalidStorageDescriptorException(ex);
-    }
-  }
-
-  public byte getLineDelim() { return lineDelim_; }
-  public byte getFieldDelim() { return fieldDelim_; }
-  public byte getCollectionDelim() { return collectionDelim_; }
-  public byte getMapKeyDelim() { return mapKeyDelim_; }
-  public byte getEscapeChar() { return escapeChar_; }
-  public byte getQuoteChar() { return quoteChar_; }
-  public HdfsFileFormat getFileFormat() { return fileFormat_; }
-  public int getBlockSize() { return blockSize_; }
-}
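
The parseDelim() contract documented above (try a signed decimal byte first, then fall
back to a single character, otherwise reject) can be seen in a small stand-alone sketch;
the class name below is made up for the example:

    // Delimiters are accepted either as a decimal in [-128, 127] or as a single character.
    final class DelimParseSketch {
      static Byte parseDelim(String delimVal) {
        try {
          return Byte.parseByte(delimVal);       // e.g. "-2" -> (byte) -2, i.e. ASCII 254
        } catch (NumberFormatException e) {
          if (delimVal.length() == 1) return (byte) delimVal.charAt(0);  // e.g. "|" -> 124
        }
        return null;                             // rejected; the caller reports an error
      }

      public static void main(String[] args) {
        System.out.println(parseDelim("|"));     // 124
        System.out.println(parseDelim("-2"));    // -2 (same bit pattern as unsigned 254)
        System.out.println(parseDelim("ab"));    // null
      }
    }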


[47/61] [partial] incubator-impala git commit: IMPALA-3786: Replace "cloudera" with "apache" (part 1)

Posted by kw...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/Analyzer.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/Analyzer.java b/fe/src/main/java/com/cloudera/impala/analysis/Analyzer.java
deleted file mode 100644
index a931489..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/Analyzer.java
+++ /dev/null
@@ -1,2932 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.IdentityHashMap;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.analysis.Path.PathType;
-import com.cloudera.impala.authorization.AuthorizationConfig;
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.authorization.PrivilegeRequest;
-import com.cloudera.impala.authorization.PrivilegeRequestBuilder;
-import com.cloudera.impala.authorization.User;
-import com.cloudera.impala.catalog.CatalogException;
-import com.cloudera.impala.catalog.Column;
-import com.cloudera.impala.catalog.DataSourceTable;
-import com.cloudera.impala.catalog.DatabaseNotFoundException;
-import com.cloudera.impala.catalog.Db;
-import com.cloudera.impala.catalog.HBaseTable;
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.catalog.ImpaladCatalog;
-import com.cloudera.impala.catalog.KuduTable;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.catalog.TableLoadingException;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.catalog.View;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.Id;
-import com.cloudera.impala.common.IdGenerator;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.common.Pair;
-import com.cloudera.impala.common.PrintUtils;
-import com.cloudera.impala.planner.PlanNode;
-import com.cloudera.impala.service.FeSupport;
-import com.cloudera.impala.thrift.TAccessEvent;
-import com.cloudera.impala.thrift.TCatalogObjectType;
-import com.cloudera.impala.thrift.TLineageGraph;
-import com.cloudera.impala.thrift.TNetworkAddress;
-import com.cloudera.impala.thrift.TQueryCtx;
-import com.cloudera.impala.util.DisjointSet;
-import com.cloudera.impala.util.EventSequence;
-import com.cloudera.impala.util.ListMap;
-import com.cloudera.impala.util.TSessionStateUtil;
-import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Predicates;
-import com.google.common.base.Strings;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import com.google.common.collect.Sets;
-
-/**
- * Repository of analysis state for a single select block.
- *
- * Conjuncts:
- * Conjuncts are registered during analysis (registerConjuncts()) and assigned during the
- * planning process (getUnassigned[Oj]Conjuncts()/isConjunctAssigned()/
- * markConjunctsAssigned()).
- * All conjuncts are assigned a unique id when initially registered, and all registered
- * conjuncts are referenced by their id (ie, there are no containers other than the one
- * holding the referenced conjuncts), to make substitute() simple.
- *
- * Slot equivalence classes:
- * Equivalence of individual slots is computed based on registered equality predicates;
- * those predicates are either present directly in the query or are implied by the
- * syntactic elements used in the query (example: a GROUP BY clause has implied equality
- * predicates between the grouping exprs and the grouping slots of the aggregation
- * output tuple).
- * Implied equality predicates are registered with createAuxEquivPredicate(); they are
- * never assigned during plan generation.
- * Also tracks each catalog object access, so authorization checks can be performed once
- * analysis is complete.
- * TODO: We often use the terms stmt/block/analyzer interchangeably, although they may
- * have slightly different meanings (sometimes depending on the context). Use the terms
- * more accurately and consistently here and elsewhere.
- */
-public class Analyzer {
-  // Common analysis error messages
-  public final static String DB_DOES_NOT_EXIST_ERROR_MSG = "Database does not exist: ";
-  public final static String DB_ALREADY_EXISTS_ERROR_MSG = "Database already exists: ";
-  public final static String TBL_DOES_NOT_EXIST_ERROR_MSG = "Table does not exist: ";
-  public final static String TBL_ALREADY_EXISTS_ERROR_MSG = "Table already exists: ";
-  public final static String FN_DOES_NOT_EXIST_ERROR_MSG = "Function does not exist: ";
-  public final static String FN_ALREADY_EXISTS_ERROR_MSG = "Function already exists: ";
-  public final static String DATA_SRC_DOES_NOT_EXIST_ERROR_MSG =
-      "Data source does not exist: ";
-  public final static String DATA_SRC_ALREADY_EXISTS_ERROR_MSG =
-      "Data source already exists: ";
-
-  private final static Logger LOG = LoggerFactory.getLogger(Analyzer.class);
-
-  private final User user_;
-
-  // Indicates whether this query block contains a straight join hint.
-  private boolean isStraightJoin_ = false;
-
-  // Whether to use Hive's auto-generated column labels.
-  private boolean useHiveColLabels_ = false;
-
-  // True if the corresponding select block has a limit and/or offset clause.
-  private boolean hasLimitOffsetClause_ = false;
-
-  // Current depth of nested analyze() calls. Used for enforcing a
-  // maximum expr-tree depth. Needs to be manually maintained by the user
-  // of this Analyzer with incrementCallDepth() and decrementCallDepth().
-  private int callDepth_ = 0;
-
-  // Flag indicating if this analyzer instance belongs to a subquery.
-  private boolean isSubquery_ = false;
-
-  // Flag indicating whether this analyzer belongs to a WITH clause view.
-  private boolean isWithClause_ = false;
-
-  // If set, when privilege requests are registered they will use this error
-  // error message.
-  private String authErrorMsg_;
-
-  // If false, privilege requests will not be registered in the analyzer.
-  private boolean enablePrivChecks_ = true;
-
-  // By default, all registered semi-joined tuples are invisible, i.e., their slots
-  // cannot be referenced. If set, this semi-joined tuple is made visible. Such a tuple
-  // should only be made visible for analyzing the On-clause of its semi-join.
-  // In particular, if there are multiple semi-joins in the same query block, then the
-  // On-clause of any such semi-join is not allowed to reference other semi-joined tuples
-  // except its own. Therefore, only a single semi-joined tuple can be visible at a time.
-  private TupleId visibleSemiJoinedTupleId_ = null;
-
-  public void setIsSubquery() {
-    isSubquery_ = true;
-    globalState_.containsSubquery = true;
-  }
-  public boolean isSubquery() { return isSubquery_; }
-  public boolean setHasPlanHints() { return globalState_.hasPlanHints = true; }
-  public boolean hasPlanHints() { return globalState_.hasPlanHints; }
-  public void setIsWithClause() { isWithClause_ = true; }
-  public boolean isWithClause() { return isWithClause_; }
-
-  // state shared between all objects of an Analyzer tree
-  // TODO: Many maps here contain properties about tuples, e.g., whether
-  // a tuple is outer/semi joined, etc. Remove the maps in favor of making
-  // them properties of the tuple descriptor itself.
-  private static class GlobalState {
-    // TODO: Consider adding an "exec-env"-like global singleton that contains the
-    // catalog and authzConfig.
-    public final ImpaladCatalog catalog;
-    public final TQueryCtx queryCtx;
-    public final AuthorizationConfig authzConfig;
-    public final DescriptorTable descTbl = new DescriptorTable();
-    public final IdGenerator<ExprId> conjunctIdGenerator = ExprId.createGenerator();
-    public final ColumnLineageGraph lineageGraph;
-
-    // True if we are analyzing an explain request. Should be set before starting
-    // analysis.
-    public boolean isExplain;
-
-    // Indicates whether the query has plan hints.
-    public boolean hasPlanHints = false;
-
-    // True if at least one of the analyzers belongs to a subquery.
-    public boolean containsSubquery = false;
-
-    // all registered conjuncts (map from expr id to conjunct)
-    public final Map<ExprId, Expr> conjuncts = Maps.newHashMap();
-
-    // all registered conjuncts bound by a single tuple id; used in getBoundPredicates()
-    public final ArrayList<ExprId> singleTidConjuncts = Lists.newArrayList();
-
-    // eqJoinConjuncts[tid] contains all conjuncts of the form
-    // "<lhs> = <rhs>" in which either lhs or rhs is fully bound by tid
-    // and the other side is not bound by tid (ie, predicates that express equi-join
-    // conditions between two tablerefs).
-    // A predicate such as "t1.a = t2.b" has two entries, one for 't1' and
-    // another one for 't2'.
-    public final Map<TupleId, List<ExprId>> eqJoinConjuncts = Maps.newHashMap();
-
-    // set of conjuncts that have been assigned to some PlanNode
-    public Set<ExprId> assignedConjuncts =
-        Collections.newSetFromMap(new IdentityHashMap<ExprId, Boolean>());
-
-    // map from outer-joined tuple id, i.e., one that is nullable,
-    // to the last Join clause (represented by its rhs table ref) that outer-joined it
-    public final Map<TupleId, TableRef> outerJoinedTupleIds = Maps.newHashMap();
-
-    // Map of registered conjunct to the last full outer join (represented by its
-    // rhs table ref) that outer joined it.
-    public final Map<ExprId, TableRef> fullOuterJoinedConjuncts = Maps.newHashMap();
-
-    // Map of full-outer-joined tuple id to the last full outer join that outer-joined it
-    public final Map<TupleId, TableRef> fullOuterJoinedTupleIds = Maps.newHashMap();
-
-    // Map from semi-joined tuple id, i.e., one that is invisible outside the join's
-    // On-clause, to its Join clause (represented by its rhs table ref). An anti-join is
-    // a kind of semi-join, so anti-joined tuples are also registered here.
-    public final Map<TupleId, TableRef> semiJoinedTupleIds = Maps.newHashMap();
-
-    // Map from right-hand side table-ref id of an outer join to the list of
-    // conjuncts in its On clause. There is always an entry for an outer join, but the
-    // corresponding value could be an empty list. There is no entry for non-outer joins.
-    public final Map<TupleId, List<ExprId>> conjunctsByOjClause = Maps.newHashMap();
-
-    // map from registered conjunct to its containing outer join On clause (represented
-    // by its right-hand side table ref); this is limited to conjuncts that can only be
-    // correctly evaluated by the originating outer join, including constant conjuncts
-    public final Map<ExprId, TableRef> ojClauseByConjunct = Maps.newHashMap();
-
-    // map from registered conjunct to its containing semi join On clause (represented
-    // by its right-hand side table ref)
-    public final Map<ExprId, TableRef> sjClauseByConjunct = Maps.newHashMap();
-
-    // map from registered conjunct to its containing inner join On clause (represented
-    // by its right-hand side table ref)
-    public final Map<ExprId, TableRef> ijClauseByConjunct = Maps.newHashMap();
-
-    // map from slot id to the analyzer/block in which it was registered
-    public final Map<SlotId, Analyzer> blockBySlot = Maps.newHashMap();
-
-    // Tracks all privilege requests on catalog objects.
-    private final Set<PrivilegeRequest> privilegeReqs = Sets.newLinkedHashSet();
-
-    // List of PrivilegeRequest to custom authorization failure error message.
-    // Tracks all privilege requests on catalog objects that need a custom
-    // error message returned to avoid exposing existence of catalog objects.
-    private final List<Pair<PrivilegeRequest, String>> maskedPrivilegeReqs =
-        Lists.newArrayList();
-
-    // accesses to catalog objects
-    // TODO: This can be inferred from privilegeReqs. They should be coalesced.
-    public Set<TAccessEvent> accessEvents = Sets.newHashSet();
-
-    // Tracks all warnings (e.g. non-fatal errors) that were generated during analysis.
-    // These are passed to the backend and eventually propagated to the shell. Maps from
-    // warning message to the number of times that warning was logged (in order to avoid
-    // duplicating the same warning over and over).
-    public final LinkedHashMap<String, Integer> warnings =
-        new LinkedHashMap<String, Integer>();
-
-    public final IdGenerator<EquivalenceClassId> equivClassIdGenerator =
-        EquivalenceClassId.createGenerator();
-
-    // map from equivalence class id to the list of its member slots
-    private final Map<EquivalenceClassId, ArrayList<SlotId>> equivClassMembers =
-        Maps.newHashMap();
-
-    // map from slot id to its equivalence class id;
-    // only visible at the root Analyzer
-    private final Map<SlotId, EquivalenceClassId> equivClassBySlotId = Maps.newHashMap();
-
-    // map for each slot to the canonical slot of its equivalence class
-    private final ExprSubstitutionMap equivClassSmap = new ExprSubstitutionMap();
-
-    // represents the direct and transitive value transfers between slots
-    private ValueTransferGraph valueTransferGraph;
-
-    private final List<Pair<SlotId, SlotId>> registeredValueTransfers =
-        Lists.newArrayList();
-
-    // Bidirectional map between Integer index and TNetworkAddress.
-    // Decreases the size of the scan range locations.
-    private final ListMap<TNetworkAddress> hostIndex = new ListMap<TNetworkAddress>();
-
-    // Timeline of important events in the planning process, used for debugging /
-    // profiling
-    private final EventSequence timeline = new EventSequence("Planner Timeline");
-
-    public GlobalState(ImpaladCatalog catalog, TQueryCtx queryCtx,
-        AuthorizationConfig authzConfig) {
-      this.catalog = catalog;
-      this.queryCtx = queryCtx;
-      this.authzConfig = authzConfig;
-      this.lineageGraph = new ColumnLineageGraph();
-    }
-  };
-
-  private final GlobalState globalState_;
-
-  public boolean containsSubquery() { return globalState_.containsSubquery; }
-
-  /**
-   * Helper function to reset the global state information about the existence of
-   * subqueries.
-   */
-  public void resetSubquery() { globalState_.containsSubquery = false; }
-
-  // An analyzer stores analysis state for a single select block. A select block can be
-  // a top level select statement, or an inline view select block.
-  // ancestors contains the Analyzers of the enclosing select blocks of 'this'
-  // (ancestors[0] contains the immediate parent, etc.).
-  private final ArrayList<Analyzer> ancestors_;
-
-  // map from lowercase table alias to a view definition in this analyzer's scope
-  private final Map<String, View> localViews_ = Maps.newHashMap();
-
-  // Map from lowercase table alias to descriptor. Tables without an explicit alias
-  // are assigned two implicit aliases: the unqualified and fully-qualified table name.
-  // Such tables have two entries pointing to the same descriptor. If an alias is
-  // ambiguous, then this map retains the first entry with that alias to simplify error
-  // checking (duplicate vs. ambiguous alias).
-  private final Map<String, TupleDescriptor> aliasMap_ = Maps.newHashMap();
-
-  // Map from tuple id to its corresponding table ref.
-  private final Map<TupleId, TableRef> tableRefMap_ = Maps.newHashMap();
-
-  // Set of lowercase ambiguous implicit table aliases.
-  private final Set<String> ambiguousAliases_ = Sets.newHashSet();
-
-  // Map from lowercase fully-qualified path to its slot descriptor. Only contains paths
-  // that have a scalar type as destination (see registerSlotRef()).
-  private final Map<String, SlotDescriptor> slotPathMap_ = Maps.newHashMap();
-
-  // Tracks all tables/views found during analysis that were missing metadata.
-  private Set<TableName> missingTbls_ = new HashSet<TableName>();
-
-  // Indicates whether this analyzer/block is guaranteed to have an empty result set
-  // due to a limit 0 or constant conjunct evaluating to false.
-  private boolean hasEmptyResultSet_ = false;
-
-  // Indicates whether the select-project-join (spj) portion of this query block
-  // is guaranteed to return an empty result set. Set due to a constant non-Having
-  // conjunct evaluating to false.
-  private boolean hasEmptySpjResultSet_ = false;
-
-  public Analyzer(ImpaladCatalog catalog, TQueryCtx queryCtx,
-      AuthorizationConfig authzConfig) {
-    ancestors_ = Lists.newArrayList();
-    globalState_ = new GlobalState(catalog, queryCtx, authzConfig);
-    user_ = new User(TSessionStateUtil.getEffectiveUser(queryCtx.session));
-  }
-
-  /**
-   * Analyzer constructor for nested select block. GlobalState is inherited from the
-   * parentAnalyzer.
-   */
-  public Analyzer(Analyzer parentAnalyzer) {
-    this(parentAnalyzer, parentAnalyzer.globalState_);
-  }
-
-  /**
-   * Analyzer constructor for nested select block with the specified global state.
-   */
-  private Analyzer(Analyzer parentAnalyzer, GlobalState globalState) {
-    ancestors_ = Lists.newArrayList(parentAnalyzer);
-    ancestors_.addAll(parentAnalyzer.ancestors_);
-    globalState_ = globalState;
-    missingTbls_ = parentAnalyzer.missingTbls_;
-    user_ = parentAnalyzer.getUser();
-    useHiveColLabels_ = parentAnalyzer.useHiveColLabels_;
-    authErrorMsg_ = parentAnalyzer.authErrorMsg_;
-    enablePrivChecks_ = parentAnalyzer.enablePrivChecks_;
-    isWithClause_ = parentAnalyzer.isWithClause_;
-  }
-
-  /**
-   * Returns a new analyzer with the specified parent analyzer but with a new
-   * global state.
-   */
-  public static Analyzer createWithNewGlobalState(Analyzer parentAnalyzer) {
-    GlobalState globalState = new GlobalState(parentAnalyzer.globalState_.catalog,
-        parentAnalyzer.getQueryCtx(), parentAnalyzer.getAuthzConfig());
-    return new Analyzer(parentAnalyzer, globalState);
-  }
-
-  /**
-   * Makes the given semi-joined tuple visible such that its slots can be referenced.
-   * If tid is null, makes the currently visible semi-joined tuple invisible again.
-   */
-  public void setVisibleSemiJoinedTuple(TupleId tid) {
-    Preconditions.checkState(tid == null
-        || globalState_.semiJoinedTupleIds.containsKey(tid));
-    Preconditions.checkState(tid == null || visibleSemiJoinedTupleId_ == null);
-    visibleSemiJoinedTupleId_ = tid;
-  }
-
-  public Set<TableName> getMissingTbls() { return missingTbls_; }
-  public boolean hasMissingTbls() { return !missingTbls_.isEmpty(); }
-  public boolean hasAncestors() { return !ancestors_.isEmpty(); }
-  public Analyzer getParentAnalyzer() {
-    return hasAncestors() ? ancestors_.get(0) : null;
-  }
-
-  /**
-   * Returns the analyzer that has an entry for the given tuple descriptor in its
-   * tableRefMap, or null if no such analyzer could be found. Searches the hierarchy
-   * of analyzers bottom-up.
-   */
-  public Analyzer findAnalyzer(TupleId tid) {
-    if (tableRefMap_.containsKey(tid)) return this;
-    if (hasAncestors()) return getParentAnalyzer().findAnalyzer(tid);
-    return null;
-  }
-
-  /**
-   * Returns a list of all warnings logged, noting any that were logged more than once.
-   */
-  public List<String> getWarnings() {
-    List<String> result = new ArrayList<String>();
-    for (Map.Entry<String, Integer> e : globalState_.warnings.entrySet()) {
-      String error = e.getKey();
-      int count = e.getValue();
-      Preconditions.checkState(count > 0);
-      if (count == 1) {
-        result.add(error);
-      } else {
-        result.add(error + " (" + count + " warnings like this)");
-      }
-    }
-    return result;
-  }
-
-  /**
-   * Registers a local view definition with this analyzer. Throws an exception if a view
-   * definition with the same alias has already been registered or if the number of
-   * explicit column labels is greater than the number of columns in the view statement.
-   */
-  public void registerLocalView(View view) throws AnalysisException {
-    Preconditions.checkState(view.isLocalView());
-    if (view.hasColLabels()) {
-      List<String> viewLabels = view.getColLabels();
-      List<String> queryStmtLabels = view.getQueryStmt().getColLabels();
-      if (viewLabels.size() > queryStmtLabels.size()) {
-        throw new AnalysisException("WITH-clause view '" + view.getName() +
-            "' returns " + queryStmtLabels.size() + " columns, but " +
-            viewLabels.size() + " labels were specified. The number of column " +
-            "labels must be smaller or equal to the number of returned columns.");
-      }
-    }
-    if (localViews_.put(view.getName().toLowerCase(), view) != null) {
-      throw new AnalysisException(
-          String.format("Duplicate table alias: '%s'", view.getName()));
-    }
-  }
-
-  /**
-   * Creates and returns an empty TupleDescriptor for the given table ref and registers
-   * it against all its legal aliases. For table refs with an explicit alias, only the
-   * explicit alias is legal. For table refs with no explicit alias, the fully-qualified
-   * and unqualified table names are legal aliases. Column references against unqualified
-   * implicit aliases can be ambiguous; therefore, we register such ambiguous aliases
-   * here. Requires that all views have been substituted.
-   * Throws if an existing explicit alias or implicit fully-qualified alias
-   * has already been registered for another table ref.
-   */
-  public TupleDescriptor registerTableRef(TableRef ref) throws AnalysisException {
-    String uniqueAlias = ref.getUniqueAlias();
-    if (aliasMap_.containsKey(uniqueAlias)) {
-      throw new AnalysisException("Duplicate table alias: '" + uniqueAlias + "'");
-    }
-
-    // If ref has no explicit alias, then the unqualified and the fully-qualified table
-    // names are legal implicit aliases. Column references against unqualified implicit
-    // aliases can be ambiguous; therefore, we register such ambiguous aliases here.
-    String unqualifiedAlias = null;
-    String[] aliases = ref.getAliases();
-    if (aliases.length > 1) {
-      unqualifiedAlias = aliases[1];
-      TupleDescriptor tupleDesc = aliasMap_.get(unqualifiedAlias);
-      if (tupleDesc != null) {
-        if (tupleDesc.hasExplicitAlias()) {
-          throw new AnalysisException(
-              "Duplicate table alias: '" + unqualifiedAlias + "'");
-        } else {
-          ambiguousAliases_.add(unqualifiedAlias);
-        }
-      }
-    }
-
-    // Delegate creation of the tuple descriptor to the concrete table ref.
-    TupleDescriptor result = ref.createTupleDescriptor(this);
-    result.setAliases(aliases, ref.hasExplicitAlias());
-    // Register all legal aliases.
-    for (String alias: aliases) {
-      aliasMap_.put(alias, result);
-    }
-    tableRefMap_.put(result.getId(), ref);
-    return result;
-  }
-
-  /**
-   * Resolves the given TableRef into a concrete BaseTableRef, ViewRef or
-   * CollectionTableRef. Returns the new resolved table ref or the given table
-   * ref if it is already resolved.
-   * Registers privilege requests and throws an AnalysisException if the tableRef's
-   * path could not be resolved. The privilege requests are added to ensure that
-   * an AuthorizationException is preferred over an AnalysisException so as not to
-   * accidentally reveal the non-existence of tables/databases.
-   */
-  public TableRef resolveTableRef(TableRef tableRef) throws AnalysisException {
-    // Return the table if it is already resolved.
-    if (tableRef.isResolved()) return tableRef;
-    // Try to find a matching local view.
-    if (tableRef.getPath().size() == 1) {
-      // Searches the hierarchy of analyzers bottom-up for a registered local view with
-      // a matching alias.
-      String viewAlias = tableRef.getPath().get(0).toLowerCase();
-      Analyzer analyzer = this;
-      do {
-        View localView = analyzer.localViews_.get(viewAlias);
-        if (localView != null) return new InlineViewRef(localView, tableRef);
-        analyzer = (analyzer.ancestors_.isEmpty() ? null : analyzer.ancestors_.get(0));
-      } while (analyzer != null);
-    }
-
-    // Resolve the table ref's path and determine what resolved table ref
-    // to replace it with.
-    List<String> rawPath = tableRef.getPath();
-    Path resolvedPath = null;
-    try {
-      resolvedPath = resolvePath(tableRef.getPath(), PathType.TABLE_REF);
-    } catch (AnalysisException e) {
-      if (!hasMissingTbls()) {
-        // Register privilege requests to prefer reporting an authorization error over
-        // an analysis error. We should not accidentally reveal the non-existence of a
-        // table/database if the user is not authorized.
-        if (rawPath.size() > 1) {
-          registerPrivReq(new PrivilegeRequestBuilder()
-              .onTable(rawPath.get(0), rawPath.get(1))
-              .allOf(Privilege.SELECT).toRequest());
-        }
-        registerPrivReq(new PrivilegeRequestBuilder()
-            .onTable(getDefaultDb(), rawPath.get(0))
-            .allOf(Privilege.SELECT).toRequest());
-      }
-      throw e;
-    } catch (TableLoadingException e) {
-      throw new AnalysisException(String.format(
-          "Failed to load metadata for table: '%s'", Joiner.on(".").join(rawPath)), e);
-    }
-
-    Preconditions.checkNotNull(resolvedPath);
-    if (resolvedPath.destTable() != null) {
-      Table table = resolvedPath.destTable();
-      Preconditions.checkNotNull(table);
-      if (table instanceof View) return new InlineViewRef((View) table, tableRef);
-      // The table must be a base table.
-      Preconditions.checkState(table instanceof HdfsTable ||
-          table instanceof KuduTable ||
-          table instanceof HBaseTable ||
-          table instanceof DataSourceTable);
-      return new BaseTableRef(tableRef, resolvedPath);
-    } else {
-      return new CollectionTableRef(tableRef, resolvedPath);
-    }
-  }
-
-  /**
-   * Register conjuncts that are outer joined by a full outer join. For a given
-   * predicate, we record the last full outer join that outer-joined any of its
-   * tuple ids. We need this additional information because full-outer joins obey
-   * different rules with respect to predicate pushdown compared to left and right
-   * outer joins.
-   */
-  public void registerFullOuterJoinedConjunct(Expr e) {
-    Preconditions.checkState(
-        !globalState_.fullOuterJoinedConjuncts.containsKey(e.getId()));
-    List<TupleId> tids = Lists.newArrayList();
-    e.getIds(tids, null);
-    for (TupleId tid: tids) {
-      if (!globalState_.fullOuterJoinedTupleIds.containsKey(tid)) continue;
-      TableRef currentOuterJoin = globalState_.fullOuterJoinedTupleIds.get(tid);
-      globalState_.fullOuterJoinedConjuncts.put(e.getId(), currentOuterJoin);
-      break;
-    }
-    LOG.trace("registerFullOuterJoinedConjunct: " +
-        globalState_.fullOuterJoinedConjuncts.toString());
-  }
-
-  /**
-   * Register tids as being outer-joined by a full outer join clause represented by
-   * rhsRef.
-   */
-  public void registerFullOuterJoinedTids(List<TupleId> tids, TableRef rhsRef) {
-    for (TupleId tid: tids) {
-      globalState_.fullOuterJoinedTupleIds.put(tid, rhsRef);
-    }
-    LOG.trace("registerFullOuterJoinedTids: " +
-        globalState_.fullOuterJoinedTupleIds.toString());
-  }
-
-  /**
-   * Register tids as being outer-joined by Join clause represented by rhsRef.
-   */
-  public void registerOuterJoinedTids(List<TupleId> tids, TableRef rhsRef) {
-    for (TupleId tid: tids) {
-      globalState_.outerJoinedTupleIds.put(tid, rhsRef);
-    }
-    LOG.trace("registerOuterJoinedTids: " + globalState_.outerJoinedTupleIds.toString());
-  }
-
-  /**
-   * Register the given tuple id as being the invisible side of a semi-join.
-   */
-  public void registerSemiJoinedTid(TupleId tid, TableRef rhsRef) {
-    globalState_.semiJoinedTupleIds.put(tid, rhsRef);
-  }
-
-  /**
-   * Returns the descriptor of the given explicit or implicit table alias or null if no
-   * such alias has been registered.
-   * Throws an AnalysisException if the given table alias is ambiguous.
-   */
-  public TupleDescriptor getDescriptor(String tableAlias) throws AnalysisException {
-    String lookupAlias = tableAlias.toLowerCase();
-    if (ambiguousAliases_.contains(lookupAlias)) {
-      throw new AnalysisException(String.format(
-          "Unqualified table alias is ambiguous: '%s'", tableAlias));
-    }
-    return aliasMap_.get(lookupAlias);
-  }
-
-  public TupleDescriptor getTupleDesc(TupleId id) {
-    return globalState_.descTbl.getTupleDesc(id);
-  }
-
-  public SlotDescriptor getSlotDesc(SlotId id) {
-    return globalState_.descTbl.getSlotDesc(id);
-  }
-
-  public TableRef getTableRef(TupleId tid) { return tableRefMap_.get(tid); }
-
-  /**
-   * Given a "table alias"."column alias", return the SlotDescriptor
-   */
-  public SlotDescriptor getSlotDescriptor(String qualifiedColumnName) {
-    return slotPathMap_.get(qualifiedColumnName);
-  }
-
-  /**
-   * Return true if this analyzer has no ancestors. (i.e. false for the analyzer created
-   * for inline views/ union operands, etc.)
-   */
-  public boolean isRootAnalyzer() { return ancestors_.isEmpty(); }
-
-  /**
-   * Returns true if the query block corresponding to this analyzer is guaranteed
-   * to return an empty result set, e.g., due to a limit 0 or a constant predicate
-   * that evaluates to false.
-   */
-  public boolean hasEmptyResultSet() { return hasEmptyResultSet_; }
-  public void setHasEmptyResultSet() { hasEmptyResultSet_ = true; }
-
-  /**
-   * Returns true if the select-project-join portion of this query block returns
-   * an empty result set.
-   */
-  public boolean hasEmptySpjResultSet() { return hasEmptySpjResultSet_; }
-
-  /**
-   * Resolves the given raw path according to the given path type, as follows:
-   * SLOT_REF and STAR: Resolves the path in the context of all registered tuple
-   * descriptors, considering qualified as well as unqualified matches.
-   * TABLE_REF: Resolves the path in the context of all registered tuple descriptors
-   * only considering qualified matches, as well as catalog tables/views.
-   *
-   * Path resolution:
-   * Regardless of the path type, a raw path can have multiple successful resolutions.
-   * A resolution is said to be 'successful' if all raw path elements can be mapped
-   * to a corresponding alias/table/column/field.
-   *
-   * Path legality:
-   * A successful resolution may be illegal with respect to the path type, e.g.,
-   * a SlotRef cannot reference intermediate collection types, etc.
-   *
-   * Path ambiguity:
-   * A raw path is ambiguous if it has multiple legal resolutions. Otherwise,
-   * the ambiguity is resolved in favor of the legal resolution.
-   *
-   * Returns the single legal path resolution if it exists.
-   * Throws if there was no legal resolution or if the path is ambiguous.
-   */
-  public Path resolvePath(List<String> rawPath, PathType pathType)
-      throws AnalysisException, TableLoadingException {
-    // We only allow correlated references in predicates of a subquery.
-    boolean resolveInAncestors = false;
-    if (pathType == PathType.TABLE_REF || pathType == PathType.ANY) {
-      resolveInAncestors = true;
-    } else if (pathType == PathType.SLOT_REF) {
-      resolveInAncestors = isSubquery_;
-    }
-    // Convert all path elements to lower case.
-    ArrayList<String> lcRawPath = Lists.newArrayListWithCapacity(rawPath.size());
-    for (String s: rawPath) lcRawPath.add(s.toLowerCase());
-    return resolvePath(lcRawPath, pathType, resolveInAncestors);
-  }
-
-  private Path resolvePath(List<String> rawPath, PathType pathType,
-      boolean resolveInAncestors) throws AnalysisException, TableLoadingException {
-    // List of all candidate paths with different roots. Paths in this list are initially
-    // unresolved and may be illegal with respect to the pathType.
-    List<Path> candidates = getTupleDescPaths(rawPath);
-
-    LinkedList<String> errors = Lists.newLinkedList();
-    if (pathType == PathType.SLOT_REF || pathType == PathType.STAR) {
-      // Paths rooted at all of the unique registered tuple descriptors.
-      for (TableRef tblRef: tableRefMap_.values()) {
-        candidates.add(new Path(tblRef.getDesc(), rawPath));
-      }
-    } else {
-      // Always prefer table ref paths rooted at a registered tuple descriptor.
-      Preconditions.checkState(pathType == PathType.TABLE_REF ||
-          pathType == PathType.ANY);
-      Path result = resolvePaths(rawPath, candidates, pathType, errors);
-      if (result != null) return result;
-      candidates.clear();
-
-      // Add paths rooted at a table with an unqualified and fully-qualified table name.
-      int end = Math.min(2, rawPath.size());
-      for (int tblNameIdx = 0; tblNameIdx < end; ++tblNameIdx) {
-        String dbName = (tblNameIdx == 0) ? getDefaultDb() : rawPath.get(0);
-        String tblName = rawPath.get(tblNameIdx);
-        Table tbl = null;
-        try {
-          tbl = getTable(dbName, tblName);
-        } catch (AnalysisException e) {
-          if (hasMissingTbls()) throw e;
-          // Ignore other exceptions to allow path resolution to continue.
-        }
-        if (tbl != null) {
-          candidates.add(new Path(tbl, rawPath.subList(tblNameIdx + 1, rawPath.size())));
-        }
-      }
-    }
-
-    Path result = resolvePaths(rawPath, candidates, pathType, errors);
-    if (result == null && resolveInAncestors && hasAncestors()) {
-      result = getParentAnalyzer().resolvePath(rawPath, pathType, true);
-    }
-    if (result == null) {
-      Preconditions.checkState(!errors.isEmpty());
-      throw new AnalysisException(errors.getFirst());
-    }
-    return result;
-  }
-
-  /**
-   * Returns a list of unresolved Paths that are rooted at a registered tuple
-   * descriptor matching a prefix of the given raw path.
-   */
-  public List<Path> getTupleDescPaths(List<String> rawPath)
-      throws AnalysisException {
-    ArrayList<Path> result = Lists.newArrayList();
-
-    // Path rooted at a tuple desc with an explicit or implicit unqualified alias.
-    TupleDescriptor rootDesc = getDescriptor(rawPath.get(0));
-    if (rootDesc != null) {
-      result.add(new Path(rootDesc, rawPath.subList(1, rawPath.size())));
-    }
-
-    // Path rooted at a tuple desc with an implicit qualified alias.
-    if (rawPath.size() > 1) {
-      rootDesc = getDescriptor(rawPath.get(0) + "." + rawPath.get(1));
-      if (rootDesc != null) {
-        result.add(new Path(rootDesc, rawPath.subList(2, rawPath.size())));
-      }
-    }
-    return result;
-  }
-
-  /**
-   * Resolves the given paths and checks them for legality and ambiguity. Returns the
-   * single legal path resolution if it exists, null otherwise.
-   * Populates 'errors' with a prioritized list of error messages starting with the
-   * most relevant one. The list contains at least one error message if null is returned.
-   */
-  private Path resolvePaths(List<String> rawPath, List<Path> paths, PathType pathType,
-      LinkedList<String> errors) {
-    // For generating error messages.
-    String pathTypeStr = null;
-    String pathStr = Joiner.on(".").join(rawPath);
-    if (pathType == PathType.SLOT_REF) {
-      pathTypeStr = "Column/field reference";
-    } else if (pathType == PathType.TABLE_REF) {
-      pathTypeStr = "Table reference";
-    } else if (pathType == PathType.ANY) {
-      pathTypeStr = "Path";
-    } else {
-      Preconditions.checkState(pathType == PathType.STAR);
-      pathTypeStr = "Star expression";
-      pathStr += ".*";
-    }
-
-    List<Path> legalPaths = Lists.newArrayList();
-    for (Path p: paths) {
-      if (!p.resolve()) continue;
-
-      // Check legality of the resolved path.
-      if (p.isRootedAtTuple() && !isVisible(p.getRootDesc().getId())) {
-        errors.addLast(String.format(
-            "Illegal %s '%s' of semi-/anti-joined table '%s'",
-            pathTypeStr.toLowerCase(), pathStr, p.getRootDesc().getAlias()));
-        continue;
-      }
-      switch (pathType) {
-        // Illegal cases:
-        // 1. Destination type is not a collection.
-        case TABLE_REF: {
-          if (!p.destType().isCollectionType()) {
-            errors.addFirst(String.format(
-                "Illegal table reference to non-collection type: '%s'\n" +
-                    "Path resolved to type: %s", pathStr, p.destType().toSql()));
-            continue;
-          }
-          break;
-        }
-        case SLOT_REF: {
-          // Illegal cases:
-          // 1. Path contains an intermediate collection reference.
-          // 2. Destination of the path is a catalog table or a registered alias.
-          if (p.hasNonDestCollection()) {
-            errors.addFirst(String.format(
-                "Illegal column/field reference '%s' with intermediate " +
-                "collection '%s' of type '%s'",
-                pathStr, p.getFirstCollectionName(),
-                p.getFirstCollectionType().toSql()));
-            continue;
-          }
-          // Error should be "Could not resolve...". No need to add it here explicitly.
-          if (p.getMatchedTypes().isEmpty()) continue;
-          break;
-        }
-        // Illegal cases:
-        // 1. Path contains an intermediate collection reference.
-        // 2. Destination type of the path is not a struct.
-        case STAR: {
-          if (p.hasNonDestCollection()) {
-            errors.addFirst(String.format(
-                "Illegal star expression '%s' with intermediate " +
-                "collection '%s' of type '%s'",
-                pathStr, p.getFirstCollectionName(),
-                p.getFirstCollectionType().toSql()));
-            continue;
-          }
-          if (!p.destType().isStructType()) {
-            errors.addFirst(String.format(
-                "Cannot expand star in '%s' because path '%s' resolved to type '%s'." +
-                "\nStar expansion is only valid for paths to a struct type.",
-                pathStr, Joiner.on(".").join(rawPath), p.destType().toSql()));
-            continue;
-          }
-          break;
-        }
-        case ANY: {
-          // Any path is valid.
-          break;
-        }
-      }
-      legalPaths.add(p);
-    }
-
-    if (legalPaths.size() > 1) {
-      errors.addFirst(String.format("%s is ambiguous: '%s'",
-          pathTypeStr, pathStr));
-      return null;
-    }
-    if (legalPaths.isEmpty()) {
-      if (errors.isEmpty()) {
-        errors.addFirst(String.format("Could not resolve %s: '%s'",
-            pathTypeStr.toLowerCase(), pathStr));
-      }
-      return null;
-    }
-    return legalPaths.get(0);
-  }
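For illustration, a hedged example of the two failure branches above; the table and
column names are hypothetical and not taken from this patch:

    // SELECT id FROM t1, t2   -- assume both t1 and t2 expose a column "id":
    // both candidate paths resolve legally, legalPaths.size() > 1, and analysis fails
    // with "Column/field reference is ambiguous: 'id'".
    //
    // SELECT bogus FROM t1    -- no candidate resolves and no specific error was
    // collected, so the fallback "Could not resolve column/field reference: 'bogus'"
    // is reported.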
-
-  /**
-   * Returns an existing or new SlotDescriptor for the given path. Always returns
-   * a new empty SlotDescriptor for paths with a collection-typed destination.
-   */
-  public SlotDescriptor registerSlotRef(Path slotPath) throws AnalysisException {
-    Preconditions.checkState(slotPath.isRootedAtTuple());
-    // Always register a new slot descriptor for collection types. The BE currently
-    // relies on this behavior for setting unnested collection slots to NULL.
-    if (slotPath.destType().isCollectionType()) {
-      SlotDescriptor result = addSlotDescriptor(slotPath.getRootDesc());
-      result.setPath(slotPath);
-      registerColumnPrivReq(result);
-      return result;
-    }
-    // SlotRefs with a scalar type are registered against the slot's
-    // fully-qualified lowercase path.
-    String key = slotPath.toString();
-    SlotDescriptor existingSlotDesc = slotPathMap_.get(key);
-    if (existingSlotDesc != null) return existingSlotDesc;
-    SlotDescriptor result = addSlotDescriptor(slotPath.getRootDesc());
-    result.setPath(slotPath);
-    slotPathMap_.put(key, result);
-    registerColumnPrivReq(result);
-    return result;
-  }
-
-  /**
-   * Registers a column-level privilege request if 'slotDesc' directly or indirectly
-   * refers to a table column. It handles both scalar and complex-typed columns.
-   */
-  private void registerColumnPrivReq(SlotDescriptor slotDesc) {
-    Preconditions.checkNotNull(slotDesc.getPath());
-    TupleDescriptor tupleDesc = slotDesc.getParent();
-    if (tupleDesc.isMaterialized() && tupleDesc.getTable() != null) {
-      Column column = tupleDesc.getTable().getColumn(
-          slotDesc.getPath().getRawPath().get(0));
-      if (column != null) {
-        registerPrivReq(new PrivilegeRequestBuilder().
-            allOf(Privilege.SELECT).onColumn(tupleDesc.getTableName().getDb(),
-            tupleDesc.getTableName().getTbl(), column.getName()).toRequest());
-      }
-    }
-  }
-
-  /**
-   * Creates a new slot descriptor and related state in globalState.
-   */
-  public SlotDescriptor addSlotDescriptor(TupleDescriptor tupleDesc) {
-    SlotDescriptor result = globalState_.descTbl.addSlotDescriptor(tupleDesc);
-    globalState_.blockBySlot.put(result.getId(), this);
-    return result;
-  }
-
-  /**
-   * Adds a new slot descriptor in tupleDesc that is identical to srcSlotDesc
-   * except for the path and slot id.
-   */
-  public SlotDescriptor copySlotDescriptor(SlotDescriptor srcSlotDesc,
-      TupleDescriptor tupleDesc) {
-    SlotDescriptor result = globalState_.descTbl.addSlotDescriptor(tupleDesc);
-    globalState_.blockBySlot.put(result.getId(), this);
-    result.setSourceExprs(srcSlotDesc.getSourceExprs());
-    result.setLabel(srcSlotDesc.getLabel());
-    result.setStats(srcSlotDesc.getStats());
-    result.setType(srcSlotDesc.getType());
-    result.setItemTupleDesc(srcSlotDesc.getItemTupleDesc());
-    return result;
-  }
-
-  /**
-   * Register all conjuncts in a list of predicates as Having-clause conjuncts.
-   */
-  public void registerConjuncts(List<Expr> l) throws AnalysisException {
-    for (Expr e: l) {
-      registerConjuncts(e, true);
-    }
-  }
-
-  /**
-   * Register all conjuncts in 'conjuncts' that make up the On-clause of the given
-   * right-hand side of a join. Assigns each conjunct a unique id. If rhsRef is
-   * the right-hand side of an outer join, then the conjuncts are
-   * registered such that they can only be evaluated by the node implementing that
-   * join.
-   */
-  public void registerOnClauseConjuncts(List<Expr> conjuncts, TableRef rhsRef)
-      throws AnalysisException {
-    Preconditions.checkNotNull(rhsRef);
-    Preconditions.checkNotNull(conjuncts);
-    List<ExprId> ojConjuncts = null;
-    if (rhsRef.getJoinOp().isOuterJoin()) {
-      ojConjuncts = globalState_.conjunctsByOjClause.get(rhsRef.getId());
-      if (ojConjuncts == null) {
-        ojConjuncts = Lists.newArrayList();
-        globalState_.conjunctsByOjClause.put(rhsRef.getId(), ojConjuncts);
-      }
-    }
-    for (Expr conjunct: conjuncts) {
-      conjunct.setIsOnClauseConjunct(true);
-      registerConjunct(conjunct);
-      if (rhsRef.getJoinOp().isOuterJoin()) {
-        globalState_.ojClauseByConjunct.put(conjunct.getId(), rhsRef);
-        ojConjuncts.add(conjunct.getId());
-      }
-      if (rhsRef.getJoinOp().isSemiJoin()) {
-        globalState_.sjClauseByConjunct.put(conjunct.getId(), rhsRef);
-      }
-      if (rhsRef.getJoinOp().isInnerJoin()) {
-        globalState_.ijClauseByConjunct.put(conjunct.getId(), rhsRef);
-      }
-      markConstantConjunct(conjunct, false);
-    }
-  }
-
-  /**
-   * Register all conjuncts that make up 'e'. If fromHavingClause is false, this conjunct
-   * is assumed to originate from a WHERE or ON clause.
-   */
-  public void registerConjuncts(Expr e, boolean fromHavingClause)
-      throws AnalysisException {
-    for (Expr conjunct: e.getConjuncts()) {
-      registerConjunct(conjunct);
-      markConstantConjunct(conjunct, fromHavingClause);
-    }
-  }
-
-  /**
-   * If the given conjunct is a constant non-oj conjunct, marks it as assigned, and
-   * evaluates the conjunct. If the conjunct evaluates to false, marks this query
-   * block as having an empty result set or as having an empty select-project-join
-   * portion, if fromHavingClause is true or false, respectively.
-   * No-op if the conjunct is not constant or is outer joined.
-   * Throws an AnalysisException if there is an error evaluating `conjunct`
-   */
-  private void markConstantConjunct(Expr conjunct, boolean fromHavingClause)
-      throws AnalysisException {
-    if (!conjunct.isConstant() || isOjConjunct(conjunct)) return;
-    markConjunctAssigned(conjunct);
-    if ((!fromHavingClause && !hasEmptySpjResultSet_)
-        || (fromHavingClause && !hasEmptyResultSet_)) {
-      try {
-        if (!FeSupport.EvalPredicate(conjunct, globalState_.queryCtx)) {
-          if (fromHavingClause) {
-            hasEmptyResultSet_ = true;
-          } else {
-            hasEmptySpjResultSet_ = true;
-          }
-        }
-      } catch (InternalException ex) {
-        throw new AnalysisException("Error evaluating \"" + conjunct.toSql() + "\"", ex);
-      }
-    }
-  }
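A hedged example of the two flags set above; the predicates are deliberately trivial:

    // WHERE 1 = 0   -- registered with fromHavingClause == false: EvalPredicate()
    //                  returns false and hasEmptySpjResultSet_ is set, i.e. the
    //                  select-project-join portion of the block is known to be empty.
    // HAVING 1 = 0  -- registered with fromHavingClause == true: hasEmptyResultSet_
    //                  is set instead, marking the whole query block as empty.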
-
-  /**
-   * Assigns a new id to the given conjunct and registers it with all tuple and slot ids
-   * it references and with the global conjunct list.
-   */
-  private void registerConjunct(Expr e) {
-    // always generate a new expr id; this might be a cloned conjunct that already
-    // has the id of its origin set
-    e.setId(globalState_.conjunctIdGenerator.getNextId());
-    globalState_.conjuncts.put(e.getId(), e);
-
-    ArrayList<TupleId> tupleIds = Lists.newArrayList();
-    ArrayList<SlotId> slotIds = Lists.newArrayList();
-    e.getIds(tupleIds, slotIds);
-    registerFullOuterJoinedConjunct(e);
-
-    // register single tid conjuncts
-    if (tupleIds.size() == 1) globalState_.singleTidConjuncts.add(e.getId());
-
-    LOG.trace("register tuple/slotConjunct: " + Integer.toString(e.getId().asInt())
-        + " " + e.toSql() + " " + e.debugString());
-
-    if (!(e instanceof BinaryPredicate)) return;
-    BinaryPredicate binaryPred = (BinaryPredicate) e;
-
-    // check whether this is an equi-join predicate, i.e., something of the
-    // form <expr1> = <expr2> where at least one of the exprs is bound by
-    // exactly one tuple id
-    if (binaryPred.getOp() != BinaryPredicate.Operator.EQ &&
-       binaryPred.getOp() != BinaryPredicate.Operator.NULL_MATCHING_EQ &&
-       binaryPred.getOp() != BinaryPredicate.Operator.NOT_DISTINCT) {
-      return;
-    }
-    // the binary predicate must refer to at least two tuples to be an eqJoinConjunct
-    if (tupleIds.size() < 2) return;
-
-    // examine children and update eqJoinConjuncts
-    for (int i = 0; i < 2; ++i) {
-      tupleIds = Lists.newArrayList();
-      binaryPred.getChild(i).getIds(tupleIds, null);
-      if (tupleIds.size() == 1) {
-        if (!globalState_.eqJoinConjuncts.containsKey(tupleIds.get(0))) {
-          List<ExprId> conjunctIds = Lists.newArrayList();
-          conjunctIds.add(e.getId());
-          globalState_.eqJoinConjuncts.put(tupleIds.get(0), conjunctIds);
-        } else {
-          globalState_.eqJoinConjuncts.get(tupleIds.get(0)).add(e.getId());
-        }
-        binaryPred.setIsEqJoinConjunct(true);
-        LOG.trace("register eqJoinConjunct: " + Integer.toString(e.getId().asInt()));
-      }
-    }
-  }
-
-  /**
-   * Create and register an auxiliary predicate to express an equivalence between two
-   * exprs (BinaryPredicate with EQ); this predicate does not need to be assigned, but
-   * it's used for equivalence class computation.
-   * Does nothing if the lhs or rhs expr is NULL. Registering an equivalence with NULL
-   * would be incorrect, because <expr> = NULL is false (even NULL = NULL).
-   */
-  public void createAuxEquivPredicate(Expr lhs, Expr rhs) {
-    // Check the expr type as well as the class because NullLiteral could have been
-    // implicitly cast to a type different than NULL.
-    if (lhs instanceof NullLiteral || rhs instanceof NullLiteral ||
-        lhs.getType().isNull() || rhs.getType().isNull()) {
-      return;
-    }
-    // create an eq predicate between lhs and rhs
-    BinaryPredicate p = new BinaryPredicate(BinaryPredicate.Operator.EQ, lhs, rhs);
-    p.setIsAuxExpr();
-    LOG.trace("register equiv predicate: " + p.toSql() + " " + p.debugString());
-    registerConjunct(p);
-  }
-
-  /**
-   * Creates an inferred equality predicate between the given slots.
-   */
-  public BinaryPredicate createInferredEqPred(SlotId lhsSlotId, SlotId rhsSlotId) {
-    BinaryPredicate pred = new BinaryPredicate(BinaryPredicate.Operator.EQ,
-        new SlotRef(globalState_.descTbl.getSlotDesc(lhsSlotId)),
-        new SlotRef(globalState_.descTbl.getSlotDesc(rhsSlotId)));
-    pred.setIsInferred();
-    // create casts if needed
-    pred.analyzeNoThrow(this);
-    return pred;
-  }
-
-  /**
-   * Return all unassigned non-constant registered conjuncts that are fully bound by
-   * the given list of tuple ids. If 'inclOjConjuncts' is false, conjuncts tied to an
-   * Outer Join clause are excluded.
-   */
-  public List<Expr> getUnassignedConjuncts(
-      List<TupleId> tupleIds, boolean inclOjConjuncts) {
-    LOG.trace("getUnassignedConjuncts for " + Id.printIds(tupleIds));
-    List<Expr> result = Lists.newArrayList();
-    for (Expr e: globalState_.conjuncts.values()) {
-      if (e.isBoundByTupleIds(tupleIds)
-          && !e.isAuxExpr()
-          && !globalState_.assignedConjuncts.contains(e.getId())
-          && ((inclOjConjuncts && !e.isConstant())
-              || !globalState_.ojClauseByConjunct.containsKey(e.getId()))) {
-        result.add(e);
-        LOG.trace("getUnassignedConjunct: " + e.toSql());
-      }
-    }
-    return result;
-  }
-
-  public boolean isOjConjunct(Expr e) {
-    return globalState_.ojClauseByConjunct.containsKey(e.getId());
-  }
-
-  public boolean isIjConjunct(Expr e) {
-    return globalState_.ijClauseByConjunct.containsKey(e.getId());
-  }
-
-  public TableRef getFullOuterJoinRef(Expr e) {
-    return globalState_.fullOuterJoinedConjuncts.get(e.getId());
-  }
-
-  public boolean isFullOuterJoined(Expr e) {
-    return globalState_.fullOuterJoinedConjuncts.containsKey(e.getId());
-  }
-
-  /**
-   * Return all unassigned registered conjuncts for node's table ref ids.
-   * Wrapper around getUnassignedConjuncts(List<TupleId> tupleIds).
-   */
-  public List<Expr> getUnassignedConjuncts(PlanNode node) {
-    return getUnassignedConjuncts(node.getTblRefIds());
-  }
-
-  /**
-   * Return all unassigned registered conjuncts that are fully bound by the given
-   * (logical) tuple ids, can be evaluated by 'tupleIds' and are not tied to an
-   * Outer Join clause.
-   */
-  public List<Expr> getUnassignedConjuncts(List<TupleId> tupleIds) {
-    LOG.trace("getUnassignedConjuncts for node with " + Id.printIds(tupleIds));
-    List<Expr> result = Lists.newArrayList();
-    for (Expr e: getUnassignedConjuncts(tupleIds, true)) {
-      if (canEvalPredicate(tupleIds, e)) {
-        result.add(e);
-        LOG.trace("getUnassignedConjunct: " + e.toSql());
-      }
-    }
-    return result;
-  }
-
-  /**
-   * Returns true if e must be evaluated by a join node. Note that it may still be
-   * safe to evaluate e elsewhere as well, but in any case the join must evaluate e.
-   */
-  public boolean evalByJoin(Expr e) {
-    List<TupleId> tids = Lists.newArrayList();
-    e.getIds(tids, null);
-    if (tids.isEmpty()) return false;
-    if (tids.size() > 1 || isOjConjunct(e) || isFullOuterJoined(e)
-        || (isOuterJoined(tids.get(0))
-            && (!e.isOnClauseConjunct() || isIjConjunct(e)))
-        || (isAntiJoinedConjunct(e) && !isSemiJoined(tids.get(0)))) {
-      return true;
-    }
-    return false;
-  }
-
-  /**
-   * Return all unassigned conjuncts of the outer join referenced by right-hand side
-   * table ref.
-   */
-  public List<Expr> getUnassignedOjConjuncts(TableRef ref) {
-    Preconditions.checkState(ref.getJoinOp().isOuterJoin());
-    List<Expr> result = Lists.newArrayList();
-    List<ExprId> candidates = globalState_.conjunctsByOjClause.get(ref.getId());
-    if (candidates == null) return result;
-    for (ExprId conjunctId: candidates) {
-      if (!globalState_.assignedConjuncts.contains(conjunctId)) {
-        Expr e = globalState_.conjuncts.get(conjunctId);
-        Preconditions.checkNotNull(e);
-        result.add(e);
-        LOG.trace("getUnassignedOjConjunct: " + e.toSql());
-      }
-    }
-    return result;
-  }
-
-  /**
-   * Return rhs ref of last Join clause that outer-joined id.
-   */
-  public TableRef getLastOjClause(TupleId id) {
-    return globalState_.outerJoinedTupleIds.get(id);
-  }
-
-  /**
-   * Return slot descriptor corresponding to column referenced in the context of
-   * tupleDesc, or null if no such reference exists.
-   */
-  public SlotDescriptor getColumnSlot(TupleDescriptor tupleDesc, Column col) {
-    for (SlotDescriptor slotDesc: tupleDesc.getSlots()) {
-      if (slotDesc.getColumn() == col) return slotDesc;
-    }
-    return null;
-  }
-
-  public DescriptorTable getDescTbl() { return globalState_.descTbl; }
-  public ImpaladCatalog getCatalog() { return globalState_.catalog; }
-  public Set<String> getAliases() { return aliasMap_.keySet(); }
-
-  /**
-   * Returns list of candidate equi-join conjuncts to be evaluated by the join node
-   * that is specified by the table ref ids of its left and right children.
-   * If the join to be performed is an outer join, then only equi-join conjuncts
-   * from its On-clause are returned. If an equi-join conjunct is full outer joined,
-   * then it is only added to the result if this join is the one to full-outer join it.
-   */
-  public List<Expr> getEqJoinConjuncts(List<TupleId> lhsTblRefIds,
-      List<TupleId> rhsTblRefIds) {
-    // Contains all equi-join conjuncts that have one child fully bound by one of the
-    // rhs table ref ids (the other child is not bound by that rhs table ref id).
-    List<ExprId> conjunctIds = Lists.newArrayList();
-    for (TupleId rhsId: rhsTblRefIds) {
-      List<ExprId> cids = globalState_.eqJoinConjuncts.get(rhsId);
-      if (cids == null) continue;
-      for (ExprId eid: cids) {
-        if (!conjunctIds.contains(eid)) conjunctIds.add(eid);
-      }
-    }
-
-    // Since we currently prevent join re-reordering across outer joins, we can never
-    // have a bushy outer join with multiple rhs table ref ids. A bushy outer join can
-    // only be constructed with an inline view (which has a single table ref id).
-    List<ExprId> ojClauseConjuncts = null;
-    if (rhsTblRefIds.size() == 1) {
-      ojClauseConjuncts = globalState_.conjunctsByOjClause.get(rhsTblRefIds.get(0));
-    }
-
-    // List of table ref ids that the join node will 'materialize'.
-    List<TupleId> nodeTblRefIds = Lists.newArrayList(lhsTblRefIds);
-    nodeTblRefIds.addAll(rhsTblRefIds);
-    List<Expr> result = Lists.newArrayList();
-    for (ExprId conjunctId: conjunctIds) {
-      Expr e = globalState_.conjuncts.get(conjunctId);
-      Preconditions.checkState(e != null);
-      if (!canEvalFullOuterJoinedConjunct(e, nodeTblRefIds) ||
-          !canEvalAntiJoinedConjunct(e, nodeTblRefIds)) {
-        continue;
-      }
-      if (ojClauseConjuncts != null && !ojClauseConjuncts.contains(conjunctId)) continue;
-      result.add(e);
-    }
-    return result;
-  }
-
-  /**
-   * Checks if a conjunct can be evaluated at a node materializing a list of tuple ids
-   * 'tids'.
-   */
-  public boolean canEvalFullOuterJoinedConjunct(Expr e, List<TupleId> tids) {
-    TableRef fullOuterJoin = getFullOuterJoinRef(e);
-    if (fullOuterJoin == null) return true;
-    return tids.containsAll(fullOuterJoin.getAllTableRefIds());
-  }
-
-  /**
-   * Returns true if predicate 'e' can be correctly evaluated by a tree materializing
-   * 'tupleIds', otherwise false:
-   * - the predicate needs to be bound by tupleIds
-   * - an On clause predicate against the non-nullable side of an Outer Join clause
-   *   can only be correctly evaluated by the join node that materializes the
-   *   Outer Join clause
-   * - otherwise, a predicate can only be correctly evaluated if for all outer-joined
-   *   referenced tids the last join to outer-join this tid has been materialized
-   */
-  public boolean canEvalPredicate(List<TupleId> tupleIds, Expr e) {
-    LOG.trace("canEval: " + e.toSql() + " " + e.debugString() + " "
-        + Id.printIds(tupleIds));
-    if (!e.isBoundByTupleIds(tupleIds)) return false;
-    ArrayList<TupleId> tids = Lists.newArrayList();
-    e.getIds(tids, null);
-    if (tids.isEmpty()) return true;
-
-    if (e.isOnClauseConjunct()) {
-      if (tids.size() > 1) {
-        // If the conjunct is from the ON-clause of an anti join, check if we can
-        // assign it to this node.
-        if (isAntiJoinedConjunct(e)) return canEvalAntiJoinedConjunct(e, tupleIds);
-        // bail if this is from an OJ On clause; the join node will pick
-        // it up later via getUnassignedOjConjuncts()
-        if (globalState_.ojClauseByConjunct.containsKey(e.getId())) return false;
-        // If this is not from an OJ On clause (e.g. where clause or On clause of an
-        // inner join) and is full-outer joined, we need to make sure it is not
-        // assigned below the full outer join node that outer-joined it.
-        return canEvalFullOuterJoinedConjunct(e, tupleIds);
-      }
-
-      TupleId tid = tids.get(0);
-      if (globalState_.ojClauseByConjunct.containsKey(e.getId())) {
-        // OJ On-clause predicate: okay if it's from
-        // the same On clause that makes tid nullable
-        // (otherwise e needn't be true when that tuple is set)
-        if (!globalState_.outerJoinedTupleIds.containsKey(tid)) return false;
-        if (globalState_.ojClauseByConjunct.get(e.getId())
-            != globalState_.outerJoinedTupleIds.get(tid)) {
-          return false;
-        }
-        // Single tuple id conjuncts specified in the FOJ On-clause are not allowed to be
-        // assigned below that full outer join in the operator tree.
-        TableRef tblRef = globalState_.ojClauseByConjunct.get(e.getId());
-        if (tblRef.getJoinOp().isFullOuterJoin()) return false;
-      } else {
-        // Non-OJ On-clause conjunct.
-        if (isOuterJoined(tid)) {
-          // If the conjunct references an outer-joined tuple, then evaluate the
-          // conjunct at the join that the On-clause belongs to.
-          TableRef onClauseTableRef = globalState_.ijClauseByConjunct.get(e.getId());
-          Preconditions.checkNotNull(onClauseTableRef);
-          return tupleIds.containsAll(onClauseTableRef.getAllTableRefIds());
-        }
-        // If this single tid conjunct is from the On-clause of an anti-join, check if we
-        // can assign it to this node.
-        if (isAntiJoinedConjunct(e)) return canEvalAntiJoinedConjunct(e, tupleIds);
-      }
-      // Single tid predicate that is not from an OJ On-clause and is outer-joined by a
-      // full outer join cannot be assigned below that full outer join in the
-      // operator tree.
-      return canEvalFullOuterJoinedConjunct(e, tupleIds);
-    }
-    if (isAntiJoinedConjunct(e)) return canEvalAntiJoinedConjunct(e, tupleIds);
-
-    for (TupleId tid: tids) {
-      LOG.trace("canEval: checking tid " + tid.toString());
-      TableRef rhsRef = getLastOjClause(tid);
-      // this is not outer-joined; ignore
-      if (rhsRef == null) continue;
-      // check whether the last join to outer-join 'tid' is materialized by tupleIds
-      boolean contains = tupleIds.containsAll(rhsRef.getAllTableRefIds());
-      LOG.trace("canEval: contains=" + (contains ? "true " : "false ")
-          + Id.printIds(tupleIds) + " " + Id.printIds(rhsRef.getAllTableRefIds()));
-      if (!tupleIds.containsAll(rhsRef.getAllTableRefIds())) return false;
-    }
-    return true;
-  }
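A hedged illustration of the outer-join rules above, using hypothetical tables a and b:

    // SELECT ... FROM a LEFT OUTER JOIN b ON (a.id = b.id) WHERE b.x = 10
    //
    // The On-clause conjunct a.id = b.id is tied to the OJ clause, so canEvalPredicate()
    // returns false for any child of the join; the join node itself picks it up via
    // getUnassignedOjConjuncts(). The Where-clause conjunct b.x = 10 references the
    // outer-joined tuple of b, so it is only evaluable by a node that materializes the
    // join of a and b, never below it.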
-
-  /**
-   * Checks if a conjunct from the On-clause of an anti join can be evaluated in a node
-   * that materializes a given list of tuple ids.
-   */
-  public boolean canEvalAntiJoinedConjunct(Expr e, List<TupleId> nodeTupleIds) {
-    TableRef antiJoinRef = getAntiJoinRef(e);
-    if (antiJoinRef == null) return true;
-    List<TupleId> tids = Lists.newArrayList();
-    e.getIds(tids, null);
-    if (tids.size() > 1) {
-      return nodeTupleIds.containsAll(antiJoinRef.getAllTableRefIds())
-          && antiJoinRef.getAllTableRefIds().containsAll(nodeTupleIds);
-    }
-    // A single tid conjunct that is anti-joined can be safely assigned to a
-    // node below the anti join that specified it.
-    return globalState_.semiJoinedTupleIds.containsKey(tids.get(0));
-  }
-
-  /**
-   * Returns a list of predicates that are fully bound by destTid. Predicates are derived
-   * by replacing the slots of a source predicate with slots of the destTid, if for each
-   * source slot there is an equivalent slot in destTid.
-   * In particular, the returned list contains predicates that must be evaluated
-   * at a join node (bound to outer-joined tuple) but can also be safely evaluated by a
-   * plan node materializing destTid. Such predicates are not marked as assigned.
-   * All other inferred predicates are marked as assigned if 'markAssigned'
-   * is true. This function returns bound predicates regardless of whether the source
-   * predicated have been assigned. It is up to the caller to decide if a bound predicate
-   * should actually be used.
-   * Destination slots in destTid can be ignored by passing them in ignoreSlots.
-   * TODO: exclude UDFs from predicate propagation? their overloaded variants could
-   * have very different semantics
-   */
-  public ArrayList<Expr> getBoundPredicates(TupleId destTid, Set<SlotId> ignoreSlots,
-      boolean markAssigned) {
-    ArrayList<Expr> result = Lists.newArrayList();
-    for (ExprId srcConjunctId: globalState_.singleTidConjuncts) {
-      Expr srcConjunct = globalState_.conjuncts.get(srcConjunctId);
-      if (srcConjunct instanceof SlotRef) continue;
-      Preconditions.checkNotNull(srcConjunct);
-      List<TupleId> srcTids = Lists.newArrayList();
-      List<SlotId> srcSids = Lists.newArrayList();
-      srcConjunct.getIds(srcTids, srcSids);
-      Preconditions.checkState(srcTids.size() == 1);
-
-      // Generate slot-mappings to bind srcConjunct to destTid.
-      TupleId srcTid = srcTids.get(0);
-      List<List<SlotId>> allDestSids =
-          getEquivDestSlotIds(srcTid, srcSids, destTid, ignoreSlots);
-      if (allDestSids.isEmpty()) continue;
-
-      // Indicates whether the source slots have equivalent slots that belong
-      // to an outer-joined tuple.
-      boolean hasOuterJoinedTuple = false;
-      for (SlotId srcSid: srcSids) {
-        if (hasOuterJoinedTuple(globalState_.equivClassBySlotId.get(srcSid))) {
-          hasOuterJoinedTuple = true;
-          break;
-        }
-      }
-
-      // It is incorrect to propagate predicates into a plan subtree that is on the
-      // nullable side of an outer join if the predicate evaluates to true when all
-      // its referenced tuples are NULL. The check below is conservative because the
-      // outer-joined tuple making 'hasOuterJoinedTuple' true could be in a parent block
-      // of 'srcConjunct', in which case it is safe to propagate 'srcConjunct' within
-      // child blocks of the outer-joined parent block.
-      // TODO: Make the check precise by considering the blocks (analyzers) where the
-      // outer-joined tuples in the dest slot's equivalence classes appear
-      // relative to 'srcConjunct'.
-      if (hasOuterJoinedTuple && isTrueWithNullSlots(srcConjunct)) continue;
-
-      // if srcConjunct comes out of an OJ's On clause, we need to make sure it's the
-      // same as the one that makes destTid nullable
-      // (otherwise srcConjunct needn't be true when destTid is set)
-      if (globalState_.ojClauseByConjunct.containsKey(srcConjunct.getId())) {
-        if (!globalState_.outerJoinedTupleIds.containsKey(destTid)) continue;
-        if (globalState_.ojClauseByConjunct.get(srcConjunct.getId())
-            != globalState_.outerJoinedTupleIds.get(destTid)) {
-          continue;
-        }
-        // Do not propagate conjuncts from the on-clause of full-outer or anti-joins.
-        TableRef tblRef = globalState_.ojClauseByConjunct.get(srcConjunct.getId());
-        if (tblRef.getJoinOp().isFullOuterJoin()) continue;
-      }
-
-      // Conjuncts specified in the ON-clause of an anti-join must be evaluated at that
-      // join node.
-      if (isAntiJoinedConjunct(srcConjunct)) continue;
-
-      // Generate predicates for all src-to-dest slot mappings.
-      for (List<SlotId> destSids: allDestSids) {
-        Preconditions.checkState(destSids.size() == srcSids.size());
-        Expr p;
-        if (srcSids.containsAll(destSids)) {
-          p = srcConjunct;
-        } else {
-          ExprSubstitutionMap smap = new ExprSubstitutionMap();
-          for (int i = 0; i < srcSids.size(); ++i) {
-            smap.put(
-                new SlotRef(globalState_.descTbl.getSlotDesc(srcSids.get(i))),
-                new SlotRef(globalState_.descTbl.getSlotDesc(destSids.get(i))));
-          }
-          try {
-            p = srcConjunct.trySubstitute(smap, this, false);
-          } catch (ImpalaException exc) {
-            // not an executable predicate; ignore
-            continue;
-          }
-          // Unset the id because this bound predicate itself is not registered, and
-          // to prevent callers from inadvertently marking the srcConjunct as assigned.
-          p.setId(null);
-          if (p instanceof BinaryPredicate) ((BinaryPredicate) p).setIsInferred();
-          LOG.trace("new pred: " + p.toSql() + " " + p.debugString());
-        }
-
-        if (markAssigned) {
-          // predicate assignment doesn't hold if:
-          // - the application against slotId doesn't transfer the value back to its
-          //   originating slot
-          // - the original predicate is on an OJ'd table but doesn't originate from
-          //   that table's OJ clause's ON clause (if it comes from anywhere but that
-          //   ON clause, it needs to be evaluated directly by the join node that
-          //   materializes the OJ'd table)
-          boolean reverseValueTransfer = true;
-          for (int i = 0; i < srcSids.size(); ++i) {
-            if (!hasValueTransfer(destSids.get(i), srcSids.get(i))) {
-              reverseValueTransfer = false;
-              break;
-            }
-          }
-
-          // Check if either srcConjunct or the generated predicate needs to be evaluated
-          // at a join node (IMPALA-2018).
-          boolean evalByJoin =
-              (evalByJoin(srcConjunct)
-               && (globalState_.ojClauseByConjunct.get(srcConjunct.getId())
-                != globalState_.outerJoinedTupleIds.get(srcTid)))
-              || (evalByJoin(p)
-                  && (globalState_.ojClauseByConjunct.get(p.getId())
-                   != globalState_.outerJoinedTupleIds.get(destTid)));
-
-          // mark all bound predicates including duplicate ones
-          if (reverseValueTransfer && !evalByJoin) markConjunctAssigned(srcConjunct);
-        }
-
-        // check if we already created this predicate
-        if (!result.contains(p)) result.add(p);
-      }
-    }
-    return result;
-  }
-
-  public ArrayList<Expr> getBoundPredicates(TupleId destTid) {
-    return getBoundPredicates(destTid, new HashSet<SlotId>(), true);
-  }
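A hedged sketch of the propagation this enables; the tables, columns and literal are
hypothetical:

    // SELECT ... FROM t1 JOIN t2 ON (t1.id = t2.id) WHERE t1.id = 10
    //
    // The equi-join conjunct establishes a value transfer between t1.id and t2.id, so
    // with destTid set to t2's tuple the single-tid conjunct t1.id = 10 is rewritten
    // through an ExprSubstitutionMap into the inferred predicate t2.id = 10, which the
    // scan of t2 can then evaluate directly.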
-
-  /**
-   * Modifies the analysis state associated with the rhs table ref of an outer join
-   * to accommodate a join inversion that changes the rhs table ref of the join from
-   * oldRhsTbl to newRhsTbl.
-   * TODO: Revisit this function and how outer joins are inverted. This function
-   * should not be necessary because the semantics of an inverted outer join do
-   * not change. This function will naturally become obsolete when we can transform
-   * outer joins with otherPredicates into inner joins.
-   */
-  public void invertOuterJoinState(TableRef oldRhsTbl, TableRef newRhsTbl) {
-    Preconditions.checkState(oldRhsTbl.getJoinOp().isOuterJoin());
-    // Invert analysis state for an outer join.
-    List<ExprId> conjunctIds =
-        globalState_.conjunctsByOjClause.remove(oldRhsTbl.getId());
-    if (conjunctIds != null) {
-      globalState_.conjunctsByOjClause.put(newRhsTbl.getId(), conjunctIds);
-      for (ExprId eid: conjunctIds) {
-        globalState_.ojClauseByConjunct.put(eid, newRhsTbl);
-      }
-    } else {
-      // An outer join is allowed not to have an On-clause if the rhs table ref is
-      // correlated or relative.
-      Preconditions.checkState(oldRhsTbl.isCorrelated() || oldRhsTbl.isRelative());
-    }
-    for (Map.Entry<TupleId, TableRef> e: globalState_.outerJoinedTupleIds.entrySet()) {
-      if (e.getValue() == oldRhsTbl) e.setValue(newRhsTbl);
-    }
-  }
-
-  /**
-   * For each equivalence class, adds/removes predicates from conjuncts such that it
-   * contains a minimum set of <lhsSlot> = <rhsSlot> predicates that establish the known
-   * equivalences between slots in lhsTids and rhsTids which must be disjoint.
-   * Preserves original conjuncts when possible. Assumes that predicates for establishing
-   * equivalences among slots in only lhsTids and only rhsTids have already been
-   * established. This function adds the remaining predicates to "connect" the disjoint
-   * equivalent slot sets of lhsTids and rhsTids.
-   * The intent of this function is to enable construction of a minimum spanning tree
-   * to cover the known slot equivalences. This function should be called for join
-   * nodes during plan generation to (1) remove redundant join predicates, and (2)
-   * establish equivalences among slots materialized at that join node.
-   * TODO: Consider optimizing for the cheapest minimum set of predicates.
-   * TODO: Consider caching the DisjointSet during plan generation instead of
-   * re-creating it here on every invocation.
-   */
-  public <T extends Expr> void createEquivConjuncts(List<TupleId> lhsTids,
-      List<TupleId> rhsTids, List<T> conjuncts) {
-    Preconditions.checkState(Collections.disjoint(lhsTids, rhsTids));
-
-    // Equivalence classes only containing slots belonging to lhsTids.
-    Map<EquivalenceClassId, List<SlotId>> lhsEquivClasses =
-        getEquivClasses(lhsTids);
-
-    // Equivalence classes only containing slots belonging to rhsTids.
-    Map<EquivalenceClassId, List<SlotId>> rhsEquivClasses =
-        getEquivClasses(rhsTids);
-
-    // Maps from a slot id to its set of equivalent slots. Used to track equivalences
-    // that have been established by predicates assigned/generated to plan nodes
-    // materializing lhsTids as well as the given conjuncts.
-    DisjointSet<SlotId> partialEquivSlots = new DisjointSet<SlotId>();
-    // Add the partial equivalences to the partialEquivSlots map. The equivalent-slot
-    // sets of slots from lhsTids are disjoint from those of slots from rhsTids.
-    // We need to 'connect' the disjoint slot sets by constructing a new predicate
-    // for each equivalence class (unless there is already one in 'conjuncts').
-    for (List<SlotId> partialEquivClass: lhsEquivClasses.values()) {
-      partialEquivSlots.bulkUnion(partialEquivClass);
-    }
-    for (List<SlotId> partialEquivClass: rhsEquivClasses.values()) {
-      partialEquivSlots.bulkUnion(partialEquivClass);
-    }
-
-    // Set of outer-joined slots referenced by conjuncts.
-    Set<SlotId> outerJoinedSlots = Sets.newHashSet();
-
-    // Update partialEquivSlots based on equality predicates in 'conjuncts'. Removes
-    // redundant conjuncts, unless they reference outer-joined slots (see below).
-    Iterator<T> conjunctIter = conjuncts.iterator();
-    while (conjunctIter.hasNext()) {
-      Expr conjunct = conjunctIter.next();
-      Pair<SlotId, SlotId> eqSlots = BinaryPredicate.getEqSlots(conjunct);
-      if (eqSlots == null) continue;
-      EquivalenceClassId firstEqClassId = getEquivClassId(eqSlots.first);
-      EquivalenceClassId secondEqClassId = getEquivClassId(eqSlots.second);
-      // slots may not be in the same eq class due to outer joins
-      if (!firstEqClassId.equals(secondEqClassId)) continue;
-
-      // Retain an otherwise redundant predicate if it references a slot of an
-      // outer-joined tuple that is not already referenced by another join predicate
-      // to maintain that the rows must satisfy outer-joined-slot IS NOT NULL
-      // (otherwise NULL tuples from outer joins could survive).
-      // TODO: Consider better fixes for outer-joined slots: (1) Create IS NOT NULL
-      // predicates and place them at the lowest possible plan node. (2) Convert outer
-      // joins into inner joins (or full outer joins into left/right outer joins).
-      boolean filtersOuterJoinNulls = false;
-      if (isOuterJoined(eqSlots.first)
-          && lhsTids.contains(getTupleId(eqSlots.first))
-          && !outerJoinedSlots.contains(eqSlots.first)) {
-        outerJoinedSlots.add(eqSlots.first);
-        filtersOuterJoinNulls = true;
-      }
-      if (isOuterJoined(eqSlots.second)
-          && lhsTids.contains(getTupleId(eqSlots.second))
-          && !outerJoinedSlots.contains(eqSlots.second)) {
-        outerJoinedSlots.add(eqSlots.second);
-        filtersOuterJoinNulls = true;
-      }
-      // retain conjunct if it connects two formerly unconnected equiv classes or
-      // it is required for outer-join semantics
-      if (!partialEquivSlots.union(eqSlots.first, eqSlots.second)
-          && !filtersOuterJoinNulls) {
-        conjunctIter.remove();
-      }
-    }
-
-    // For each equivalence class, construct a new predicate to 'connect' the disjoint
-    // slot sets.
-    for (Map.Entry<EquivalenceClassId, List<SlotId>> rhsEquivClass:
-      rhsEquivClasses.entrySet()) {
-      List<SlotId> lhsSlots = lhsEquivClasses.get(rhsEquivClass.getKey());
-      if (lhsSlots == null) continue;
-      List<SlotId> rhsSlots = rhsEquivClass.getValue();
-      Preconditions.checkState(!lhsSlots.isEmpty() && !rhsSlots.isEmpty());
-
-      if (!partialEquivSlots.union(lhsSlots.get(0), rhsSlots.get(0))) continue;
-      // Do not create a new predicate from slots that are full outer joined because that
-      // predicate may be incorrectly assigned to a node below the associated full outer
-      // join.
-      if (isFullOuterJoined(lhsSlots.get(0)) || isFullOuterJoined(rhsSlots.get(0))) {
-        continue;
-      }
-      T newEqPred = (T) createInferredEqPred(lhsSlots.get(0), rhsSlots.get(0));
-      if (!hasMutualValueTransfer(lhsSlots.get(0), rhsSlots.get(0))) continue;
-      conjuncts.add(newEqPred);
-    }
-  }
-
-  /**
-   * For each equivalence class, adds/removes predicates from conjuncts such that
-   * it contains a minimum set of <slot> = <slot> predicates that establish
-   * the known equivalences between slots belonging to tid. Preserves original
-   * conjuncts when possible.
-   * The intent of this function is to enable construction of a minimum spanning tree
-   * to cover the known slot equivalences. This function should be called to add
-   * conjuncts to plan nodes that materialize a new tuple, e.g., scans and aggregations.
-   * Does not enforce equivalence between slots in ignoreSlots. Equivalences (if any)
-   * among slots in ignoreSlots are assumed to have already been enforced.
-   * TODO: Consider optimizing for the cheapest minimum set of predicates.
-   */
-  public <T extends Expr> void createEquivConjuncts(TupleId tid, List<T> conjuncts,
-      Set<SlotId> ignoreSlots) {
-    // Maps from a slot id to its set of equivalent slots. Used to track equivalences
-    // that have been established by 'conjuncts' and the 'ignoreSlots'.
-    DisjointSet<SlotId> partialEquivSlots = new DisjointSet<SlotId>();
-
-    // Treat ignored slots as already connected. Add the ignored slots at this point
-    // such that redundant conjuncts are removed.
-    partialEquivSlots.bulkUnion(ignoreSlots);
-    partialEquivSlots.checkConsistency();
-
-    // Update partialEquivSlots based on equality predicates in 'conjuncts'. Removes
-    // redundant conjuncts, unless they reference outer-joined slots (see below).
-    Iterator<T> conjunctIter = conjuncts.iterator();
-    while (conjunctIter.hasNext()) {
-      Expr conjunct = conjunctIter.next();
-      Pair<SlotId, SlotId> eqSlots = BinaryPredicate.getEqSlots(conjunct);
-      if (eqSlots == null) continue;
-      EquivalenceClassId firstEqClassId = getEquivClassId(eqSlots.first);
-      EquivalenceClassId secondEqClassId = getEquivClassId(eqSlots.second);
-      // slots may not be in the same eq class due to outer joins
-      if (!firstEqClassId.equals(secondEqClassId)) continue;
-      // update equivalences and remove redundant conjuncts
-      if (!partialEquivSlots.union(eqSlots.first, eqSlots.second)) conjunctIter.remove();
-    }
-    // Suppose conjuncts had these predicates belonging to equivalence classes e1 and e2:
-    // e1: s1 = s2, s3 = s4, s3 = s5
-    // e2: s10 = s11
-    // partialEquivSlots should contain the following entries at this point:
-    // s1 -> {s1, s2}
-    // s2 -> {s1, s2}
-    // s3 -> {s3, s4, s5}
-    // s4 -> {s3, s4, s5}
-    // s5 -> {s3, s4, s5}
-    // s10 -> {s10, s11}
-    // s11 -> {s10, s11}
-    // Assuming e1 = {s1, s2, s3, s4, s5} we need to generate one additional equality
-    // predicate to "connect" {s1, s2} and {s3, s4, s5}.
-
-    // These are the equivalences that need to be established by constructing conjuncts
-    // to form a minimum spanning tree.
-    Map<EquivalenceClassId, List<SlotId>> targetEquivClasses =
-        getEquivClasses(Lists.newArrayList(tid));
-    for (Map.Entry<EquivalenceClassId, List<SlotId>> targetEquivClass:
-      targetEquivClasses.entrySet()) {
-      // Loop over all pairs of equivalent slots and merge their disjoint slots sets,
-      // creating missing equality predicates as necessary.
-      List<SlotId> slotIds = targetEquivClass.getValue();
-      boolean done = false;
-      for (int i = 1; i < slotIds.size(); ++i) {
-        SlotId rhs = slotIds.get(i);
-        for (int j = 0; j < i; ++j) {
-          SlotId lhs = slotIds.get(j);
-          if (!partialEquivSlots.union(lhs, rhs)) continue;
-          if (!hasMutualValueTransfer(lhs, rhs)) continue;
-          conjuncts.add((T) createInferredEqPred(lhs, rhs));
-          // Check for early termination.
-          if (partialEquivSlots.get(lhs).size() == slotIds.size()) {
-            done = true;
-            break;
-          }
-        }
-        if (done) break;
-      }
-    }
-  }
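A minimal, self-contained sketch of the retain-or-remove decision above. It substitutes
a toy union-find keyed by plain ints for Impala's DisjointSet<SlotId> (the class name
EquivSpanSketch is made up) and reuses the e1 predicates from the comment in the method
body:

    import java.util.HashMap;
    import java.util.Map;

    public class EquivSpanSketch {
      private final Map<Integer, Integer> parent = new HashMap<>();

      // Returns the representative of s, with path compression.
      private int find(int s) {
        parent.putIfAbsent(s, s);
        int p = parent.get(s);
        if (p == s) return s;
        int root = find(p);
        parent.put(s, root);
        return root;
      }

      // Returns true if a and b were previously unconnected (the predicate is kept),
      // false if they were already connected (the predicate is redundant).
      public boolean union(int a, int b) {
        int ra = find(a);
        int rb = find(b);
        if (ra == rb) return false;
        parent.put(ra, rb);
        return true;
      }

      public static void main(String[] args) {
        EquivSpanSketch equiv = new EquivSpanSketch();
        // s1 = s2, s3 = s4, s3 = s5 (the e1 example), plus two candidate join predicates.
        int[][] preds = {{1, 2}, {3, 4}, {3, 5}, {1, 4}, {2, 5}};
        for (int[] p : preds) {
          System.out.println("s" + p[0] + " = s" + p[1] + " -> "
              + (equiv.union(p[0], p[1]) ? "kept" : "redundant, removed"));
        }
        // The first four predicates each connect previously disjoint sets and are kept;
        // s2 = s5 is redundant once {s1, s2} and {s3, s4, s5} have been connected.
      }
    }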
-
-  public <T extends Expr> void createEquivConjuncts(TupleId tid, List<T> conjuncts) {
-    createEquivConjuncts(tid, conjuncts, new HashSet<SlotId>());
-  }
-
-  /**
-   * Returns a map of partial equivalence classes that only contains slot ids belonging
-   * to the given tuple ids. Only contains equivalence classes with more than one member.
-   */
-  private Map<EquivalenceClassId, List<SlotId>> getEquivClasses(List<TupleId> tids) {
-    Map<EquivalenceClassId, List<SlotId>> result = Maps.newHashMap();
-    for (TupleId tid: tids) {
-      for (SlotDescriptor slotDesc: getTupleDesc(tid).getSlots()) {
-        EquivalenceClassId eqClassId = getEquivClassId(slotDesc.getId());
-        // Ignore equivalence classes that are empty or only have a single member.
-        if (globalState_.equivClassMembers.get(eqClassId).size() <= 1) continue;
-        List<SlotId> slotIds = result.get(eqClassId);
-        if (slotIds == null) {
-          slotIds = Lists.newArrayList();
-          result.put(eqClassId, slotIds);
-        }
-        slotIds.add(slotDesc.getId());
-      }
-    }
-    return result;
-  }
-
-  /**
-   * Returns a list of slot mappings from srcTid to destTid for the purpose of predicate
-   * propagation. Each mapping assigns every slot in srcSids to an equivalent slot in
-   * destTid. Does not generate all possible mappings, but limits the results to
-   * useful and/or non-redundant mappings, i.e., those mappings that would improve
-   * the performance of query execution.
-   */
-  private List<List<SlotId>> getEquivDestSlotIds(TupleId srcTid, List<SlotId> srcSids,
-      TupleId destTid, Set<SlotId> ignoreSlots) {
-    List<List<SlotId>> allDestSids = Lists.newArrayList();
-    TupleDescriptor destTupleDesc = getTupleDesc(destTid);
-    if (srcSids.size() == 1) {
-      // Generate all mappings to propagate predicates of the form <slot> <op> <constant>
-      // to as many destination slots as possible.
-      // TODO: If srcTid == destTid we could limit the mapping to partition
-      // columns because mappings to non-partition columns do not provide
-      // a performance benefit.
-      SlotId srcSid = srcSids.get(0);
-      for (SlotDescriptor destSlot: destTupleDesc.getSlots()) {
-        if (ignoreSlots.contains(destSlot.getId())) continue;
-        if (hasValueTransfer(srcSid, destSlot.getId())) {
-          allDestSids.add(Lists.newArrayList(destSlot.getId()));
-        }
-      }
-    } else if (srcTid.equals(destTid)) {
-      // Multiple source slot ids and srcTid == destTid. Inter-tuple transfers are
-      // already expressed by the original conjuncts. Any mapping would be redundant.
-      // Still add srcSids to the result because we rely on getBoundPredicates() to
-      // include predicates that can safely be evaluated below an outer join, but must
-      // also be evaluated by the join itself (evalByJoin() == true).
-      allDestSids.add(srcSids);
-    } else {
-      // Multiple source slot ids and srcTid != destTid. Pick the first mapping
-      // where each srcSid is mapped to a different destSid to avoid generating
-      // redundant and/or trivial predicates.
-      // TODO: This approach is not guaranteed to find the best slot mapping
-      // (e.g., against partition columns) or all non-redundant mappings.
-      // The limitations are shown in predicate-propagation.test.
-      List<SlotId> destSids = Lists.newArrayList();
-      for (SlotId srcSid: srcSids) {
-        for (SlotDescriptor destSlot: destTupleDesc.getSlots()) {
-          if (ignoreSlots.contains(destSlot.getId())) continue;
-          if (hasValueTransfer(srcSid, destSlot.getId())
-              && !destSids.contains(destSlot.getId())) {
-            destSids.add(destSlot.getId());
-            break;
-          }
-        }
-      }
-      if (destSids.size() == srcSids.size()) allDestSids.add(destSids);
-    }
-    return allDestSids;
-  }
-
-  /**
-   * Returns true if the equivalence class identified by 'eqClassId' contains
-   * a slot belonging to an outer-joined tuple.
-   */
-  private boolean hasOuterJoinedTuple(EquivalenceClassId eqClassId) {
-    ArrayList<SlotId> eqClass = globalState_.equivClassMembers.get(eqClassId);
-    for (SlotId s: eqClass) {
-      if (isOuterJoined(getTupleId(s))) return true;
-    }
-    return false;
-  }
-
-  /**
-   * Returns true if 'p' evaluates to true when all its referenced slots are NULL,
-   * false otherwise.
-   * TODO: Can we avoid dealing with the exceptions thrown by analysis and eval?
-   */
-  public boolean isTrueWithNullSlots(Expr p) {
-    // Construct predicate with all SlotRefs substituted by NullLiterals.
-    List<SlotRef> slotRefs = Lists.newArrayList();
-    p.collect(Predicates.instanceOf(SlotRef.class), slotRefs);
-
-    // Map for substituting SlotRefs with NullLiterals.
-    ExprSubstitutionMap nullSmap = new ExprSubstitutionMap();
-    for (SlotRef slotRef: slotRefs) {
-        // Preserve the original SlotRef type to ensure all substituted
-        // subexpressions in the predicate have the same return type and
-        // function signature as in the original predicate.
-        nullSmap.put(slotRef.clone(), NullLiteral.create(slotRef.getType()));
-    }
-    Expr nullTuplePred = p.substitute(nullSmap, this, false);
-    try {
-      return FeSupport.EvalPredicate(nullTuplePred, getQueryCtx());
-    } catch (InternalException e) {
-      Preconditions.checkState(false, "Failed to evaluate generated predicate: "
-          + nullTuplePred.toSql() + "." + e.getMessage());
-    }
-    return true;
-  }
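A hedged example of why the caller (getBoundPredicates()) needs this check; the table
names are hypothetical:

    // SELECT ... FROM a LEFT OUTER JOIN b ON (a.id = b.id) WHERE b.x IS NULL
    //
    // Substituting a NullLiteral for b.x makes the predicate evaluate to true, so it
    // keeps the NULL-extended rows the outer join produces. Propagating it below the
    // join into the scan of b would instead drop every b row with a non-NULL x and
    // change which a rows get NULL-extended, so getBoundPredicates() skips such
    // predicates when an outer-joined tuple is involved.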
-
-  public TupleId getTupleId(SlotId slotId) {
-    return globalState_.descTbl.getSlotDesc(slotId).getParent().getId();
-  }
-
-  public void registerValueTransfer(SlotId id1, SlotId id2) {
-    globalState_.registeredValueTransfers.add(new Pair(id1, id2));
-  }
-
-  public boolean isOuterJoined(TupleId tid) {
-    return globalState_.outerJoinedTupleIds.containsKey(tid);
-  }
-
-  public boolean isOuterJoined(SlotId sid) {
-    return isOuterJoined(getTupleId(sid));
-  }
-
-  public boolean isSemiJoined(TupleId tid) {
-    return globalState_.semiJoinedTupleIds.containsKey(tid);
-  }
-
-  public boolean isAntiJoinedConjunct(Expr e) {
-    return getAntiJoinRef(e) != null;
-  }
-
-  public TableRef getAntiJoinRef(Expr e) {
-    TableRef tblRef = globalState_.sjClauseByConjunct.get(e.getId());
-    if (tblRef == null) return null;
-    return (tblRef.getJoinOp().isAntiJoin()) ? tblRef : null;
-  }
-
-  public boolean isFullOuterJoined(TupleId tid) {
-    return globalState_.fullOuterJoinedTupleIds.containsKey(tid);
-  }
-
-  public boolean isFullOuterJoined(SlotId sid) {
-    return isFullOuterJoined(getTupleId(sid));
-  }
-
-  public boolean isVisible(TupleId tid) {
-    return tid == visibleSemiJoinedTupleId_ || !isSemiJoined(tid);
-  }
-
-  public boolean containsOuterJoinedTid(List<TupleId> tids) {
-    for (TupleId tid: tids) {
-      if (isOuterJoined(tid)) return true;
-    }
-    return false;
-  }
-
-  /**
-   * Populate globalState.valueTransfer based on the registered equi-join predicates
-   * of the form <slotref> = <slotref>.
-   */
-  public void computeEquivClasses() {
-    globalState_.valueTransferGraph = new ValueTransferGraph();
-    globalState_.valueTransferGraph.computeValueTransfers();
-
-    // we start out by assigning each slot to its own equiv class
-    int numSlots = globalState_.descTbl.getMaxSlotId().asInt() + 1;
-    for (int i = 0; i < numSlots; ++i) {
-      EquivalenceClassId id = globalState_.equivClassIdGenerator.getNextId();
-      globalState_.equivClassMembers.put(id, Lists.newArrayList(new SlotId(i)));
-    }
-
-    // merge two classes if there is a value transfer between all members of the
-    // combined class; do this until there's nothing left to merge
-    boolean merged;
-    do {
-      merged = false;
-      for (Map.Entry<EquivalenceClassId, ArrayList<SlotId>> e1:
-          globalState_.equivClassMembers.entrySet()) {
-        for (Map.Entry<EquivalenceClassId, ArrayList<SlotId>> e2:
-            globalState_.equivClassMembers.entrySet()) {
-          if (e1.getKey() == e2.getKey()) continue;
-          List<SlotId> class1Members = e1.getValue();
-          if (class1Members.isEmpty()) continue;
-          List<SlotId> class2Members = e2.getValue();
-          if (class2Members.isEmpty()) continue;
-
-          // check whether we can transfer values between all members
-          boolean canMerge = true;
-          for (SlotId class1Slot: class1Members) {
-            for (SlotId class2Slot: class2Members) {
-              if (!hasValueTransfer(class1Slot, class2Slot)
-                  && !hasValueTransfer(class2Slot, class1Slot)) {
-                canMerge = false;
-                break;
-              }
-            }
-            if (!canMerge) break;
-          }
-          if (!canMerge) continue;
-
-          // merge classes 1 and 2 by transferring 2 into 1
-          class1Members.addAll(class2Members);
-          class2Members.clear();
-          merged = true;
-        }
-      }
-    } while (merged);
-
-    // populate equivC

<TRUNCATED>


[46/61] [partial] incubator-impala git commit: IMPALA-3786: Replace "cloudera" with "apache" (part 1)

Posted by kw...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/ArithmeticExpr.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/ArithmeticExpr.java b/fe/src/main/java/com/cloudera/impala/analysis/ArithmeticExpr.java
deleted file mode 100644
index bf8b0ea..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/ArithmeticExpr.java
+++ /dev/null
@@ -1,268 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import com.cloudera.impala.catalog.Db;
-import com.cloudera.impala.catalog.Function.CompareMode;
-import com.cloudera.impala.catalog.ScalarFunction;
-import com.cloudera.impala.catalog.ScalarType;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TExprNode;
-import com.cloudera.impala.thrift.TExprNodeType;
-import com.google.common.base.Objects;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-public class ArithmeticExpr extends Expr {
-  enum OperatorPosition {
-    BINARY_INFIX,
-    UNARY_PREFIX,
-    UNARY_POSTFIX,
-  }
-
-  enum Operator {
-    MULTIPLY("*", "multiply", OperatorPosition.BINARY_INFIX),
-    DIVIDE("/", "divide", OperatorPosition.BINARY_INFIX),
-    MOD("%", "mod", OperatorPosition.BINARY_INFIX),
-    INT_DIVIDE("DIV", "int_divide", OperatorPosition.BINARY_INFIX),
-    ADD("+", "add", OperatorPosition.BINARY_INFIX),
-    SUBTRACT("-", "subtract", OperatorPosition.BINARY_INFIX),
-    BITAND("&", "bitand", OperatorPosition.BINARY_INFIX),
-    BITOR("|", "bitor", OperatorPosition.BINARY_INFIX),
-    BITXOR("^", "bitxor", OperatorPosition.BINARY_INFIX),
-    BITNOT("~", "bitnot", OperatorPosition.UNARY_PREFIX),
-    FACTORIAL("!", "factorial", OperatorPosition.UNARY_POSTFIX);
-
-    private final String description_;
-    private final String name_;
-    private final OperatorPosition pos_;
-
-    private Operator(String description, String name, OperatorPosition pos) {
-      this.description_ = description;
-      this.name_ = name;
-      this.pos_ = pos;
-    }
-
-    @Override
-    public String toString() { return description_; }
-    public String getName() { return name_; }
-    public OperatorPosition getPos() { return pos_; }
-
-    public boolean isUnary() {
-      return pos_ == OperatorPosition.UNARY_PREFIX ||
-             pos_ == OperatorPosition.UNARY_POSTFIX;
-    }
-
-    public boolean isBinary() {
-      return pos_ == OperatorPosition.BINARY_INFIX;
-    }
-  }
-
-  private final Operator op_;
-
-  public Operator getOp() { return op_; }
-
-  public ArithmeticExpr(Operator op, Expr e1, Expr e2) {
-    super();
-    this.op_ = op;
-    Preconditions.checkNotNull(e1);
-    children_.add(e1);
-    Preconditions.checkArgument((op.isUnary() && e2 == null) ||
-        (op.isBinary() && e2 != null));
-    if (e2 != null) children_.add(e2);
-  }
-
-  /**
-   * Copy c'tor used in clone().
-   */
-  protected ArithmeticExpr(ArithmeticExpr other) {
-    super(other);
-    op_ = other.op_;
-  }
-
-  public static void initBuiltins(Db db) {
-    for (Type t: Type.getNumericTypes()) {
-      db.addBuiltin(ScalarFunction.createBuiltinOperator(
-          Operator.MULTIPLY.getName(), Lists.newArrayList(t, t), t));
-      db.addBuiltin(ScalarFunction.createBuiltinOperator(
-          Operator.ADD.getName(), Lists.newArrayList(t, t), t));
-      db.addBuiltin(ScalarFunction.createBuiltinOperator(
-          Operator.SUBTRACT.getName(), Lists.newArrayList(t, t), t));
-    }
-    db.addBuiltin(ScalarFunction.createBuiltinOperator(
-        Operator.DIVIDE.getName(),
-        Lists.<Type>newArrayList(Type.DOUBLE, Type.DOUBLE),
-        Type.DOUBLE));
-    db.addBuiltin(ScalarFunction.createBuiltinOperator(
-        Operator.DIVIDE.getName(),
-        Lists.<Type>newArrayList(Type.DECIMAL, Type.DECIMAL),
-        Type.DECIMAL));
-
-    /*
-     * MOD(), FACTORIAL(), BITAND(), BITOR(), BITXOR(), and BITNOT() are registered as
-     * builtins, see impala_functions.py
-     */
-    for (Type t: Type.getIntegerTypes()) {
-      db.addBuiltin(ScalarFunction.createBuiltinOperator(
-          Operator.INT_DIVIDE.getName(), Lists.newArrayList(t, t), t));
-    }
-  }
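A hedged note on the overloads registered above; the literals are only examples:

    // Only DOUBLE and DECIMAL overloads exist for "/", so 5 / 2 is resolved by casting
    // the operands and yields 2.5, whereas DIV keeps per-integer-type overloads and
    // 5 DIV 2 yields 2.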
-
-  @Override
-  public String debugString() {
-    return Objects.toStringHelper(this)
-        .add("op", op_)
-        .addValue(super.debugString())
-        .toString();
-  }
-
-  @Override
-  public String toSqlImpl() {
-    if (children_.size() == 1) {
-      if (op_.getPos() == OperatorPosition.UNARY_PREFIX) {
-        return op_.toString() + getChild(0).toSql();
-      } else {
-        assert(op_.getPos() == OperatorPosition.UNARY_POSTFIX);
-        return getChild(0).toSql() + op_.toString();
-      }
-    } else {
-      Preconditions.checkState(children_.size() == 2);
-      return getChild(0).toSql() + " " + op_.toString() + " " + getChild(1).toSql();
-    }
-  }
-
-  @Override
-  protected void toThrift(TExprNode msg) {
-    msg.node_type = TExprNodeType.FUNCTION_CALL;
-  }
-
-  /**
-   * Inserts a cast from child[childIdx] to targetType if one is necessary.
-   * Note this is different from Expr.castChild(): for decimal arithmetic the cast
-   * is handled as part of the operator and, in general, the return type does not
-   * match the input types.
-   */
-  void castChild(int childIdx, Type targetType) throws AnalysisException {
-    Type t = getChild(childIdx).getType();
-    if (t.matchesType(targetType)) return;
-    if (targetType.isDecimal() && !t.isNull()) {
-      Preconditions.checkState(t.isScalarType());
-      targetType = ((ScalarType) t).getMinResolutionDecimal();
-    }
-    castChild(targetType, childIdx);
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    if (isAnalyzed_) return;
-    super.analyze(analyzer);
-    for (Expr child: children_) {
-      Expr operand = (Expr) child;
-      if (!operand.type_.isNumericType() && !operand.type_.isNull()) {
-        String errMsg = "Arithmetic operation requires numeric operands: " + toSql();
-        if (operand instanceof Subquery && !operand.type_.isScalarType()) {
-          errMsg = "Subquery must return a single row: " + operand.toSql();
-        }
-        throw new AnalysisException(errMsg);
-      }
-    }
-
-    convertNumericLiteralsFromDecimal(analyzer);
-    Type t0 = getChild(0).getType();
-    Type t1 = null;
-    if (op_.isUnary()) {
-      Preconditions.checkState(children_.size() == 1);
-    } else if (op_.isBinary()) {
-      Preconditions.checkState(children_.size() == 2);
-      t1 = getChild(1).getType();
-    }
-    if (hasChildCosts()) evalCost_ = getChildCosts() + ARITHMETIC_OP_COST;
-
-    String fnName = op_.getName();
-    switch (op_) {
-      case ADD:
-      case SUBTRACT:
-      case DIVIDE:
-      case MULTIPLY:
-      case MOD:
-        type_ = TypesUtil.getArithmeticResultType(t0, t1, op_);
-        // If both of the children are null, we'll default to the DOUBLE version of the
-        // operator. This prevents the BE from seeing NULL_TYPE.
-        if (type_.isNull()) type_ = Type.DOUBLE;
-        break;
-
-      case INT_DIVIDE:
-      case BITAND:
-      case BITOR:
-      case BITXOR:
-        if ((!t0.isNull() && !t0.isIntegerType()) ||
-            (!t1.isNull() && !t1.isIntegerType())) {
-          throw new AnalysisException("Invalid non-integer argument to operation '" +
-              op_.toString() + "': " + this.toSql());
-        }
-        type_ = Type.getAssignmentCompatibleType(t0, t1, false);
-        // If both of the children are null, we'll default to the INT version of the
-        // operator. This prevents the BE from seeing NULL_TYPE.
-        if (type_.isNull()) type_ = Type.INT;
-        Preconditions.checkState(type_.isIntegerType());
-        break;
-      case BITNOT:
-      case FACTORIAL:
-        if (!t0.isNull() && !t0.isIntegerType()) {
-          throw new AnalysisException("'" + op_.toString() + "'" +
-              " operation only allowed on integer types: " + toSql());
-        }
-        // Special-case NULL to resolve to the appropriate type.
-        if (op_ == Operator.BITNOT) {
-          if (t0.isNull()) castChild(0, Type.INT);
-        } else {
-          assert(op_ == Operator.FACTORIAL);
-          if (t0.isNull()) castChild(0, Type.BIGINT);
-        }
-        fn_ = getBuiltinFunction(analyzer, op_.getName(), collectChildReturnTypes(),
-            CompareMode.IS_SUPERTYPE_OF);
-        Preconditions.checkNotNull(fn_);
-        castForFunctionCall(false);
-        type_ = fn_.getReturnType();
-        return;
-      default:
-        // the programmer forgot to deal with a case
-        Preconditions.checkState(false,
-            "Unknown arithmetic operation " + op_.toString() + " in: " + this.toSql());
-        break;
-    }
-
-    // Don't cast from decimal to decimal. The BE function can just handle this.
-    if (!(type_.isDecimal() && t0.isDecimal())) castChild(0, type_);
-    if (!(type_.isDecimal() && t1.isDecimal())) castChild(1, type_);
-    t0 = getChild(0).getType();
-    t1 = getChild(1).getType();
-
-    fn_ = getBuiltinFunction(analyzer, fnName, collectChildReturnTypes(),
-        CompareMode.IS_IDENTICAL);
-    if (fn_ == null) {
-      Preconditions.checkState(false, String.format("No match " +
-          "for '%s' with operand types %s and %s", toSql(), t0, t1));
-    }
-    Preconditions.checkState(type_.matchesType(fn_.getReturnType()));
-  }
-
-  @Override
-  public Expr clone() { return new ArithmeticExpr(this); }
-}
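
For reference, the OperatorPosition handling in toSqlImpl() above prints unary prefix
operators before the operand, the unary postfix FACTORIAL after it, and binary
operators infix. A minimal standalone Java sketch of that formatting (hypothetical
class, not part of this patch):

    // Mirrors the prefix/postfix/infix cases of ArithmeticExpr.toSqlImpl().
    class ArithmeticSqlSketch {
      enum Pos { UNARY_PREFIX, UNARY_POSTFIX, BINARY_INFIX }

      static String toSql(String op, Pos pos, String lhs, String rhs) {
        switch (pos) {
          case UNARY_PREFIX: return op + lhs;           // e.g. "~x"
          case UNARY_POSTFIX: return lhs + op;          // e.g. "x!"
          default: return lhs + " " + op + " " + rhs;   // e.g. "a DIV b"
        }
      }

      public static void main(String[] args) {
        System.out.println(toSql("~", Pos.UNARY_PREFIX, "x", null));
        System.out.println(toSql("!", Pos.UNARY_POSTFIX, "x", null));
        System.out.println(toSql("DIV", Pos.BINARY_INFIX, "a", "b"));
      }
    }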

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/AuthorizationStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/AuthorizationStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/AuthorizationStmt.java
deleted file mode 100644
index 4e88014..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/AuthorizationStmt.java
+++ /dev/null
@@ -1,49 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import com.cloudera.impala.authorization.User;
-import com.cloudera.impala.common.AnalysisException;
-import com.google.common.base.Strings;
-
-/**
- * Base class for all authorization statements - CREATE/DROP/SHOW ROLE, GRANT/REVOKE
- * ROLE/privilege, etc.
- */
-public class AuthorizationStmt extends StatementBase {
-  // Set during analysis
-  protected User requestingUser_;
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    if (!analyzer.getAuthzConfig().isEnabled()) {
-      throw new AnalysisException("Authorization is not enabled. To enable " +
-          "authorization, restart Impala with the --server_name=<name> flag.");
-    }
-    if (analyzer.getAuthzConfig().isFileBasedPolicy()) {
-      throw new AnalysisException("Cannot execute authorization statement using a file" +
-          " based policy. To disable file based policies, restart Impala without the " +
-          "-authorization_policy_file flag set.");
-    }
-    if (Strings.isNullOrEmpty(analyzer.getUser().getName())) {
-      throw new AnalysisException("Cannot execute authorization statement with an " +
-          "empty username.");
-    }
-    requestingUser_ = analyzer.getUser();
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/BaseTableRef.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/BaseTableRef.java b/fe/src/main/java/com/cloudera/impala/analysis/BaseTableRef.java
deleted file mode 100644
index 69780e0..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/BaseTableRef.java
+++ /dev/null
@@ -1,98 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.common.AnalysisException;
-import com.google.common.base.Preconditions;
-
-/**
- * Represents a reference to an actual table, such as an Hdfs or HBase table.
- * BaseTableRefs are instantiated as a result of table resolution during analysis
- * of a SelectStmt.
- */
-public class BaseTableRef extends TableRef {
-
-  /**
-   * Create a BaseTableRef from the original unresolved table ref as well as
-   * its resolved path. Sets table aliases and join-related attributes.
-   */
-  public BaseTableRef(TableRef tableRef, Path resolvedPath) {
-    super(tableRef);
-    Preconditions.checkState(resolvedPath.isResolved());
-    Preconditions.checkState(resolvedPath.isRootedAtTable());
-    resolvedPath_ = resolvedPath;
-    // Set implicit aliases if no explicit one was given.
-    if (hasExplicitAlias()) return;
-    aliases_ = new String[] {
-        getTable().getTableName().toString().toLowerCase(),
-        getTable().getName().toLowerCase() };
-  }
-
-  /**
-   * C'tor for cloning.
-   */
-  private BaseTableRef(BaseTableRef other) {
-    super(other);
-  }
-
-  /**
-   * Register this table ref and then analyze the Join clause.
-   */
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    if (isAnalyzed_) return;
-    analyzer.registerAuthAndAuditEvent(resolvedPath_.getRootTable(), analyzer);
-    desc_ = analyzer.registerTableRef(this);
-    isAnalyzed_ = true;
-    analyzeHints(analyzer);
-    analyzeJoin(analyzer);
-    analyzeSkipHeaderLineCount();
-  }
-
-  @Override
-  protected String tableRefToSql() {
-    // Enclose the alias in quotes if Hive cannot parse it without quotes.
-    // This is needed for view compatibility between Impala and Hive.
-    String aliasSql = null;
-    String alias = getExplicitAlias();
-    if (alias != null) aliasSql = ToSqlUtils.getIdentSql(alias);
-    String tableHintsSql = ToSqlUtils.getPlanHintsSql(tableHints_);
-    return getTable().getTableName().toSql() +
-        ((aliasSql != null) ? " " + aliasSql : "") +
-        (!tableHintsSql.isEmpty() ? " " + tableHintsSql : "");
-  }
-
-  public String debugString() { return tableRefToSql(); }
-  @Override
-  protected TableRef clone() { return new BaseTableRef(this); }
-
-  /**
-   * Analyze the 'skip.header.line.count' property.
-   */
-  private void analyzeSkipHeaderLineCount() throws AnalysisException {
-    Table table = getTable();
-    if (!(table instanceof HdfsTable)) return;
-    HdfsTable hdfsTable = (HdfsTable)table;
-
-    StringBuilder error = new StringBuilder();
-    hdfsTable.parseSkipHeaderLineCount(error);
-    if (error.length() > 0) throw new AnalysisException(error.toString());
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/BetweenPredicate.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/BetweenPredicate.java b/fe/src/main/java/com/cloudera/impala/analysis/BetweenPredicate.java
deleted file mode 100644
index d76a4c6..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/BetweenPredicate.java
+++ /dev/null
@@ -1,158 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TExprNode;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-/**
- * Class describing between predicates. After successful analysis, we rewrite
- * the between predicate to a conjunctive/disjunctive compound predicate
- * to be handed to the backend.
- */
-public class BetweenPredicate extends Predicate {
-
-  private final boolean isNotBetween_;
-
-  // After successful analysis, we rewrite this between predicate
-  // into a conjunctive/disjunctive compound predicate.
-  private CompoundPredicate rewrittenPredicate_;
-
-  // Children of the BetweenPredicate, since this.children should hold the children
-  // of the rewritten predicate to make sure toThrift() picks up the right ones.
-  private ArrayList<Expr> originalChildren_ = Lists.newArrayList();
-
-  // First child is the comparison expr which should be in [lowerBound, upperBound].
-  public BetweenPredicate(Expr compareExpr, Expr lowerBound, Expr upperBound,
-      boolean isNotBetween) {
-    originalChildren_.add(compareExpr);
-    originalChildren_.add(lowerBound);
-    originalChildren_.add(upperBound);
-    this.isNotBetween_ = isNotBetween;
-  }
-
-  /**
-   * Copy c'tor used in clone().
-   */
-  protected BetweenPredicate(BetweenPredicate other) {
-    super(other);
-    isNotBetween_ = other.isNotBetween_;
-    originalChildren_ = Expr.cloneList(other.originalChildren_);
-    if (other.rewrittenPredicate_ != null) {
-      rewrittenPredicate_ = (CompoundPredicate) other.rewrittenPredicate_.clone();
-    }
-  }
-
-  public CompoundPredicate getRewrittenPredicate() {
-    Preconditions.checkState(isAnalyzed_);
-    return rewrittenPredicate_;
-  }
-  public ArrayList<Expr> getOriginalChildren() { return originalChildren_; }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    if (isAnalyzed_) return;
-    super.analyze(analyzer);
-    if (originalChildren_.get(0) instanceof Subquery &&
-        (originalChildren_.get(1) instanceof Subquery ||
-         originalChildren_.get(2) instanceof Subquery)) {
-      throw new AnalysisException("Comparison between subqueries is not " +
-          "supported in a between predicate: " + toSqlImpl());
-    }
-    analyzer.castAllToCompatibleType(originalChildren_);
-
-    // Rewrite between predicate into a conjunctive/disjunctive compound predicate.
-    if (isNotBetween_) {
-      // Rewrite into disjunction.
-      Predicate lower = new BinaryPredicate(BinaryPredicate.Operator.LT,
-          originalChildren_.get(0), originalChildren_.get(1));
-      Predicate upper = new BinaryPredicate(BinaryPredicate.Operator.GT,
-          originalChildren_.get(0), originalChildren_.get(2));
-      rewrittenPredicate_ =
-          new CompoundPredicate(CompoundPredicate.Operator.OR, lower, upper);
-    } else {
-      // Rewrite into conjunction.
-      Predicate lower = new BinaryPredicate(BinaryPredicate.Operator.GE,
-          originalChildren_.get(0), originalChildren_.get(1));
-      Predicate upper = new BinaryPredicate(BinaryPredicate.Operator.LE,
-          originalChildren_.get(0), originalChildren_.get(2));
-      rewrittenPredicate_ =
-          new CompoundPredicate(CompoundPredicate.Operator.AND, lower, upper);
-    }
-
-    try {
-      rewrittenPredicate_.analyze(analyzer);
-      fn_ = rewrittenPredicate_.fn_;
-    } catch (AnalysisException e) {
-      // We should have already guaranteed that analysis will succeed.
-      Preconditions.checkState(false, "Analysis failed in rewritten between predicate");
-    }
-
-    // Make sure toThrift() picks up the children of the rewritten predicate.
-    children_ = rewrittenPredicate_.getChildren();
-    // Since the only child is a CompoundPredicate expressing the comparison,
-    // the cost of the comparison is fully captured by the children's cost.
-    evalCost_ = getChildCosts();
-    isAnalyzed_ = true;
-  }
-
-  @Override
-  public List<Expr> getConjuncts() {
-    return rewrittenPredicate_.getConjuncts();
-  }
-
-  @Override
-  protected void toThrift(TExprNode msg) {
-    rewrittenPredicate_.toThrift(msg);
-  }
-
-  @Override
-  public String toSqlImpl() {
-    String notStr = (isNotBetween_) ? "NOT " : "";
-    return originalChildren_.get(0).toSql() + " " + notStr + "BETWEEN " +
-        originalChildren_.get(1).toSql() + " AND " + originalChildren_.get(2).toSql();
-  }
-
-  /**
-   * Also substitute the exprs in originalChildren when cloning.
-   */
-  @Override
-  protected Expr substituteImpl(ExprSubstitutionMap smap, Analyzer analyzer)
-      throws AnalysisException {
-    BetweenPredicate clone = (BetweenPredicate) super.substituteImpl(smap, analyzer);
-    Preconditions.checkNotNull(clone);
-    clone.originalChildren_ =
-        Expr.substituteList(originalChildren_, smap, analyzer, false);
-    return clone;
-  }
-
-  @Override
-  public Expr clone() { return new BetweenPredicate(this); }
-
-  @Override
-  public Expr reset() {
-    super.reset();
-    originalChildren_ = Expr.resetList(originalChildren_);
-    return this;
-  }
-}
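
The class comment above describes rewriting BETWEEN into a conjunction and NOT BETWEEN
into a disjunction. A minimal standalone sketch of that rewrite at the SQL-string level
(hypothetical class, not part of this patch):

    // "v BETWEEN lo AND hi"     becomes "v >= lo AND v <= hi"
    // "v NOT BETWEEN lo AND hi" becomes "v < lo OR v > hi"
    class BetweenRewriteSketch {
      static String rewrite(String v, String lo, String hi, boolean notBetween) {
        return notBetween
            ? v + " < " + lo + " OR " + v + " > " + hi
            : v + " >= " + lo + " AND " + v + " <= " + hi;
      }

      public static void main(String[] args) {
        System.out.println(rewrite("col", "1", "10", false));  // col >= 1 AND col <= 10
        System.out.println(rewrite("col", "1", "10", true));   // col < 1 OR col > 10
      }
    }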

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/BinaryPredicate.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/BinaryPredicate.java b/fe/src/main/java/com/cloudera/impala/analysis/BinaryPredicate.java
deleted file mode 100644
index 35d03e1..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/BinaryPredicate.java
+++ /dev/null
@@ -1,388 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.catalog.Db;
-import com.cloudera.impala.catalog.Function.CompareMode;
-import com.cloudera.impala.catalog.ScalarFunction;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.Pair;
-import com.cloudera.impala.common.Reference;
-import com.cloudera.impala.extdatasource.thrift.TComparisonOp;
-import com.cloudera.impala.thrift.TExprNode;
-import com.cloudera.impala.thrift.TExprNodeType;
-import com.google.common.base.Objects;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Predicates;
-import com.google.common.collect.Lists;
-
-/**
- * Most predicates with two operands.
- *
- */
-public class BinaryPredicate extends Predicate {
-  private final static Logger LOG = LoggerFactory.getLogger(BinaryPredicate.class);
-
-  // true if this BinaryPredicate is inferred from slot equivalences, false otherwise.
-  private boolean isInferred_ = false;
-
-  public enum Operator {
-    EQ("=", "eq", TComparisonOp.EQ),
-    NE("!=", "ne", TComparisonOp.NE),
-    LE("<=", "le", TComparisonOp.LE),
-    GE(">=", "ge", TComparisonOp.GE),
-    LT("<", "lt", TComparisonOp.LT),
-    GT(">", "gt", TComparisonOp.GT),
-    DISTINCT_FROM("IS DISTINCT FROM", "distinctfrom", TComparisonOp.DISTINCT_FROM),
-    NOT_DISTINCT("IS NOT DISTINCT FROM", "notdistinct", TComparisonOp.NOT_DISTINCT),
-    // Same as EQ, except it returns True if the rhs is NULL. There is no backend
-    // function for this. The functionality is embedded in the hash-join
-    // implementation.
-    NULL_MATCHING_EQ("=", "null_matching_eq", TComparisonOp.EQ);
-
-    private final String description_;
-    private final String name_;
-    private final TComparisonOp thriftOp_;
-
-    private Operator(String description, String name, TComparisonOp thriftOp) {
-      this.description_ = description;
-      this.name_ = name;
-      this.thriftOp_ = thriftOp;
-    }
-
-    @Override
-    public String toString() { return description_; }
-    public String getName() { return name_; }
-    public TComparisonOp getThriftOp() { return thriftOp_; }
-    public boolean isEquivalence() { return this == EQ || this == NOT_DISTINCT; }
-
-    public Operator converse() {
-      switch (this) {
-        case EQ: return EQ;
-        case NE: return NE;
-        case LE: return GE;
-        case GE: return LE;
-        case LT: return GT;
-        case GT: return LT;
-        case DISTINCT_FROM: return DISTINCT_FROM;
-        case NOT_DISTINCT: return NOT_DISTINCT;
-        case NULL_MATCHING_EQ:
-          throw new IllegalStateException("Not implemented");
-        default: throw new IllegalStateException("Invalid operator");
-      }
-    }
-  }
-
-  public static void initBuiltins(Db db) {
-    for (Type t: Type.getSupportedTypes()) {
-      if (t.isNull()) continue; // NULL is handled through type promotion.
-      db.addBuiltin(ScalarFunction.createBuiltinOperator(
-          Operator.EQ.getName(), Lists.newArrayList(t, t), Type.BOOLEAN));
-      db.addBuiltin(ScalarFunction.createBuiltinOperator(
-          Operator.NE.getName(), Lists.newArrayList(t, t), Type.BOOLEAN));
-      db.addBuiltin(ScalarFunction.createBuiltinOperator(
-          Operator.LE.getName(), Lists.newArrayList(t, t), Type.BOOLEAN));
-      db.addBuiltin(ScalarFunction.createBuiltinOperator(
-          Operator.GE.getName(), Lists.newArrayList(t, t), Type.BOOLEAN));
-      db.addBuiltin(ScalarFunction.createBuiltinOperator(
-          Operator.LT.getName(), Lists.newArrayList(t, t), Type.BOOLEAN));
-      db.addBuiltin(ScalarFunction.createBuiltinOperator(
-          Operator.GT.getName(), Lists.newArrayList(t, t), Type.BOOLEAN));
-    }
-  }
-
-  /**
-   * Normalizes a 'predicate' consisting of an uncast SlotRef and a constant Expr into
-   * the following form: <SlotRef> <Op> <LiteralExpr>
-   * If 'predicate' cannot be expressed in this way, null is returned.
-   */
-  public static BinaryPredicate normalizeSlotRefComparison(BinaryPredicate predicate,
-      Analyzer analyzer) {
-    SlotRef ref = null;
-    if (predicate.getChild(0) instanceof SlotRef) {
-      ref = (SlotRef) predicate.getChild(0);
-    } else if (predicate.getChild(1) instanceof SlotRef) {
-      ref = (SlotRef) predicate.getChild(1);
-    }
-
-    if (ref == null) return null;
-    if (ref != predicate.getChild(0)) {
-      Preconditions.checkState(ref == predicate.getChild(1));
-      predicate = new BinaryPredicate(predicate.getOp().converse(), ref,
-          predicate.getChild(0));
-      predicate.analyzeNoThrow(analyzer);
-    }
-
-    try {
-      predicate.foldConstantChildren(analyzer);
-    } catch (AnalysisException ex) {
-      // Throws if the expression cannot be evaluated by the BE.
-      return null;
-    }
-    predicate.analyzeNoThrow(analyzer);
-    if (!(predicate.getChild(1) instanceof LiteralExpr)) return null;
-    return predicate;
-  }
-
-  private Operator op_;
-
-  public Operator getOp() { return op_; }
-  public void setOp(Operator op) { op_ = op; }
-
-  public BinaryPredicate(Operator op, Expr e1, Expr e2) {
-    super();
-    this.op_ = op;
-    Preconditions.checkNotNull(e1);
-    children_.add(e1);
-    Preconditions.checkNotNull(e2);
-    children_.add(e2);
-  }
-
-  protected BinaryPredicate(BinaryPredicate other) {
-    super(other);
-    op_ = other.op_;
-    isInferred_ = other.isInferred_;
-  }
-
-  public boolean isNullMatchingEq() { return op_ == Operator.NULL_MATCHING_EQ; }
-
-  public boolean isInferred() { return isInferred_; }
-  public void setIsInferred() { isInferred_ = true; }
-
-  @Override
-  public String toSqlImpl() {
-    return getChild(0).toSql() + " " + op_.toString() + " " + getChild(1).toSql();
-  }
-
-  @Override
-  protected void toThrift(TExprNode msg) {
-    Preconditions.checkState(children_.size() == 2);
-    // Cannot serialize a nested predicate.
-    Preconditions.checkState(!contains(Subquery.class));
-    // This check is important because we often clone and/or evaluate predicates,
-    // and it's easy to get the casting logic wrong, e.g., cloned predicates
-    // with expr substitutions need to be re-analyzed with reanalyze().
-    Preconditions.checkState(getChild(0).getType().getPrimitiveType() ==
-                             getChild(1).getType().getPrimitiveType(),
-        "child 0 type: " + getChild(0).getType() +
-        " child 1 type: " + getChild(1).getType());
-    msg.node_type = TExprNodeType.FUNCTION_CALL;
-  }
-
-  @Override
-  public String debugString() {
-    return Objects.toStringHelper(this)
-        .add("op", op_)
-        .addValue(super.debugString())
-        .toString();
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    if (isAnalyzed_) return;
-    super.analyze(analyzer);
-
-    convertNumericLiteralsFromDecimal(analyzer);
-    String opName = op_.getName().equals("null_matching_eq") ? "eq" : op_.getName();
-    fn_ = getBuiltinFunction(analyzer, opName, collectChildReturnTypes(),
-        CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
-    if (fn_ == null) {
-      // Construct an appropriate error message and throw an AnalysisException.
-      String errMsg = "operands of type " + getChild(0).getType().toSql() + " and " +
-            getChild(1).getType().toSql()  + " are not comparable: " + toSql();
-
-      // Check if any of the children is a Subquery that does not return a
-      // scalar.
-      for (Expr expr: children_) {
-        if (expr instanceof Subquery && !expr.getType().isScalarType()) {
-          errMsg = "Subquery must return a single row: " + expr.toSql();
-          break;
-        }
-      }
-
-      throw new AnalysisException(errMsg);
-    }
-    Preconditions.checkState(fn_.getReturnType().isBoolean());
-
-    ArrayList<Expr> subqueries = Lists.newArrayList();
-    collectAll(Predicates.instanceOf(Subquery.class), subqueries);
-    if (subqueries.size() > 1) {
-      // TODO Remove that restriction when we add support for independent subquery
-      // evaluation.
-      throw new AnalysisException("Multiple subqueries are not supported in binary " +
-          "predicates: " + toSql());
-    }
-    if (contains(ExistsPredicate.class)) {
-      throw new AnalysisException("EXISTS subquery predicates are not " +
-          "supported in binary predicates: " + toSql());
-    }
-
-    List<InPredicate> inPredicates = Lists.newArrayList();
-    collect(InPredicate.class, inPredicates);
-    for (InPredicate inPredicate: inPredicates) {
-      if (inPredicate.contains(Subquery.class)) {
-        throw new AnalysisException("IN subquery predicates are not supported in " +
-            "binary predicates: " + toSql());
-      }
-    }
-
-    // Don't perform any casting for predicates with subqueries here. Any casting
-    // required will be performed when the subquery is unnested.
-    if (!contains(Subquery.class)) castForFunctionCall(true);
-
-    // Determine selectivity
-    // TODO: Compute selectivity for nested predicates.
-    // TODO: Improve estimation using histograms.
-    Reference<SlotRef> slotRefRef = new Reference<SlotRef>();
-    if ((op_ == Operator.EQ || op_ == Operator.NOT_DISTINCT)
-        && isSingleColumnPredicate(slotRefRef, null)) {
-      long distinctValues = slotRefRef.getRef().getNumDistinctValues();
-      if (distinctValues > 0) {
-        selectivity_ = 1.0 / distinctValues;
-        selectivity_ = Math.max(0, Math.min(1, selectivity_));
-      }
-    }
-
-    // Compute cost.
-    if (hasChildCosts()) {
-      if (getChild(0).getType().isFixedLengthType()) {
-        evalCost_ = getChildCosts() + BINARY_PREDICATE_COST;
-      } else if (getChild(0).getType().isStringType()) {
-        evalCost_ = getChildCosts() +
-            (float) (getAvgStringLength(getChild(0)) + getAvgStringLength(getChild(1)) *
-            BINARY_PREDICATE_COST);
-      } else {
-        //TODO(tmarshall): Handle other var length types here.
-        evalCost_ = getChildCosts() + VAR_LEN_BINARY_PREDICATE_COST;
-      }
-    }
-  }
-
-  /**
-   * If predicate is of the form "<slotref> <op> <expr>", returns expr,
-   * otherwise returns null. Slotref may be wrapped in a CastExpr.
-   * TODO: revisit CAST handling at the caller
-   */
-  public Expr getSlotBinding(SlotId id) {
-    // check left operand
-    SlotRef slotRef = getChild(0).unwrapSlotRef(false);
-    if (slotRef != null && slotRef.getSlotId() == id) return getChild(1);
-    // check right operand
-    slotRef = getChild(1).unwrapSlotRef(false);
-    if (slotRef != null && slotRef.getSlotId() == id) return getChild(0);
-    return null;
-  }
-
-  /**
-   * If e is an equality predicate between two slots that only require implicit
-   * casts, returns those two slots; otherwise returns null.
-   */
-  public static Pair<SlotId, SlotId> getEqSlots(Expr e) {
-    if (!(e instanceof BinaryPredicate)) return null;
-    return ((BinaryPredicate) e).getEqSlots();
-  }
-
-  /**
-   * If this is an equality predicate between two slots that only require implicit
-   * casts, returns those two slots; otherwise returns null.
-   */
-  @Override
-  public Pair<SlotId, SlotId> getEqSlots() {
-    if (op_ != Operator.EQ) return null;
-    SlotRef lhs = getChild(0).unwrapSlotRef(true);
-    if (lhs == null) return null;
-    SlotRef rhs = getChild(1).unwrapSlotRef(true);
-    if (rhs == null) return null;
-    return new Pair<SlotId, SlotId>(lhs.getSlotId(), rhs.getSlotId());
-  }
-
-  /**
-   * If predicate is of the form "<SlotRef> op <Expr>" or "<Expr> op <SlotRef>",
-   * returns the SlotRef, otherwise returns null.
-   */
-  @Override
-  public SlotRef getBoundSlot() {
-    SlotRef slotRef = getChild(0).unwrapSlotRef(true);
-    if (slotRef != null) return slotRef;
-    return getChild(1).unwrapSlotRef(true);
-  }
-
-  /**
-   * Negates a BinaryPredicate.
-   */
-  @Override
-  public Expr negate() {
-    Operator newOp = null;
-    switch (op_) {
-      case EQ:
-        newOp = Operator.NE;
-        break;
-      case NE:
-        newOp = Operator.EQ;
-        break;
-      case LT:
-        newOp = Operator.GE;
-        break;
-      case LE:
-        newOp = Operator.GT;
-        break;
-      case GE:
-        newOp = Operator.LT;
-        break;
-      case GT:
-        newOp = Operator.LE;
-        break;
-      case DISTINCT_FROM:
-        newOp = Operator.NOT_DISTINCT;
-        break;
-      case NOT_DISTINCT:
-        newOp = Operator.DISTINCT_FROM;
-        break;
-      case NULL_MATCHING_EQ:
-        throw new IllegalStateException("Not implemented");
-    }
-    return new BinaryPredicate(newOp, getChild(0), getChild(1));
-  }
-
-  /**
-   * Swaps the first with the second child in-place. Only valid to call for
-   * equivalence and not equal predicates.
-   */
-  public void reverse() {
-    Preconditions.checkState(op_.isEquivalence() || op_ == Operator.NE);
-    Collections.swap(children_, 0, 1);
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (!super.equals(obj)) return false;
-    BinaryPredicate other = (BinaryPredicate) obj;
-    return op_.equals(other.op_);
-  }
-
-  @Override
-  public Expr clone() { return new BinaryPredicate(this); }
-}
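
In analyze() above, the selectivity of '=' and 'IS NOT DISTINCT FROM' against a single
column is estimated as 1 / numDistinctValues, clamped to [0, 1], and left unset when the
distinct-value count is unknown. A standalone sketch of that estimate (hypothetical class
and values, not part of this patch):

    class SelectivitySketch {
      // Returns -1 when the NDV is unknown (<= 0), mirroring the "distinctValues > 0" guard.
      static double eqSelectivity(long numDistinctValues) {
        if (numDistinctValues <= 0) return -1;
        return Math.max(0, Math.min(1, 1.0 / numDistinctValues));
      }

      public static void main(String[] args) {
        System.out.println(eqSelectivity(1000));  // 0.001
        System.out.println(eqSelectivity(1));     // 1.0
        System.out.println(eqSelectivity(0));     // -1.0, i.e. unknown
      }
    }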

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/BoolLiteral.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/BoolLiteral.java b/fe/src/main/java/com/cloudera/impala/analysis/BoolLiteral.java
deleted file mode 100644
index 03b2b1f..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/BoolLiteral.java
+++ /dev/null
@@ -1,113 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TBoolLiteral;
-import com.cloudera.impala.thrift.TExprNode;
-import com.cloudera.impala.thrift.TExprNodeType;
-import com.google.common.base.Objects;
-
-public class BoolLiteral extends LiteralExpr {
-  private final boolean value_;
-
-  public BoolLiteral(boolean value) {
-    this.value_ = value;
-    type_ = Type.BOOLEAN;
-    evalCost_ = LITERAL_COST;
-  }
-
-  public BoolLiteral(String value) throws AnalysisException {
-    type_ = Type.BOOLEAN;
-    evalCost_ = LITERAL_COST;
-    if (value.toLowerCase().equals("true")) {
-      this.value_ = true;
-    } else if (value.toLowerCase().equals("false")) {
-      this.value_ = false;
-    } else {
-      throw new AnalysisException("invalid BOOLEAN literal: " + value);
-    }
-  }
-
-  /**
-   * Copy c'tor used in clone.
-   */
-  protected BoolLiteral(BoolLiteral other) {
-    super(other);
-    value_ = other.value_;
-  }
-
-  @Override
-  public String debugString() {
-    return Objects.toStringHelper(this)
-        .add("value", value_)
-        .toString();
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (!super.equals(obj)) {
-      return false;
-    }
-    return ((BoolLiteral) obj).value_ == value_;
-  }
-
-  @Override
-  public int hashCode() { return value_ ? 1 : 0; }
-
-  public boolean getValue() { return value_; }
-
-  @Override
-  public String toSqlImpl() {
-    return getStringValue();
-  }
-
-  @Override
-  public String getStringValue() {
-    return value_ ? "TRUE" : "FALSE";
-  }
-
-  @Override
-  protected void toThrift(TExprNode msg) {
-    msg.node_type = TExprNodeType.BOOL_LITERAL;
-    msg.bool_literal = new TBoolLiteral(value_);
-  }
-
-  @Override
-  protected Expr uncheckedCastTo(Type targetType) throws AnalysisException {
-    if (targetType.equals(this.type_)) {
-      return this;
-    } else {
-      return new CastExpr(targetType, this);
-    }
-  }
-
-  @Override
-  public int compareTo(LiteralExpr o) {
-    int ret = super.compareTo(o);
-    if (ret != 0) return ret;
-    BoolLiteral other = (BoolLiteral) o;
-    if (value_ && !other.getValue()) return 1;
-    if (!value_ && other.getValue()) return -1;
-    return 0;
-  }
-
-  @Override
-  public Expr clone() { return new BoolLiteral(this); }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/CaseExpr.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/CaseExpr.java b/fe/src/main/java/com/cloudera/impala/analysis/CaseExpr.java
deleted file mode 100644
index bd3ec83..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/CaseExpr.java
+++ /dev/null
@@ -1,379 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.List;
-
-import com.cloudera.impala.catalog.Db;
-import com.cloudera.impala.catalog.Function.CompareMode;
-import com.cloudera.impala.catalog.PrimitiveType;
-import com.cloudera.impala.catalog.ScalarFunction;
-import com.cloudera.impala.catalog.ScalarType;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TCaseExpr;
-import com.cloudera.impala.thrift.TExprNode;
-import com.cloudera.impala.thrift.TExprNodeType;
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-/**
- * CASE and DECODE are represented using this class. The backend implementation is
- * always the "case" function.
- *
- * The internal representation of
- *   CASE [expr] WHEN expr THEN expr [WHEN expr THEN expr ...] [ELSE expr] END
- * Each When/Then is stored as two consecutive children (whenExpr, thenExpr). If a case
- * expr is given then it is the first child. If an else expr is given then it is the
- * last child.
- *
- * The internal representation of
- *   DECODE(expr, key_expr, val_expr [, key_expr, val_expr ...] [, default_val_expr])
- * has a pair of children for each pair of key/val_expr and an additional child if the
- * default_val_expr was given. The first child represents the comparison of expr to
- * key_expr. Decode has three forms:
- *   1) DECODE(expr, null_literal, val_expr) -
- *       child[0] = IsNull(expr)
- *   2) DECODE(expr, non_null_literal, val_expr) -
- *       child[0] = Eq(expr, literal)
- *   3) DECODE(expr1, expr2, val_expr) -
- *       child[0] = Or(And(IsNull(expr1), IsNull(expr2)),  Eq(expr1, expr2))
- * The children representing val_expr (child[1]) and default_val_expr (child[2]) are
- * simply the exprs themselves.
- *
- * Example of equivalent CASE for DECODE(foo, 'bar', 1, col, 2, NULL, 3, 4):
- *   CASE
- *     WHEN foo = 'bar' THEN 1   -- no need for IS NULL check
- *     WHEN foo IS NULL AND col IS NULL OR foo = col THEN 2
- *     WHEN foo IS NULL THEN 3  -- no need for equality check
- *     ELSE 4
- *   END
- */
-public class CaseExpr extends Expr {
-
-  // Set if constructed from a DECODE, null otherwise.
-  private FunctionCallExpr decodeExpr_;
-
-  private boolean hasCaseExpr_;
-  private boolean hasElseExpr_;
-
-  public CaseExpr(Expr caseExpr, List<CaseWhenClause> whenClauses, Expr elseExpr) {
-    super();
-    if (caseExpr != null) {
-      children_.add(caseExpr);
-      hasCaseExpr_ = true;
-    }
-    for (CaseWhenClause whenClause: whenClauses) {
-      Preconditions.checkNotNull(whenClause.getWhenExpr());
-      children_.add(whenClause.getWhenExpr());
-      Preconditions.checkNotNull(whenClause.getThenExpr());
-      children_.add(whenClause.getThenExpr());
-    }
-    if (elseExpr != null) {
-      children_.add(elseExpr);
-      hasElseExpr_ = true;
-    }
-  }
-
-  /**
-   * Constructs an equivalent CaseExpr representation.
-   *
-   * The DECODE behavior is basically the same as the hasCaseExpr_ version of CASE,
-   * with one difference: NULLs are considered equal when comparing the argument to
-   * be decoded with the candidates. This difference is for compatibility with
-   * Oracle. http://docs.oracle.com/cd/B19306_01/server.102/b14200/functions040.htm.
-   * To account for the difference, the CASE representation will use the non-hasCaseExpr_
-   * version.
-   *
-   * The return type of DECODE differs from that of Oracle when the third argument is
-   * the NULL literal. In Oracle the return type is STRING. In Impala the return type is
-   * determined by the implicit casting rules (i.e. it's not necessarily a STRING). This
-   * is done so seemingly normal usages such as DECODE(int_col, tinyint_col, NULL,
-   * bigint_col) will avoid type check errors (STRING incompatible with BIGINT).
-   */
-  public CaseExpr(FunctionCallExpr decodeExpr) {
-    super();
-    decodeExpr_ = decodeExpr;
-    hasCaseExpr_ = false;
-
-    int childIdx = 0;
-    Expr encoded = null;
-    Expr encodedIsNull = null;
-    if (!decodeExpr.getChildren().isEmpty()) {
-      encoded = decodeExpr.getChild(childIdx++);
-      encodedIsNull = new IsNullPredicate(encoded, false);
-    }
-
-    // Add the key_expr/val_expr pairs
-    while (childIdx + 2 <= decodeExpr.getChildren().size()) {
-      Expr candidate = decodeExpr.getChild(childIdx++);
-      if (candidate.isLiteral()) {
-        if (candidate.isNullLiteral()) {
-          // An example case is DECODE(foo, NULL, bar), since NULLs are considered
-          // equal, this becomes CASE WHEN foo IS NULL THEN bar END.
-          children_.add(encodedIsNull);
-        } else {
-          children_.add(new BinaryPredicate(
-              BinaryPredicate.Operator.EQ, encoded, candidate));
-        }
-      } else {
-        children_.add(new CompoundPredicate(CompoundPredicate.Operator.OR,
-            new CompoundPredicate(CompoundPredicate.Operator.AND,
-                encodedIsNull, new IsNullPredicate(candidate, false)),
-            new BinaryPredicate(BinaryPredicate.Operator.EQ, encoded, candidate)));
-      }
-
-      // Add the value
-      children_.add(decodeExpr.getChild(childIdx++));
-    }
-
-    // Add the default value
-    if (childIdx < decodeExpr.getChildren().size()) {
-      hasElseExpr_ = true;
-      children_.add(decodeExpr.getChild(childIdx));
-    }
-  }
-
-  /**
-   * Copy c'tor used in clone().
-   */
-  protected CaseExpr(CaseExpr other) {
-    super(other);
-    decodeExpr_ = other.decodeExpr_;
-    hasCaseExpr_ = other.hasCaseExpr_;
-    hasElseExpr_ = other.hasElseExpr_;
-  }
-
-  public static void initBuiltins(Db db) {
-    for (Type t: Type.getSupportedTypes()) {
-      if (t.isNull()) continue;
-      if (t.isScalarType(PrimitiveType.CHAR)) continue;
-      // TODO: case is special and the signature cannot be represented.
-      // It is alternating varargs
-      // e.g. case(bool, type, bool, type, bool, type, etc).
-      // Instead we just add a version for each of the return types
-      // e.g. case(BOOLEAN), case(INT), etc
-      db.addBuiltin(ScalarFunction.createBuiltinOperator(
-          "case", "", Lists.newArrayList(t), t));
-      // Same for DECODE
-      db.addBuiltin(ScalarFunction.createBuiltinOperator(
-          "decode", "", Lists.newArrayList(t), t));
-    }
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (!super.equals(obj)) return false;
-    CaseExpr expr = (CaseExpr) obj;
-    return hasCaseExpr_ == expr.hasCaseExpr_
-        && hasElseExpr_ == expr.hasElseExpr_
-        && isDecode() == expr.isDecode();
-  }
-
-  @Override
-  public String toSqlImpl() {
-    return (decodeExpr_ == null) ? toCaseSql() : decodeExpr_.toSqlImpl();
-  }
-
-  @VisibleForTesting
-  String toCaseSql() {
-    StringBuilder output = new StringBuilder("CASE");
-    int childIdx = 0;
-    if (hasCaseExpr_) {
-      output.append(" " + children_.get(childIdx++).toSql());
-    }
-    while (childIdx + 2 <= children_.size()) {
-      output.append(" WHEN " + children_.get(childIdx++).toSql());
-      output.append(" THEN " + children_.get(childIdx++).toSql());
-    }
-    if (hasElseExpr_) {
-      output.append(" ELSE " + children_.get(children_.size() - 1).toSql());
-    }
-    output.append(" END");
-    return output.toString();
-  }
-
-  @Override
-  protected void toThrift(TExprNode msg) {
-    msg.node_type = TExprNodeType.CASE_EXPR;
-    msg.case_expr = new TCaseExpr(hasCaseExpr_, hasElseExpr_);
-  }
-
-  private void castCharToString(int childIndex) throws AnalysisException {
-    if (children_.get(childIndex).getType().isScalarType(PrimitiveType.CHAR)) {
-      children_.set(childIndex, children_.get(childIndex).castTo(ScalarType.STRING));
-    }
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    if (isAnalyzed_) return;
-    super.analyze(analyzer);
-
-    if (isDecode()) {
-      Preconditions.checkState(!hasCaseExpr_);
-      // decodeExpr_.analyze() would fail validating function existence. The complex
-      // vararg signature is currently unsupported.
-      FunctionCallExpr.validateScalarFnParams(decodeExpr_.getParams());
-      if (decodeExpr_.getChildren().size() < 3) {
-        throw new AnalysisException("DECODE in '" + toSql() + "' requires at least 3 "
-            + "arguments.");
-      }
-    }
-
-    // Since we have no BE implementation of a CaseExpr with CHAR types,
-    // we cast the CHAR-typed whenExprs and caseExprs to STRING.
-    // TODO: This casting is not always correct and needs to be fixed, see IMPALA-1652.
-
-    // Keep track of maximum compatible type of case expr and all when exprs.
-    Type whenType = null;
-    // Keep track of maximum compatible type of else expr and all then exprs.
-    Type returnType = null;
-    // Remember last of these exprs for error reporting.
-    Expr lastCompatibleThenExpr = null;
-    Expr lastCompatibleWhenExpr = null;
-    int loopEnd = children_.size();
-    if (hasElseExpr_) {
-      --loopEnd;
-    }
-    int loopStart;
-    Expr caseExpr = null;
-    // Set loop start, and initialize whenType as the type of the case expr.
-    if (hasCaseExpr_) {
-      loopStart = 1;
-      castCharToString(0);
-      caseExpr = children_.get(0);
-      caseExpr.analyze(analyzer);
-      whenType = caseExpr.getType();
-      lastCompatibleWhenExpr = children_.get(0);
-    } else {
-      whenType = Type.BOOLEAN;
-      loopStart = 0;
-    }
-
-    // Go through when/then exprs and determine compatible types.
-    for (int i = loopStart; i < loopEnd; i += 2) {
-      castCharToString(i);
-      Expr whenExpr = children_.get(i);
-      if (hasCaseExpr_) {
-        // Determine maximum compatible type of the case expr,
-        // and all when exprs seen so far. We will add casts to them at the very end.
-        whenType = analyzer.getCompatibleType(whenType,
-            lastCompatibleWhenExpr, whenExpr);
-        lastCompatibleWhenExpr = whenExpr;
-      } else {
-        // If no case expr was given, then the when exprs should always return
-        // boolean or be castable to boolean.
-        if (!Type.isImplicitlyCastable(whenExpr.getType(), Type.BOOLEAN, false)) {
-          Preconditions.checkState(isCase());
-          throw new AnalysisException("When expr '" + whenExpr.toSql() + "'" +
-              " is not of type boolean and not castable to type boolean.");
-        }
-        // Add a cast if necessary.
-        if (!whenExpr.getType().isBoolean()) castChild(Type.BOOLEAN, i);
-      }
-      // Determine maximum compatible type of the then exprs seen so far.
-      // We will add casts to them at the very end.
-      Expr thenExpr = children_.get(i + 1);
-      returnType = analyzer.getCompatibleType(returnType,
-          lastCompatibleThenExpr, thenExpr);
-      lastCompatibleThenExpr = thenExpr;
-    }
-    if (hasElseExpr_) {
-      Expr elseExpr = children_.get(children_.size() - 1);
-      returnType = analyzer.getCompatibleType(returnType,
-          lastCompatibleThenExpr, elseExpr);
-    }
-
-    // Make sure BE doesn't see TYPE_NULL by picking an arbitrary type
-    if (whenType.isNull()) whenType = ScalarType.BOOLEAN;
-    if (returnType.isNull()) returnType = ScalarType.BOOLEAN;
-
-    // Add casts to case expr to compatible type.
-    if (hasCaseExpr_) {
-      // Cast case expr.
-      if (!children_.get(0).type_.equals(whenType)) {
-        castChild(whenType, 0);
-      }
-      // Add casts to when exprs to compatible type.
-      for (int i = loopStart; i < loopEnd; i += 2) {
-        if (!children_.get(i).type_.equals(whenType)) {
-          castChild(whenType, i);
-        }
-      }
-    }
-    // Cast then exprs to compatible type.
-    for (int i = loopStart + 1; i < children_.size(); i += 2) {
-      if (!children_.get(i).type_.equals(returnType)) {
-        castChild(returnType, i);
-      }
-    }
-    // Cast else expr to compatible type.
-    if (hasElseExpr_) {
-      if (!children_.get(children_.size() - 1).type_.equals(returnType)) {
-        castChild(returnType, children_.size() - 1);
-      }
-    }
-
-    // Do the function lookup just based on the whenType.
-    Type[] args = new Type[1];
-    args[0] = whenType;
-    fn_ = getBuiltinFunction(analyzer, "case", args,
-        CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
-    Preconditions.checkNotNull(fn_);
-    type_ = returnType;
-
-    // Compute cost as the sum of evaluating all of the WHEN exprs, plus
-    // the max of the THEN/ELSE exprs.
-    float maxThenCost = 0;
-    float whenCosts = 0;
-    boolean hasChildCosts = true;
-    for (int i = 0; i < children_.size(); ++i) {
-      if (!getChild(i).hasCost()) {
-        hasChildCosts = false;
-        break;
-      }
-
-      if (hasCaseExpr_ && i % 2 == 1) {
-        // This child is a WHEN expr. BINARY_PREDICATE_COST accounts for the cost of
-        // comparing the CASE expr to the WHEN expr.
-        whenCosts += getChild(0).getCost() + getChild(i).getCost() +
-          BINARY_PREDICATE_COST;
-      } else if (!hasCaseExpr_ && i % 2 == 0) {
-        // This child is a WHEN expr.
-        whenCosts += getChild(i).getCost();
-      } else if (i != 0) {
-        // This child is a THEN or ELSE expr.
-        float thenCost = getChild(i).getCost();
-        if (thenCost > maxThenCost) maxThenCost = thenCost;
-      }
-    }
-    if (hasChildCosts) {
-      evalCost_ =  whenCosts + maxThenCost;
-    }
-  }
-
-  private boolean isCase() { return !isDecode(); }
-  private boolean isDecode() { return decodeExpr_ != null; }
-  public boolean hasCaseExpr() { return hasCaseExpr_; }
-  public boolean hasElseExpr() { return hasElseExpr_; }
-
-  @Override
-  public Expr clone() { return new CaseExpr(this); }
-}
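
The cost computation at the end of analyze() above charges every WHEN comparison but only
the most expensive THEN/ELSE branch, since at most one branch is evaluated per row. A
standalone sketch of that formula over hypothetical per-child costs (not part of this
patch):

    class CaseCostSketch {
      // evalCost = sum of WHEN comparison costs + max THEN/ELSE branch cost.
      static float evalCost(float[] whenCosts, float[] thenOrElseCosts) {
        float whenSum = 0;
        for (float c : whenCosts) whenSum += c;
        float maxBranch = 0;
        for (float c : thenOrElseCosts) maxBranch = Math.max(maxBranch, c);
        return whenSum + maxBranch;
      }

      public static void main(String[] args) {
        // Two WHEN comparisons costing 2 each, branches costing 1 and 5: 2 + 2 + 5 = 9.
        System.out.println(evalCost(new float[] {2, 2}, new float[] {1, 5}));
      }
    }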

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/CaseWhenClause.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/CaseWhenClause.java b/fe/src/main/java/com/cloudera/impala/analysis/CaseWhenClause.java
deleted file mode 100644
index 8b1433e..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/CaseWhenClause.java
+++ /dev/null
@@ -1,42 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-
-/**
- * Captures info of a single WHEN expr THEN expr clause.
- *
- */
-class CaseWhenClause {
-  private final Expr whenExpr_;
-  private final Expr thenExpr_;
-
-  public CaseWhenClause(Expr whenExpr, Expr thenExpr) {
-    super();
-    this.whenExpr_ = whenExpr;
-    this.thenExpr_ = thenExpr;
-  }
-
-  public Expr getWhenExpr() {
-    return whenExpr_;
-  }
-
-  public Expr getThenExpr() {
-    return thenExpr_;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/CastExpr.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/CastExpr.java b/fe/src/main/java/com/cloudera/impala/analysis/CastExpr.java
deleted file mode 100644
index 2b3b271..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/CastExpr.java
+++ /dev/null
@@ -1,312 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import com.cloudera.impala.catalog.Catalog;
-import com.cloudera.impala.catalog.Db;
-import com.cloudera.impala.catalog.Function;
-import com.cloudera.impala.catalog.Function.CompareMode;
-import com.cloudera.impala.catalog.PrimitiveType;
-import com.cloudera.impala.catalog.ScalarFunction;
-import com.cloudera.impala.catalog.ScalarType;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TExpr;
-import com.cloudera.impala.thrift.TExprNode;
-import com.cloudera.impala.thrift.TExprNodeType;
-import com.google.common.base.Objects;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-public class CastExpr extends Expr {
-  // Only set for explicit casts. Null for implicit casts.
-  private final TypeDef targetTypeDef_;
-
-  // True if this is a "pre-analyzed" implicit cast.
-  private final boolean isImplicit_;
-
-  // True if this cast does not change the type.
-  private boolean noOp_ = false;
-
-  /**
-   * C'tor for "pre-analyzed" implicit casts.
-   */
-  public CastExpr(Type targetType, Expr e) {
-    super();
-    Preconditions.checkState(targetType.isValid());
-    Preconditions.checkNotNull(e);
-    type_ = targetType;
-    targetTypeDef_ = null;
-    isImplicit_ = true;
-    // replace existing implicit casts
-    if (e instanceof CastExpr) {
-      CastExpr castExpr = (CastExpr) e;
-      if (castExpr.isImplicit()) e = castExpr.getChild(0);
-    }
-    children_.add(e);
-
-    // Implicit casts don't call analyze()
-    // TODO: this doesn't seem like the cleanest approach but there are places
-    // we generate these (e.g. table loading) where there is no analyzer object.
-    try {
-      analyze();
-      computeNumDistinctValues();
-    } catch (AnalysisException ex) {
-      Preconditions.checkState(false,
-          "Implicit casts should never throw analysis exception.");
-    }
-    isAnalyzed_ = true;
-  }
-
-  /**
-   * C'tor for explicit casts.
-   */
-  public CastExpr(TypeDef targetTypeDef, Expr e) {
-    Preconditions.checkNotNull(targetTypeDef);
-    Preconditions.checkNotNull(e);
-    isImplicit_ = false;
-    targetTypeDef_ = targetTypeDef;
-    children_.add(e);
-  }
-
-  /**
-   * Copy c'tor used in clone().
-   */
-  protected CastExpr(CastExpr other) {
-    super(other);
-    targetTypeDef_ = other.targetTypeDef_;
-    isImplicit_ = other.isImplicit_;
-    noOp_ = other.noOp_;
-  }
-
-  private static String getFnName(Type targetType) {
-    return "castTo" + targetType.getPrimitiveType().toString();
-  }
-
-  public static void initBuiltins(Db db) {
-    for (Type fromType : Type.getSupportedTypes()) {
-      if (fromType.isNull()) continue;
-      for (Type toType : Type.getSupportedTypes()) {
-        if (toType.isNull()) continue;
-        // Disable casting from string to boolean
-        if (fromType.isStringType() && toType.isBoolean()) continue;
-        // Disable casting from boolean/timestamp to decimal
-        if ((fromType.isBoolean() || fromType.isDateType()) && toType.isDecimal()) {
-          continue;
-        }
-        if (fromType.getPrimitiveType() == PrimitiveType.STRING
-            && toType.getPrimitiveType() == PrimitiveType.CHAR) {
-          // Allow casting from String to Char(N)
-          String beSymbol = "impala::CastFunctions::CastToChar";
-          db.addBuiltin(ScalarFunction.createBuiltin(getFnName(ScalarType.CHAR),
-              Lists.newArrayList((Type) ScalarType.STRING), false, ScalarType.CHAR,
-              beSymbol, null, null, true));
-          continue;
-        }
-        if (fromType.getPrimitiveType() == PrimitiveType.CHAR
-            && toType.getPrimitiveType() == PrimitiveType.CHAR) {
-          // Allow casting from CHAR(N) to Char(N)
-          String beSymbol = "impala::CastFunctions::CastToChar";
-          db.addBuiltin(ScalarFunction.createBuiltin(getFnName(ScalarType.CHAR),
-              Lists.newArrayList((Type) ScalarType.createCharType(-1)), false,
-              ScalarType.CHAR, beSymbol, null, null, true));
-          continue;
-        }
-        if (fromType.getPrimitiveType() == PrimitiveType.VARCHAR
-            && toType.getPrimitiveType() == PrimitiveType.VARCHAR) {
-          // Allow casting from VARCHAR(N) to VARCHAR(M)
-          String beSymbol = "impala::CastFunctions::CastToStringVal";
-          db.addBuiltin(ScalarFunction.createBuiltin(getFnName(ScalarType.VARCHAR),
-              Lists.newArrayList((Type) ScalarType.VARCHAR), false, ScalarType.VARCHAR,
-              beSymbol, null, null, true));
-          continue;
-        }
-        if (fromType.getPrimitiveType() == PrimitiveType.VARCHAR
-            && toType.getPrimitiveType() == PrimitiveType.CHAR) {
-          // Allow casting from VARCHAR(N) to CHAR(M)
-          String beSymbol = "impala::CastFunctions::CastToChar";
-          db.addBuiltin(ScalarFunction.createBuiltin(getFnName(ScalarType.CHAR),
-              Lists.newArrayList((Type) ScalarType.VARCHAR), false, ScalarType.CHAR,
-              beSymbol, null, null, true));
-          continue;
-        }
-        if (fromType.getPrimitiveType() == PrimitiveType.CHAR
-            && toType.getPrimitiveType() == PrimitiveType.VARCHAR) {
-          // Allow casting from CHAR(N) to VARCHAR(M)
-          String beSymbol = "impala::CastFunctions::CastToStringVal";
-          db.addBuiltin(ScalarFunction.createBuiltin(getFnName(ScalarType.VARCHAR),
-              Lists.newArrayList((Type) ScalarType.CHAR), false, ScalarType.VARCHAR,
-              beSymbol, null, null, true));
-          continue;
-        }
-        // Disable no-op casts
-        if (fromType.equals(toType) && !fromType.isDecimal()) continue;
-        String beClass = toType.isDecimal() || fromType.isDecimal() ?
-            "DecimalOperators" : "CastFunctions";
-        String beSymbol = "impala::" + beClass + "::CastTo" + Function.getUdfType(toType);
-        db.addBuiltin(ScalarFunction.createBuiltin(getFnName(toType),
-            Lists.newArrayList(fromType), false, toType, beSymbol,
-            null, null, true));
-      }
-    }
-  }
-
-  @Override
-  public String toSqlImpl() {
-    if (isImplicit_) return getChild(0).toSql();
-    return "CAST(" + getChild(0).toSql() + " AS " + targetTypeDef_.toString() + ")";
-  }
-
-  @Override
-  protected void treeToThriftHelper(TExpr container) {
-    if (noOp_) {
-      getChild(0).treeToThriftHelper(container);
-      return;
-    }
-    super.treeToThriftHelper(container);
-  }
-
-  @Override
-  protected void toThrift(TExprNode msg) {
-    msg.node_type = TExprNodeType.FUNCTION_CALL;
-  }
-
-  @Override
-  public String debugString() {
-    return Objects.toStringHelper(this)
-        .add("isImplicit", isImplicit_)
-        .add("target", type_)
-        .addValue(super.debugString())
-        .toString();
-  }
-
-  public boolean isImplicit() { return isImplicit_; }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    if (isAnalyzed_) return;
-    Preconditions.checkState(!isImplicit_);
-    super.analyze(analyzer);
-    targetTypeDef_.analyze(analyzer);
-    type_ = targetTypeDef_.getType();
-    analyze();
-  }
-
-  private void analyze() throws AnalysisException {
-    if (getChild(0).hasCost()) evalCost_ = getChild(0).getCost() + CAST_COST;
-
-    Preconditions.checkNotNull(type_);
-    if (type_.isComplexType()) {
-      throw new AnalysisException(
-          "Unsupported cast to complex type: " + type_.toSql());
-    }
-
-    boolean readyForCharCast =
-        children_.get(0).getType().getPrimitiveType() == PrimitiveType.STRING ||
-        children_.get(0).getType().getPrimitiveType() == PrimitiveType.CHAR;
-    if (type_.getPrimitiveType() == PrimitiveType.CHAR && !readyForCharCast) {
-      // Back end functions only exist to cast string types to CHAR; there is not a cast
-      // for every type since it is redundant with STRING. Casts go through 2 casts:
-      // (1) cast to string, to stringify the value
-      // (2) cast to CHAR, to truncate or pad with spaces
-      CastExpr tostring = new CastExpr(ScalarType.STRING, children_.get(0));
-      tostring.analyze();
-      children_.set(0, tostring);
-    }
-
-    if (children_.get(0) instanceof NumericLiteral && type_.isFloatingPointType()) {
-      // Special case casting a decimal literal to a floating point number. The
-      // decimal literal can be interpreted as either and we want to avoid casts
-      // since that can result in loss of accuracy.
-      ((NumericLiteral)children_.get(0)).explicitlyCastToFloat(type_);
-    }
-
-    if (children_.get(0).getType().isNull()) {
-      // Make sure BE never sees TYPE_NULL
-      uncheckedCastChild(type_, 0);
-    }
-
-    // Ensure child has non-null type (even if it's a null literal). This is required
-    // for the UDF interface.
-    if (children_.get(0) instanceof NullLiteral) {
-      NullLiteral nullChild = (NullLiteral)(children_.get(0));
-      nullChild.uncheckedCastTo(type_);
-    }
-
-    Type childType = children_.get(0).type_;
-    Preconditions.checkState(!childType.isNull());
-    if (childType.equals(type_)) {
-      noOp_ = true;
-      return;
-    }
-
-    FunctionName fnName = new FunctionName(Catalog.BUILTINS_DB, getFnName(type_));
-    Type[] args = { childType };
-    Function searchDesc = new Function(fnName, args, Type.INVALID, false);
-    if (isImplicit_) {
-      fn_ = Catalog.getBuiltin(searchDesc, CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
-      Preconditions.checkState(fn_ != null);
-    } else {
-      fn_ = Catalog.getBuiltin(searchDesc, CompareMode.IS_IDENTICAL);
-      if (fn_ == null) {
-        // allow for promotion from CHAR to STRING; only if no exact match is found
-        fn_ = Catalog.getBuiltin(searchDesc.promoteCharsToStrings(),
-            CompareMode.IS_IDENTICAL);
-      }
-    }
-    if (fn_ == null) {
-      throw new AnalysisException("Invalid type cast of " + getChild(0).toSql() +
-          " from " + childType + " to " + type_);
-    }
-
-    Preconditions.checkState(type_.matchesType(fn_.getReturnType()),
-        type_ + " != " + fn_.getReturnType());
-  }
-
-  /**
-   * Returns child expr if this expr is an implicit cast, otherwise returns 'this'.
-   */
-  @Override
-  public Expr ignoreImplicitCast() {
-    if (isImplicit_) {
-      // we don't expect to see two consecutive implicit casts
-      Preconditions.checkState(
-          !(getChild(0) instanceof CastExpr) || !((CastExpr) getChild(0)).isImplicit());
-      return getChild(0);
-    } else {
-      return this;
-    }
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (this == obj) return true;
-    if (obj instanceof CastExpr) {
-      CastExpr other = (CastExpr) obj;
-      return isImplicit_ == other.isImplicit_
-          && type_.equals(other.type_)
-          && super.equals(obj);
-    }
-    // Ignore implicit casts when comparing expr trees.
-    if (isImplicit_) return getChild(0).equals(obj);
-    return false;
-  }
-
-  @Override
-  public Expr clone() { return new CastExpr(this); }
-}
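
The deleted CastExpr above registers one cast builtin per (from, to) type pair
under the name "castTo" + <target primitive type>, and routes casts to CHAR from
non-string types through an intermediate cast to STRING. A minimal, stand-alone
sketch of the naming convention only (not Impala code; the enum below and its
capitalization are placeholders for Impala's PrimitiveType):

    // Mirrors getFnName() above: the builtin name is derived from the target type.
    public class CastFnNameSketch {
      enum PrimitiveType { BOOLEAN, TINYINT, INT, BIGINT, FLOAT, DOUBLE, STRING, CHAR }

      static String getFnName(PrimitiveType targetType) {
        return "castTo" + targetType.toString();
      }

      public static void main(String[] args) {
        // CAST(x AS BIGINT) would resolve against a builtin registered as "castToBIGINT".
        System.out.println(getFnName(PrimitiveType.BIGINT));
      }
    }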

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/CollectionStructType.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/CollectionStructType.java b/fe/src/main/java/com/cloudera/impala/analysis/CollectionStructType.java
deleted file mode 100644
index b45b856..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/CollectionStructType.java
+++ /dev/null
@@ -1,79 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.ArrayList;
-
-import jline.internal.Preconditions;
-
-import com.cloudera.impala.catalog.ArrayType;
-import com.cloudera.impala.catalog.MapType;
-import com.cloudera.impala.catalog.ScalarType;
-import com.cloudera.impala.catalog.StructField;
-import com.cloudera.impala.catalog.StructType;
-import com.cloudera.impala.catalog.Type;
-import com.google.common.collect.Lists;
-
-/**
- * Generated struct type describing the fields of a collection type
- * that can be referenced in paths.
- *
- * Parent Type      CollectionStructType
- * array<i>    -->  struct<item:i,pos:bigint>
- * map<k,v>    -->  struct<key:k,value:v>
- */
-public class CollectionStructType extends StructType {
-  // True if this struct describes the fields of a map,
-  // false if it describes the fields of an array.
-  private final boolean isMapStruct_;
-
-  // Field that can be skipped by implicit paths if its type is a struct.
-  private final StructField optionalField_;
-
-  private CollectionStructType(ArrayList<StructField> fields, boolean isMapStruct) {
-    super(fields);
-    isMapStruct_ = isMapStruct;
-    if (isMapStruct_) {
-      optionalField_ = getField(Path.MAP_VALUE_FIELD_NAME);
-    } else {
-      optionalField_ = getField(Path.ARRAY_ITEM_FIELD_NAME);
-    }
-    Preconditions.checkNotNull(optionalField_);
-  }
-
-  public static CollectionStructType createArrayStructType(ArrayType arrayType) {
-    Type itemType = arrayType.getItemType();
-    ArrayList<StructField> fields = Lists.newArrayListWithCapacity(2);
-    // The item field name comes before the pos field name so that a path to the
-    // stored item corresponds to its physical path.
-    fields.add(new StructField(Path.ARRAY_ITEM_FIELD_NAME, itemType));
-    fields.add(new StructField(Path.ARRAY_POS_FIELD_NAME, ScalarType.BIGINT));
-    return new CollectionStructType(fields, false);
-  }
-
-  public static CollectionStructType createMapStructType(MapType mapType) {
-    ArrayList<StructField> mapFields = Lists.newArrayListWithCapacity(2);
-    mapFields.add(new StructField(Path.MAP_KEY_FIELD_NAME, mapType.getKeyType()));
-    mapFields.add(new StructField(Path.MAP_VALUE_FIELD_NAME, mapType.getValueType()));
-    return new CollectionStructType(mapFields, true);
-  }
-
-  public StructField getOptionalField() { return optionalField_; }
-  public boolean isMapStruct() { return isMapStruct_; }
-  public boolean isArrayStruct() { return !isMapStruct_; }
-}
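
As the class comment above spells out, CollectionStructType exposes an array<i>
as struct<item:i,pos:bigint> and a map<k,v> as struct<key:k,value:v>. A hedged,
stand-alone illustration of that field layout (the field names match the comment;
the rest is a placeholder, not Impala code):

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class CollectionStructSketch {
      public static void main(String[] args) {
        // array<int> -> struct<item:int, pos:bigint>; item precedes pos so the
        // logical path matches the physical layout, as noted above.
        Map<String, String> arrayStruct = new LinkedHashMap<>();
        arrayStruct.put("item", "int");
        arrayStruct.put("pos", "bigint");

        // map<string,int> -> struct<key:string, value:int>
        Map<String, String> mapStruct = new LinkedHashMap<>();
        mapStruct.put("key", "string");
        mapStruct.put("value", "int");

        System.out.println("array<int>      -> struct" + arrayStruct);
        System.out.println("map<string,int> -> struct" + mapStruct);
      }
    }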

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/CollectionTableRef.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/CollectionTableRef.java b/fe/src/main/java/com/cloudera/impala/analysis/CollectionTableRef.java
deleted file mode 100644
index 8abed3e..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/CollectionTableRef.java
+++ /dev/null
@@ -1,138 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.authorization.PrivilegeRequestBuilder;
-import com.cloudera.impala.common.AnalysisException;
-import com.google.common.base.Preconditions;
-
-/**
- * Reference to a MAP or ARRAY collection type that implies its
- * flattening during execution.
- * TODO: We currently create a new slot in the root tuple descriptor for every
- * relative collection ref, even if they have the same path. The BE currently relies on
- * this behavior for setting collection slots to NULL after they have been unnested
- * inside a SubplanNode. We could instead share the slot and the corresponding item tuple
- * descriptor among all collection table refs with the same path. This change will
- * require decoupling tuple descriptors from table aliases, i.e., a tuple descriptor
- * should be able to back multiple aliases.
- */
-public class CollectionTableRef extends TableRef {
-  /////////////////////////////////////////
-  // BEGIN: Members that need to be reset()
-
-  // Expr that returns the referenced collection. Typically a SlotRef into the
-  // parent scan's tuple. Result of analysis. Fully resolved against base tables.
-  private Expr collectionExpr_;
-
-  // END: Members that need to be reset()
-  /////////////////////////////////////////
-
-  /**
-   * Create a CollectionTableRef from the original unresolved table ref as well as
-   * its resolved path. Sets table aliases and join-related attributes.
-   */
-  public CollectionTableRef(TableRef tableRef, Path resolvedPath) {
-    super(tableRef);
-    Preconditions.checkState(resolvedPath.isResolved());
-    resolvedPath_ = resolvedPath;
-    // Use the last path element as an implicit alias if no explicit alias was given.
-    if (hasExplicitAlias()) return;
-    String implicitAlias = rawPath_.get(rawPath_.size() - 1).toLowerCase();
-    aliases_ = new String[] { implicitAlias };
-  }
-
-  /**
-   * C'tor for cloning.
-   */
-  public CollectionTableRef(CollectionTableRef other) {
-    super(other);
-    collectionExpr_ =
-        (other.collectionExpr_ != null) ? other.collectionExpr_.clone() : null;
-  }
-
-  /**
-   * Registers this collection table ref with the given analyzer and adds a slot
- * descriptor for the materialized collection to be populated by the parent scan.
-   * Also determines whether this collection table ref is correlated or not.
-   *
-   * If this function is called in the context of analyzing a WITH clause, then
-   * no slot is added to the parent descriptor so as to not pollute the analysis
-   * state of the parent block (the WITH-clause analyzer is discarded, and the
-   * parent analyzer could have an entirely different global state).
-   */
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    if (isAnalyzed_) return;
-    desc_ = analyzer.registerTableRef(this);
-    if (isRelative() && !analyzer.isWithClause()) {
-      SlotDescriptor parentSlotDesc = analyzer.registerSlotRef(resolvedPath_);
-      parentSlotDesc.setItemTupleDesc(desc_);
-      collectionExpr_ = new SlotRef(parentSlotDesc);
-      // Must always be materialized to ensure the correct cardinality after unnesting.
-      analyzer.materializeSlots(collectionExpr_);
-      Analyzer parentAnalyzer =
-          analyzer.findAnalyzer(resolvedPath_.getRootDesc().getId());
-      Preconditions.checkNotNull(parentAnalyzer);
-      if (parentAnalyzer != analyzer) {
-        TableRef parentRef =
-            parentAnalyzer.getTableRef(resolvedPath_.getRootDesc().getId());
-        Preconditions.checkNotNull(parentRef);
-        // InlineViews are currently not supported as a parent ref.
-        Preconditions.checkState(!(parentRef instanceof InlineViewRef));
-        correlatedTupleIds_.add(parentRef.getId());
-      }
-    }
-    if (!isRelative()) {
-      // Register a table-level privilege request as well as a column-level privilege request
-      // for the collection-typed column.
-      Preconditions.checkNotNull(resolvedPath_.getRootTable());
-      analyzer.registerAuthAndAuditEvent(resolvedPath_.getRootTable(), analyzer);
-      analyzer.registerPrivReq(new PrivilegeRequestBuilder().
-          allOf(Privilege.SELECT).onColumn(desc_.getTableName().getDb(),
-          desc_.getTableName().getTbl(), desc_.getPath().getRawPath().get(0))
-          .toRequest());
-    }
-    isAnalyzed_ = true;
-    analyzeHints(analyzer);
-
-    // TODO: For joins on nested collections some join ops can be simplified
-    // due to the containment relationship of the parent and child. For example,
-    // a FULL OUTER JOIN would become a LEFT OUTER JOIN, or a RIGHT SEMI JOIN
-    // would become an INNER or CROSS JOIN.
-    analyzeJoin(analyzer);
-  }
-
-  @Override
-  public boolean isRelative() {
-    Preconditions.checkNotNull(resolvedPath_);
-    return resolvedPath_.getRootDesc() != null;
-  }
-
-  public Expr getCollectionExpr() { return collectionExpr_; }
-
-  @Override
-  protected CollectionTableRef clone() { return new CollectionTableRef(this); }
-
-  @Override
-  public void reset() {
-    super.reset();
-    collectionExpr_ = null;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/ColumnDef.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/ColumnDef.java b/fe/src/main/java/com/cloudera/impala/analysis/ColumnDef.java
deleted file mode 100644
index e7a3170..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/ColumnDef.java
+++ /dev/null
@@ -1,143 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.List;
-
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TColumn;
-import com.cloudera.impala.util.MetaStoreUtil;
-import com.google.common.base.Function;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-/**
- * Represents a column definition in a CREATE/ALTER TABLE/VIEW statement.
- * Column definitions in CREATE/ALTER TABLE statements require a column type,
- * whereas column definitions in CREATE/ALTER VIEW statements infer the column type from
- * the corresponding view definition. All column definitions have an optional comment.
- * Since a column definition refers to a column stored in the Metastore, the column name
- * must be valid according to the Metastore's rules (see @MetaStoreUtils).
- */
-public class ColumnDef {
-  private final String colName_;
-  private String comment_;
-
-  // Required in CREATE/ALTER TABLE stmts. Set to NULL in CREATE/ALTER VIEW stmts,
-  // for which we setType() after analyzing the defining view definition stmt.
-  private final TypeDef typeDef_;
-  private Type type_;
-
-  public ColumnDef(String colName, TypeDef typeDef, String comment) {
-    colName_ = colName.toLowerCase();
-    typeDef_ = typeDef;
-    comment_ = comment;
-  }
-
-  /**
-   * Creates an analyzed ColumnDef from a Hive FieldSchema. Throws if the FieldSchema's
-   * type is not supported.
-   */
-  private ColumnDef(FieldSchema fs) throws AnalysisException {
-    Type type = Type.parseColumnType(fs.getType());
-    if (type == null) {
-      throw new AnalysisException(String.format(
-          "Unsupported type '%s' in Hive field schema '%s'",
-          fs.getType(), fs.getName()));
-    }
-    colName_ = fs.getName();
-    typeDef_ = new TypeDef(type);
-    comment_ = fs.getComment();
-    analyze();
-  }
-
-  public void setType(Type type) { type_ = type; }
-  public Type getType() { return type_; }
-  public TypeDef getTypeDef() { return typeDef_; }
-  public String getColName() { return colName_; }
-  public void setComment(String comment) { comment_ = comment; }
-  public String getComment() { return comment_; }
-
-  public void analyze() throws AnalysisException {
-    // Check whether the column name meets the Metastore's requirements.
-    if (!MetaStoreUtils.validateName(colName_)) {
-      throw new AnalysisException("Invalid column/field name: " + colName_);
-    }
-    if (typeDef_ != null) {
-      typeDef_.analyze(null);
-      type_ = typeDef_.getType();
-    }
-    Preconditions.checkNotNull(type_);
-    Preconditions.checkState(type_.isValid());
-    // Check HMS constraints of type and comment.
-    String typeSql = type_.toSql();
-    if (typeSql.length() > MetaStoreUtil.MAX_TYPE_NAME_LENGTH) {
-      throw new AnalysisException(String.format(
-          "Type of column '%s' exceeds maximum type length of %d characters:\n" +
-          "%s has %d characters.", colName_, MetaStoreUtil.MAX_TYPE_NAME_LENGTH,
-          typeSql, typeSql.length()));
-    }
-    if (comment_ != null &&
-        comment_.length() > MetaStoreUtil.CREATE_MAX_COMMENT_LENGTH) {
-      throw new AnalysisException(String.format(
-          "Comment of column '%s' exceeds maximum length of %d characters:\n" +
-          "%s has %d characters.", colName_, MetaStoreUtil.CREATE_MAX_COMMENT_LENGTH,
-          comment_, comment_.length()));
-    }
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder(colName_);
-    if (type_ != null) {
-      sb.append(" " + type_.toString());
-    } else {
-      sb.append(" " + typeDef_.toString());
-    }
-    if (comment_ != null) sb.append(String.format(" COMMENT '%s'", comment_));
-    return sb.toString();
-  }
-
-  public TColumn toThrift() {
-    TColumn col = new TColumn(new TColumn(getColName(), type_.toThrift()));
-    col.setComment(getComment());
-    return col;
-  }
-
-  public static List<ColumnDef> createFromFieldSchemas(List<FieldSchema> fieldSchemas)
-      throws AnalysisException {
-    List<ColumnDef> result = Lists.newArrayListWithCapacity(fieldSchemas.size());
-    for (FieldSchema fs: fieldSchemas) result.add(new ColumnDef(fs));
-    return result;
-  }
-
-  public static List<FieldSchema> toFieldSchemas(List<ColumnDef> colDefs) {
-    return Lists.transform(colDefs, new Function<ColumnDef, FieldSchema>() {
-      public FieldSchema apply(ColumnDef colDef) {
-        Preconditions.checkNotNull(colDef.getType());
-        return new FieldSchema(colDef.getColName(), colDef.getType().toSql(),
-            colDef.getComment());
-      }
-    });
-  }
-
-}
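
ColumnDef.analyze() above enforces the Hive Metastore limits on the SQL type
string and the column comment. A stand-alone sketch of just those length checks
(the limit values below are placeholders, not the real constants from
MetaStoreUtil.MAX_TYPE_NAME_LENGTH / CREATE_MAX_COMMENT_LENGTH):

    public class ColumnDefCheckSketch {
      static final int MAX_TYPE_NAME_LENGTH = 4000;  // placeholder limit
      static final int MAX_COMMENT_LENGTH = 256;     // placeholder limit

      static void check(String colName, String typeSql, String comment) {
        if (typeSql.length() > MAX_TYPE_NAME_LENGTH) {
          throw new IllegalArgumentException(String.format(
              "Type of column '%s' exceeds maximum type length of %d characters",
              colName, MAX_TYPE_NAME_LENGTH));
        }
        if (comment != null && comment.length() > MAX_COMMENT_LENGTH) {
          throw new IllegalArgumentException(String.format(
              "Comment of column '%s' exceeds maximum length of %d characters",
              colName, MAX_COMMENT_LENGTH));
        }
      }

      public static void main(String[] args) {
        check("price", "decimal(10,2)", "price in USD");  // passes both checks
      }
    }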



[52/61] incubator-impala git commit: IMPALA-3786: Replace "cloudera" with "apache" (part 2)

Posted by kw...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/testdata/bin/split-hbase.sh
----------------------------------------------------------------------
diff --git a/testdata/bin/split-hbase.sh b/testdata/bin/split-hbase.sh
index 5fa9195..909ef2a 100755
--- a/testdata/bin/split-hbase.sh
+++ b/testdata/bin/split-hbase.sh
@@ -40,7 +40,7 @@ export CLASSPATH=$IMPALA_HOME/testdata/target/impala-testdata-0.1-SNAPSHOT.jar:$
 : ${JAVA_KERBEROS_MAGIC=}
 for ATTEMPT in {1..10}; do
   if "$JAVA" ${JAVA_KERBEROS_MAGIC} \
-      com.cloudera.impala.datagenerator.HBaseTestDataRegionAssigment \
+      org.apache.impala.datagenerator.HBaseTestDataRegionAssigment \
       functional_hbase.alltypesagg functional_hbase.alltypessmall; then
     break
   fi

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/testdata/data/README
----------------------------------------------------------------------
diff --git a/testdata/data/README b/testdata/data/README
index 53993c8..3a0d5ec 100644
--- a/testdata/data/README
+++ b/testdata/data/README
@@ -43,7 +43,7 @@ bad_column_metadata.parquet:
 Generated with hacked version of parquet-mr 1.8.2-SNAPSHOT
 Schema:
  {"type": "record",
-  "namespace": "com.cloudera.impala",
+  "namespace": "org.apache.impala",
   "name": "bad_column_metadata",
   "fields": [
       {"name": "id", "type": ["null", "long"]},

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/testdata/data/schemas/nested/README
----------------------------------------------------------------------
diff --git a/testdata/data/schemas/nested/README b/testdata/data/schemas/nested/README
index 81688b4..479611d 100644
--- a/testdata/data/schemas/nested/README
+++ b/testdata/data/schemas/nested/README
@@ -1,6 +1,6 @@
 The two Parquet files (legacy_nested.parquet and modern_nested.parquet) were generated
 using the kite script located here:
-testdata/src/main/java/com/cloudera/impala/datagenerator/JsonToParquetConverter.java
+testdata/src/main/java/org/apache/impala/datagenerator/JsonToParquetConverter.java
 
 The Parquet files can be regenerated by running the following commands in the testdata
 directory:
@@ -8,14 +8,14 @@ directory:
 mvn package
 
 mvn exec:java \
-  -Dexec.mainClass="com.cloudera.impala.datagenerator.JsonToParquetConverter" \
+  -Dexec.mainClass="org.apache.impala.datagenerator.JsonToParquetConverter" \
   -Dexec.args="--legacy_collection_format
   data/schemas/nested/nested.avsc
   data/schemas/nested/nested.json
   data/schemas/nested/legacy_nested.parquet"
 
 mvn exec:java \
-  -Dexec.mainClass="com.cloudera.impala.datagenerator.JsonToParquetConverter" \
+  -Dexec.mainClass="org.apache.impala.datagenerator.JsonToParquetConverter" \
   -Dexec.args="
   data/schemas/nested/nested.avsc
   data/schemas/nested/nested.json

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/testdata/data/schemas/nested/nested.avsc
----------------------------------------------------------------------
diff --git a/testdata/data/schemas/nested/nested.avsc b/testdata/data/schemas/nested/nested.avsc
index 174cd26..178c858 100644
--- a/testdata/data/schemas/nested/nested.avsc
+++ b/testdata/data/schemas/nested/nested.avsc
@@ -1,5 +1,5 @@
 {"type": "record",
-"namespace": "com.cloudera.impala",
+"namespace": "org.apache.impala",
 "name": "nested",
 "fields": [
   {"name": "t_long", "type": "long"},

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/testdata/parquet_schema_resolution/README
----------------------------------------------------------------------
diff --git a/testdata/parquet_schema_resolution/README b/testdata/parquet_schema_resolution/README
index 840067f..c279663 100644
--- a/testdata/parquet_schema_resolution/README
+++ b/testdata/parquet_schema_resolution/README
@@ -2,7 +2,7 @@ switched_map.parq was generated by modifying parquet-mr to switch the key and va
 of map, and then converting switched_map.json to parquet using switched_map.avsc as the
 schema. switched_map.parq has the following schema according to parquet-tools:
 
-message com.cloudera.impala.switched_map {
+message org.apache.impala.switched_map {
   required group int_map (MAP) {
     repeated group map (MAP_KEY_VALUE) {
       required int32 value;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/testdata/parquet_schema_resolution/switched_map.avsc
----------------------------------------------------------------------
diff --git a/testdata/parquet_schema_resolution/switched_map.avsc b/testdata/parquet_schema_resolution/switched_map.avsc
index 8805261..f687d87 100644
--- a/testdata/parquet_schema_resolution/switched_map.avsc
+++ b/testdata/parquet_schema_resolution/switched_map.avsc
@@ -1,6 +1,6 @@
 
 {"type": "record",
- "namespace": "com.cloudera.impala",
+ "namespace": "org.apache.impala",
  "name": "switched_map",
  "fields": [
      {"name": "int_map", "type": {"type": "map", "values": "int"}}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/testdata/pom.xml
----------------------------------------------------------------------
diff --git a/testdata/pom.xml b/testdata/pom.xml
index 32ff489..0b45fc0 100644
--- a/testdata/pom.xml
+++ b/testdata/pom.xml
@@ -27,7 +27,7 @@ under the License.
     <artifactId>parent</artifactId>
     <version>1.0</version>
   </parent>
-  <groupId>com.cloudera.impala</groupId>
+  <groupId>org.apache.impala</groupId>
   <artifactId>impala-testdata</artifactId>
   <version>0.1-SNAPSHOT</version>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/testdata/src/main/java/org/apache/impala/datagenerator/CsvToHBaseConverter.java
----------------------------------------------------------------------
diff --git a/testdata/src/main/java/org/apache/impala/datagenerator/CsvToHBaseConverter.java b/testdata/src/main/java/org/apache/impala/datagenerator/CsvToHBaseConverter.java
index b3b2351..1bdbcc7 100644
--- a/testdata/src/main/java/org/apache/impala/datagenerator/CsvToHBaseConverter.java
+++ b/testdata/src/main/java/org/apache/impala/datagenerator/CsvToHBaseConverter.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.datagenerator;
+package org.apache.impala.datagenerator;
 
 import java.io.BufferedReader;
 import java.io.DataInputStream;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/testdata/src/main/java/org/apache/impala/datagenerator/HBaseTestDataRegionAssigment.java
----------------------------------------------------------------------
diff --git a/testdata/src/main/java/org/apache/impala/datagenerator/HBaseTestDataRegionAssigment.java b/testdata/src/main/java/org/apache/impala/datagenerator/HBaseTestDataRegionAssigment.java
index 8132b9d..0e7f48e 100644
--- a/testdata/src/main/java/org/apache/impala/datagenerator/HBaseTestDataRegionAssigment.java
+++ b/testdata/src/main/java/org/apache/impala/datagenerator/HBaseTestDataRegionAssigment.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.datagenerator;
+package org.apache.impala.datagenerator;
 
 import java.io.IOException;
 import java.util.ArrayList;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/testdata/src/main/java/org/apache/impala/datagenerator/JsonToParquetConverter.java
----------------------------------------------------------------------
diff --git a/testdata/src/main/java/org/apache/impala/datagenerator/JsonToParquetConverter.java b/testdata/src/main/java/org/apache/impala/datagenerator/JsonToParquetConverter.java
index 3b4d51e..5abb2de 100644
--- a/testdata/src/main/java/org/apache/impala/datagenerator/JsonToParquetConverter.java
+++ b/testdata/src/main/java/org/apache/impala/datagenerator/JsonToParquetConverter.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.datagenerator;
+package org.apache.impala.datagenerator;
 
 import java.io.File;
 import java.io.IOException;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/testdata/src/main/java/org/apache/impala/datagenerator/RandomNestedDataGenerator.java
----------------------------------------------------------------------
diff --git a/testdata/src/main/java/org/apache/impala/datagenerator/RandomNestedDataGenerator.java b/testdata/src/main/java/org/apache/impala/datagenerator/RandomNestedDataGenerator.java
index d6bab93..5cf65d2 100644
--- a/testdata/src/main/java/org/apache/impala/datagenerator/RandomNestedDataGenerator.java
+++ b/testdata/src/main/java/org/apache/impala/datagenerator/RandomNestedDataGenerator.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.datagenerator;
+package org.apache.impala.datagenerator;
 
 import java.io.File;
 import java.io.IOException;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/testdata/src/main/java/org/apache/impala/datagenerator/TestDataGenerator.java
----------------------------------------------------------------------
diff --git a/testdata/src/main/java/org/apache/impala/datagenerator/TestDataGenerator.java b/testdata/src/main/java/org/apache/impala/datagenerator/TestDataGenerator.java
index 2d2fd64..7753ac0 100644
--- a/testdata/src/main/java/org/apache/impala/datagenerator/TestDataGenerator.java
+++ b/testdata/src/main/java/org/apache/impala/datagenerator/TestDataGenerator.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.datagenerator;
+package org.apache.impala.datagenerator;
 
 import java.io.File;
 import java.io.FileWriter;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/testdata/workloads/functional-query/queries/QueryTest/load-java-udfs.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-query/queries/QueryTest/load-java-udfs.test b/testdata/workloads/functional-query/queries/QueryTest/load-java-udfs.test
index 0df3fd4..3f86a9e 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/load-java-udfs.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/load-java-udfs.test
@@ -56,69 +56,69 @@ symbol='org.apache.hadoop.hive.ql.udf.UDFLower';
 # Used to test persistent java functions
 create function java_udfs_test.identity
 location '$FILESYSTEM_PREFIX/test-warehouse/impala-hive-udfs.jar'
-symbol='com.cloudera.impala.TestUdf';
+symbol='org.apache.impala.TestUdf';
 
 create function udf_test.identity(boolean) returns boolean
 location '$FILESYSTEM_PREFIX/test-warehouse/impala-hive-udfs.jar'
-symbol='com.cloudera.impala.TestUdf';
+symbol='org.apache.impala.TestUdf';
 
 create function udf_test.identity(tinyint) returns tinyint
 location '$FILESYSTEM_PREFIX/test-warehouse/impala-hive-udfs.jar'
-symbol='com.cloudera.impala.TestUdf';
+symbol='org.apache.impala.TestUdf';
 
 create function udf_test.identity(smallint) returns smallint
 location '$FILESYSTEM_PREFIX/test-warehouse/impala-hive-udfs.jar'
-symbol='com.cloudera.impala.TestUdf';
+symbol='org.apache.impala.TestUdf';
 
 create function udf_test.identity(int) returns int
 location '$FILESYSTEM_PREFIX/test-warehouse/impala-hive-udfs.jar'
-symbol='com.cloudera.impala.TestUdf';
+symbol='org.apache.impala.TestUdf';
 
 create function udf_test.identity(bigint) returns bigint
 location '$FILESYSTEM_PREFIX/test-warehouse/impala-hive-udfs.jar'
-symbol='com.cloudera.impala.TestUdf';
+symbol='org.apache.impala.TestUdf';
 
 create function udf_test.identity(float) returns float
 location '$FILESYSTEM_PREFIX/test-warehouse/impala-hive-udfs.jar'
-symbol='com.cloudera.impala.TestUdf';
+symbol='org.apache.impala.TestUdf';
 
 create function udf_test.identity(double) returns double
 location '$FILESYSTEM_PREFIX/test-warehouse/impala-hive-udfs.jar'
-symbol='com.cloudera.impala.TestUdf';
+symbol='org.apache.impala.TestUdf';
 
 create function udf_test.identity(string) returns string
 location '$FILESYSTEM_PREFIX/test-warehouse/impala-hive-udfs.jar'
-symbol='com.cloudera.impala.TestUdf';
+symbol='org.apache.impala.TestUdf';
 
 create function udf_test.identity(string, string) returns string
 location '$FILESYSTEM_PREFIX/test-warehouse/impala-hive-udfs.jar'
-symbol='com.cloudera.impala.TestUdf';
+symbol='org.apache.impala.TestUdf';
 
 create function udf_test.identity(string, string, string) returns string
 location '$FILESYSTEM_PREFIX/test-warehouse/impala-hive-udfs.jar'
-symbol='com.cloudera.impala.TestUdf';
+symbol='org.apache.impala.TestUdf';
 
 create function udf_test.hive_add(int, int) returns int
 location '$FILESYSTEM_PREFIX/test-warehouse/impala-hive-udfs.jar'
-symbol='com.cloudera.impala.TestUdf';
+symbol='org.apache.impala.TestUdf';
 
 create function udf_test.hive_add(smallint, smallint) returns smallint
 location '$FILESYSTEM_PREFIX/test-warehouse/impala-hive-udfs.jar'
-symbol='com.cloudera.impala.TestUdf';
+symbol='org.apache.impala.TestUdf';
 
 create function udf_test.hive_add(float, float) returns float
 location '$FILESYSTEM_PREFIX/test-warehouse/impala-hive-udfs.jar'
-symbol='com.cloudera.impala.TestUdf';
+symbol='org.apache.impala.TestUdf';
 
 create function udf_test.hive_add(double, double) returns double
 location '$FILESYSTEM_PREFIX/test-warehouse/impala-hive-udfs.jar'
-symbol='com.cloudera.impala.TestUdf';
+symbol='org.apache.impala.TestUdf';
 
 create function udf_test.hive_add(boolean, boolean) returns boolean
 location '$FILESYSTEM_PREFIX/test-warehouse/impala-hive-udfs.jar'
-symbol='com.cloudera.impala.TestUdf';
+symbol='org.apache.impala.TestUdf';
 
 create function udf_test.throws_exception() returns boolean
 location '$FILESYSTEM_PREFIX/test-warehouse/impala-hive-udfs.jar'
-symbol='com.cloudera.impala.TestUdfException';
+symbol='org.apache.impala.TestUdfException';
 ====
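
The CREATE FUNCTION statements above point SYMBOL at classes such as
org.apache.impala.TestUdf. A minimal sketch of a Hive-compatible UDF in that
style, assuming the standard org.apache.hadoop.hive.ql.exec.UDF base class and
a hive-exec dependency on the classpath (the class name is illustrative, not
the repo's TestUdf):

    package org.apache.impala;

    import org.apache.hadoop.hive.ql.exec.UDF;
    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.Text;

    // Each evaluate() overload can back a separate CREATE FUNCTION signature,
    // as in the per-type statements above.
    public class IdentityUdfSketch extends UDF {
      public IntWritable evaluate(IntWritable x) { return x; }
      public Text evaluate(Text s) { return s; }
    }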

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/testdata/workloads/functional-query/queries/QueryTest/show-data-sources.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-query/queries/QueryTest/show-data-sources.test b/testdata/workloads/functional-query/queries/QueryTest/show-data-sources.test
index 97c624e..e06cfee 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/show-data-sources.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/show-data-sources.test
@@ -3,8 +3,8 @@
 # Test show data sources
 show data sources
 ---- RESULTS: VERIFY_IS_SUBSET
-'show_test_ds1','$NAMENODE/test-warehouse/data-sources/test-data-source.jar','com.cloudera.impala.extdatasource.AllTypesDataSource','V1'
-'show_test_ds2','$NAMENODE/test-warehouse/data-sources/test-data-source.jar','com.cloudera.impala.extdatasource.AllTypesDataSource','V1'
+'show_test_ds1','$NAMENODE/test-warehouse/data-sources/test-data-source.jar','org.apache.impala.extdatasource.AllTypesDataSource','V1'
+'show_test_ds2','$NAMENODE/test-warehouse/data-sources/test-data-source.jar','org.apache.impala.extdatasource.AllTypesDataSource','V1'
 ---- LABELS
 NAME, LOCATION, CLASS NAME, API VERSION
 ---- TYPES
@@ -13,8 +13,8 @@ STRING, STRING, STRING, STRING
 ---- QUERY
 show data sources like 'show_test_ds*'
 ---- RESULTS
-'show_test_ds1','$NAMENODE/test-warehouse/data-sources/test-data-source.jar','com.cloudera.impala.extdatasource.AllTypesDataSource','V1'
-'show_test_ds2','$NAMENODE/test-warehouse/data-sources/test-data-source.jar','com.cloudera.impala.extdatasource.AllTypesDataSource','V1'
+'show_test_ds1','$NAMENODE/test-warehouse/data-sources/test-data-source.jar','org.apache.impala.extdatasource.AllTypesDataSource','V1'
+'show_test_ds2','$NAMENODE/test-warehouse/data-sources/test-data-source.jar','org.apache.impala.extdatasource.AllTypesDataSource','V1'
 ---- LABELS
 NAME, LOCATION, CLASS NAME, API VERSION
 ---- TYPES
@@ -23,7 +23,7 @@ STRING, STRING, STRING, STRING
 ---- QUERY
 show data sources like 'show_test_ds1'
 ---- RESULTS
-'show_test_ds1','$NAMENODE/test-warehouse/data-sources/test-data-source.jar','com.cloudera.impala.extdatasource.AllTypesDataSource','V1'
+'show_test_ds1','$NAMENODE/test-warehouse/data-sources/test-data-source.jar','org.apache.impala.extdatasource.AllTypesDataSource','V1'
 ---- LABELS
 NAME, LOCATION, CLASS NAME, API VERSION
 ---- TYPES

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/tests/custom_cluster/test_permanent_udfs.py
----------------------------------------------------------------------
diff --git a/tests/custom_cluster/test_permanent_udfs.py b/tests/custom_cluster/test_permanent_udfs.py
index 2f2457a..c979cd1 100644
--- a/tests/custom_cluster/test_permanent_udfs.py
+++ b/tests/custom_cluster/test_permanent_udfs.py
@@ -164,7 +164,7 @@ class TestUdfPersistence(CustomClusterTestSuite):
     # restart the Catalog server. Catalog should ignore the
     # function load.
     self.run_stmt_in_hive("create function %s.corrupt_udf as \
-        'com.cloudera.impala.UnresolvedUdf' using jar '%s'"
+        'org.apache.impala.UnresolvedUdf' using jar '%s'"
         % (self.JAVA_FN_TEST_DB, self.JAVA_UDF_JAR))
     self.__restart_cluster()
     # Make sure the function count is 0
@@ -235,11 +235,11 @@ class TestUdfPersistence(CustomClusterTestSuite):
     # persistent Java UDF with same name
     self.client.execute("create function %s.%s(boolean) returns boolean "\
         "location '%s' symbol='%s'" % (self.JAVA_FN_TEST_DB, "identity",
-        self.JAVA_UDF_JAR, "com.cloudera.impala.TestUdf"))
+        self.JAVA_UDF_JAR, "org.apache.impala.TestUdf"))
     result = self.execute_query_expect_failure(self.client,
         self.CREATE_JAVA_UDF_TEMPLATE.format(db=self.JAVA_FN_TEST_DB,
         function="identity", location=self.JAVA_UDF_JAR,
-        symbol="com.cloudera.impala.TestUdf"))
+        symbol="org.apache.impala.TestUdf"))
     assert "Function already exists" in str(result)
     # Test the same with a NATIVE function
     self.client.execute("create function {database}.identity(int) "\
@@ -249,18 +249,18 @@ class TestUdfPersistence(CustomClusterTestSuite):
     result = self.execute_query_expect_failure(self.client,
         self.CREATE_JAVA_UDF_TEMPLATE.format(db=self.JAVA_FN_TEST_DB,
         function="identity", location=self.JAVA_UDF_JAR,
-        symbol="com.cloudera.impala.TestUdf"))
+        symbol="org.apache.impala.TestUdf"))
     assert "Function already exists" in str(result)
 
     # Test the reverse. Add a persistent Java UDF and ensure we cannot
     # add non persistent Java UDFs or NATIVE functions with the same name.
     self.client.execute(self.CREATE_JAVA_UDF_TEMPLATE.format(
         db=self.JAVA_FN_TEST_DB, function="identity_java",
-        location=self.JAVA_UDF_JAR, symbol="com.cloudera.impala.TestUdf"))
+        location=self.JAVA_UDF_JAR, symbol="org.apache.impala.TestUdf"))
     result = self.execute_query_expect_failure(self.client, "create function "\
         "%s.%s(boolean) returns boolean location '%s' symbol='%s'" % (
         self.JAVA_FN_TEST_DB, "identity_java", self.JAVA_UDF_JAR,
-        "com.cloudera.impala.TestUdf"))
+        "org.apache.impala.TestUdf"))
     assert "Function already exists" in str(result)
     result = self.execute_query_expect_failure(self.client, "create function "\
         "{database}.identity_java(int) returns int location '{location}' "\
@@ -284,7 +284,7 @@ class TestUdfPersistence(CustomClusterTestSuite):
     compatibility_fn_count = 3
     self.client.execute(self.CREATE_JAVA_UDF_TEMPLATE.format(
         db=self.JAVA_FN_TEST_DB, function="compatibility",
-        location=self.JAVA_UDF_JAR, symbol="com.cloudera.impala.JavaUdfTest"))
+        location=self.JAVA_UDF_JAR, symbol="org.apache.impala.JavaUdfTest"))
     self.verify_function_count(
         "SHOW FUNCTIONS IN %s like 'compatibility*'" % self.JAVA_FN_TEST_DB,
         compatibility_fn_count)
@@ -316,7 +316,7 @@ class TestUdfPersistence(CustomClusterTestSuite):
     # to Hive and Impala.
     result = self.execute_query_expect_failure(self.client,
         self.CREATE_JAVA_UDF_TEMPLATE.format(db=self.JAVA_FN_TEST_DB, function="badudf",
-        location=self.JAVA_UDF_JAR, symbol="com.cloudera.impala.IncompatibleUdfTest"))
+        location=self.JAVA_UDF_JAR, symbol="org.apache.impala.IncompatibleUdfTest"))
     assert "No compatible function signatures" in str(result)
     self.verify_function_count(
         "SHOW FUNCTIONS IN %s like 'badudf*'" % self.JAVA_FN_TEST_DB, 0)
@@ -326,7 +326,7 @@ class TestUdfPersistence(CustomClusterTestSuite):
     # Create the same function from hive and make sure Impala doesn't load any signatures.
     self.run_stmt_in_hive(self.CREATE_HIVE_UDF_TEMPLATE.format(
         db=self.JAVA_FN_TEST_DB, function="badudf",
-        location=self.JAVA_UDF_JAR, symbol="com.cloudera.impala.IncompatibleUdfTest"))
+        location=self.JAVA_UDF_JAR, symbol="org.apache.impala.IncompatibleUdfTest"))
     result = self.run_stmt_in_hive("DESCRIBE FUNCTION %s.%s"
         % (self.JAVA_FN_TEST_DB, "badudf"))
     assert "does not exist" not in str(result)
@@ -336,7 +336,7 @@ class TestUdfPersistence(CustomClusterTestSuite):
     # Add a function with the same name from Impala. It should fail.
     result = self.execute_query_expect_failure(self.client,
         self.CREATE_JAVA_UDF_TEMPLATE.format(db=self.JAVA_FN_TEST_DB, function="badudf",
-        location=self.JAVA_UDF_JAR, symbol="com.cloudera.impala.TestUdf"))
+        location=self.JAVA_UDF_JAR, symbol="org.apache.impala.TestUdf"))
     assert "Function badudf already exists" in str(result)
     # Drop the function and make sure the function is dropped from hive
     self.client.execute(self.DROP_JAVA_UDF_TEMPLATE.format(

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/tests/metadata/test_ddl.py
----------------------------------------------------------------------
diff --git a/tests/metadata/test_ddl.py b/tests/metadata/test_ddl.py
index 668b051..18ed1af 100644
--- a/tests/metadata/test_ddl.py
+++ b/tests/metadata/test_ddl.py
@@ -427,7 +427,7 @@ class TestLibCache(TestDdlBase):
     data_src_name = unique_database + "_datasrc"
     create_ds_stmt = ("CREATE DATA SOURCE {0} "
         "LOCATION '{1}/data-sources/test-data-source.jar' "
-        "CLASS 'com.cloudera.impala.extdatasource.AllTypesDataSource' "
+        "CLASS 'org.apache.impala.extdatasource.AllTypesDataSource' "
         "API_VERSION 'V1'".format(data_src_name, WAREHOUSE))
     create_tbl_stmt = ("CREATE TABLE {0}.data_src_tbl (x int) "
         "PRODUCED BY DATA SOURCE {1}('dummy_init_string')")\

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/tests/metadata/test_metadata_query_statements.py
----------------------------------------------------------------------
diff --git a/tests/metadata/test_metadata_query_statements.py b/tests/metadata/test_metadata_query_statements.py
index 7b2f72f..0a51acb 100644
--- a/tests/metadata/test_metadata_query_statements.py
+++ b/tests/metadata/test_metadata_query_statements.py
@@ -33,7 +33,7 @@ class TestMetadataQueryStatements(ImpalaTestSuite):
 
   CREATE_DATA_SRC_STMT = ("CREATE DATA SOURCE %s LOCATION '" +
       get_fs_path("/test-warehouse/data-sources/test-data-source.jar") +
-      "' CLASS 'com.cloudera.impala.extdatasource.AllTypesDataSource' API_VERSION 'V1'")
+      "' CLASS 'org.apache.impala.extdatasource.AllTypesDataSource' API_VERSION 'V1'")
   DROP_DATA_SRC_STMT = "DROP DATA SOURCE IF EXISTS %s"
   TEST_DATA_SRC_NAMES = ["show_test_ds1", "show_test_ds2"]
   AVRO_SCHEMA_LOC = get_fs_path("/test-warehouse/avro_schemas/functional/alltypes.json")

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/tests/query_test/test_udfs.py
----------------------------------------------------------------------
diff --git a/tests/query_test/test_udfs.py b/tests/query_test/test_udfs.py
index ada7712..02bdf4f 100644
--- a/tests/query_test/test_udfs.py
+++ b/tests/query_test/test_udfs.py
@@ -169,7 +169,7 @@ class TestUdfs(ImpalaTestSuite):
         'drop function if exists `{0}`.`udf_update_test_drop`()'.format(unique_database))
     create_fn_stmt = (
         "create function `{0}`.`udf_update_test_drop`() returns string LOCATION '{1}' "
-        "SYMBOL='com.cloudera.impala.TestUpdateUdf'".format(unique_database, udf_dst))
+        "SYMBOL='org.apache.impala.TestUpdateUdf'".format(unique_database, udf_dst))
     query_stmt = "select `{0}`.`udf_update_test_drop`()".format(unique_database)
 
     # Put the old UDF binary on HDFS, make the UDF in Impala and run it.
@@ -207,7 +207,7 @@ class TestUdfs(ImpalaTestSuite):
 
     create_fn_template = (
         "create function `{0}`.`{{0}}`() returns string LOCATION '{1}' "
-        "SYMBOL='com.cloudera.impala.TestUpdateUdf'".format(unique_database, udf_dst))
+        "SYMBOL='org.apache.impala.TestUpdateUdf'".format(unique_database, udf_dst))
 
     query_template = "select `{0}`.`{{0}}`()".format(unique_database)
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/tests/test-hive-udfs/pom.xml
----------------------------------------------------------------------
diff --git a/tests/test-hive-udfs/pom.xml b/tests/test-hive-udfs/pom.xml
index 5e500ff..7f18c7b 100644
--- a/tests/test-hive-udfs/pom.xml
+++ b/tests/test-hive-udfs/pom.xml
@@ -21,7 +21,7 @@ under the License.
   xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <modelVersion>4.0.0</modelVersion>
 
-  <groupId>com.cloudera.impala</groupId>
+  <groupId>org.apache.impala</groupId>
   <artifactId>test-hive-udfs</artifactId>
   <version>1.0</version>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/tests/test-hive-udfs/src/main/java/org/apache/impala/IncompatibleUdfTest.java
----------------------------------------------------------------------
diff --git a/tests/test-hive-udfs/src/main/java/org/apache/impala/IncompatibleUdfTest.java b/tests/test-hive-udfs/src/main/java/org/apache/impala/IncompatibleUdfTest.java
index 561abaf..8a52c0c 100644
--- a/tests/test-hive-udfs/src/main/java/org/apache/impala/IncompatibleUdfTest.java
+++ b/tests/test-hive-udfs/src/main/java/org/apache/impala/IncompatibleUdfTest.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala;
+package org.apache.impala;
 
 import com.google.common.collect.Lists;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/tests/test-hive-udfs/src/main/java/org/apache/impala/JavaUdfTest.java
----------------------------------------------------------------------
diff --git a/tests/test-hive-udfs/src/main/java/org/apache/impala/JavaUdfTest.java b/tests/test-hive-udfs/src/main/java/org/apache/impala/JavaUdfTest.java
index 94418ff..b065665 100644
--- a/tests/test-hive-udfs/src/main/java/org/apache/impala/JavaUdfTest.java
+++ b/tests/test-hive-udfs/src/main/java/org/apache/impala/JavaUdfTest.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala;
+package org.apache.impala;
 
 import com.google.common.collect.Lists;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/tests/test-hive-udfs/src/main/java/org/apache/impala/TestUdf.java
----------------------------------------------------------------------
diff --git a/tests/test-hive-udfs/src/main/java/org/apache/impala/TestUdf.java b/tests/test-hive-udfs/src/main/java/org/apache/impala/TestUdf.java
index 5390116..6e60e6b 100644
--- a/tests/test-hive-udfs/src/main/java/org/apache/impala/TestUdf.java
+++ b/tests/test-hive-udfs/src/main/java/org/apache/impala/TestUdf.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala;
+package org.apache.impala;
 
 import org.apache.hadoop.hive.ql.exec.UDF;
 import org.apache.hadoop.hive.serde2.io.ByteWritable;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/tests/test-hive-udfs/src/main/java/org/apache/impala/TestUdfException.java
----------------------------------------------------------------------
diff --git a/tests/test-hive-udfs/src/main/java/org/apache/impala/TestUdfException.java b/tests/test-hive-udfs/src/main/java/org/apache/impala/TestUdfException.java
index 6a62d3e..428ad28 100644
--- a/tests/test-hive-udfs/src/main/java/org/apache/impala/TestUdfException.java
+++ b/tests/test-hive-udfs/src/main/java/org/apache/impala/TestUdfException.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala;
+package org.apache.impala;
 
 import org.apache.hadoop.hive.ql.exec.UDF;
 import org.apache.hadoop.io.BooleanWritable;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/tests/test-hive-udfs/src/main/java/org/apache/impala/TestUpdateUdf.java
----------------------------------------------------------------------
diff --git a/tests/test-hive-udfs/src/main/java/org/apache/impala/TestUpdateUdf.java b/tests/test-hive-udfs/src/main/java/org/apache/impala/TestUpdateUdf.java
index 2d76d73..0366edf 100644
--- a/tests/test-hive-udfs/src/main/java/org/apache/impala/TestUpdateUdf.java
+++ b/tests/test-hive-udfs/src/main/java/org/apache/impala/TestUpdateUdf.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala;
+package org.apache.impala;
 
 import org.apache.hadoop.hive.ql.exec.UDF;
 import org.apache.hadoop.io.Text;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/tests/test-hive-udfs/src/main/java/org/apache/impala/UnresolvedUdf.java
----------------------------------------------------------------------
diff --git a/tests/test-hive-udfs/src/main/java/org/apache/impala/UnresolvedUdf.java b/tests/test-hive-udfs/src/main/java/org/apache/impala/UnresolvedUdf.java
index 6487bab..2857c04 100644
--- a/tests/test-hive-udfs/src/main/java/org/apache/impala/UnresolvedUdf.java
+++ b/tests/test-hive-udfs/src/main/java/org/apache/impala/UnresolvedUdf.java
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package com.cloudera.impala;
+package org.apache.impala;
 
 // Import a class that is not shaded in the UDF jar.
 import com.google.i18n.phonenumbers.NumberParseException;


[57/61] incubator-impala git commit: IMPALA-3786: Replace "cloudera" with "apache" (part 2)

Posted by kw...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/ExprId.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/ExprId.java b/fe/src/main/java/org/apache/impala/analysis/ExprId.java
index 52292f5..253fd11 100644
--- a/fe/src/main/java/org/apache/impala/analysis/ExprId.java
+++ b/fe/src/main/java/org/apache/impala/analysis/ExprId.java
@@ -15,10 +15,10 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
-import com.cloudera.impala.common.Id;
-import com.cloudera.impala.common.IdGenerator;
+import org.apache.impala.common.Id;
+import org.apache.impala.common.IdGenerator;
 
 public class ExprId extends Id<ExprId> {
   // Construction only allowed via an IdGenerator.

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/ExprSubstitutionMap.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/ExprSubstitutionMap.java b/fe/src/main/java/org/apache/impala/analysis/ExprSubstitutionMap.java
index cbff71a..a2de730 100644
--- a/fe/src/main/java/org/apache/impala/analysis/ExprSubstitutionMap.java
+++ b/fe/src/main/java/org/apache/impala/analysis/ExprSubstitutionMap.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.List;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/ExtractFromExpr.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/ExtractFromExpr.java b/fe/src/main/java/org/apache/impala/analysis/ExtractFromExpr.java
index 48b9fb3..f49535c 100644
--- a/fe/src/main/java/org/apache/impala/analysis/ExtractFromExpr.java
+++ b/fe/src/main/java/org/apache/impala/analysis/ExtractFromExpr.java
@@ -15,14 +15,14 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.Set;
 
-import com.cloudera.impala.catalog.Catalog;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TExtractField;
+import org.apache.impala.catalog.Catalog;
+import org.apache.impala.catalog.Type;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.TExtractField;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableSet;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/FromClause.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/FromClause.java b/fe/src/main/java/org/apache/impala/analysis/FromClause.java
index bbe6f23..0526ca9 100644
--- a/fe/src/main/java/org/apache/impala/analysis/FromClause.java
+++ b/fe/src/main/java/org/apache/impala/analysis/FromClause.java
@@ -15,13 +15,13 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
 
-import com.cloudera.impala.common.AnalysisException;
+import org.apache.impala.common.AnalysisException;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/FunctionArgs.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/FunctionArgs.java b/fe/src/main/java/org/apache/impala/analysis/FunctionArgs.java
index 998c5fc..ad387dc 100644
--- a/fe/src/main/java/org/apache/impala/analysis/FunctionArgs.java
+++ b/fe/src/main/java/org/apache/impala/analysis/FunctionArgs.java
@@ -15,12 +15,12 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.ArrayList;
 
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
+import org.apache.impala.catalog.Type;
+import org.apache.impala.common.AnalysisException;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/FunctionCallExpr.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/FunctionCallExpr.java b/fe/src/main/java/org/apache/impala/analysis/FunctionCallExpr.java
index d53aa9e..2653684 100644
--- a/fe/src/main/java/org/apache/impala/analysis/FunctionCallExpr.java
+++ b/fe/src/main/java/org/apache/impala/analysis/FunctionCallExpr.java
@@ -15,24 +15,24 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.List;
 
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.catalog.AggregateFunction;
-import com.cloudera.impala.catalog.Catalog;
-import com.cloudera.impala.catalog.Db;
-import com.cloudera.impala.catalog.Function;
-import com.cloudera.impala.catalog.ScalarFunction;
-import com.cloudera.impala.catalog.ScalarType;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.TreeNode;
-import com.cloudera.impala.thrift.TAggregateExpr;
-import com.cloudera.impala.thrift.TExprNode;
-import com.cloudera.impala.thrift.TExprNodeType;
-import com.cloudera.impala.thrift.TFunctionBinaryType;
+import org.apache.impala.authorization.Privilege;
+import org.apache.impala.catalog.AggregateFunction;
+import org.apache.impala.catalog.Catalog;
+import org.apache.impala.catalog.Db;
+import org.apache.impala.catalog.Function;
+import org.apache.impala.catalog.ScalarFunction;
+import org.apache.impala.catalog.ScalarType;
+import org.apache.impala.catalog.Type;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.common.TreeNode;
+import org.apache.impala.thrift.TAggregateExpr;
+import org.apache.impala.thrift.TExprNode;
+import org.apache.impala.thrift.TExprNodeType;
+import org.apache.impala.thrift.TFunctionBinaryType;
 import com.google.common.base.Joiner;
 import com.google.common.base.Objects;
 import com.google.common.base.Preconditions;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/FunctionName.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/FunctionName.java b/fe/src/main/java/org/apache/impala/analysis/FunctionName.java
index 5609578..00d702c 100644
--- a/fe/src/main/java/org/apache/impala/analysis/FunctionName.java
+++ b/fe/src/main/java/org/apache/impala/analysis/FunctionName.java
@@ -15,14 +15,14 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.ArrayList;
 
-import com.cloudera.impala.catalog.Catalog;
-import com.cloudera.impala.catalog.Db;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TFunctionName;
+import org.apache.impala.catalog.Catalog;
+import org.apache.impala.catalog.Db;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.TFunctionName;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/FunctionParams.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/FunctionParams.java b/fe/src/main/java/org/apache/impala/analysis/FunctionParams.java
index e6854a0..4a1effb 100644
--- a/fe/src/main/java/org/apache/impala/analysis/FunctionParams.java
+++ b/fe/src/main/java/org/apache/impala/analysis/FunctionParams.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.List;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/GrantRevokePrivStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/GrantRevokePrivStmt.java b/fe/src/main/java/org/apache/impala/analysis/GrantRevokePrivStmt.java
index edaf22a..d2c2040 100644
--- a/fe/src/main/java/org/apache/impala/analysis/GrantRevokePrivStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/GrantRevokePrivStmt.java
@@ -15,14 +15,14 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.List;
 
-import com.cloudera.impala.catalog.Role;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TGrantRevokePrivParams;
-import com.cloudera.impala.thrift.TPrivilege;
+import org.apache.impala.catalog.Role;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.TGrantRevokePrivParams;
+import org.apache.impala.thrift.TPrivilege;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Strings;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/GrantRevokeRoleStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/GrantRevokeRoleStmt.java b/fe/src/main/java/org/apache/impala/analysis/GrantRevokeRoleStmt.java
index 73240dc..dfcf519 100644
--- a/fe/src/main/java/org/apache/impala/analysis/GrantRevokeRoleStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/GrantRevokeRoleStmt.java
@@ -15,10 +15,10 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TGrantRevokeRoleParams;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.TGrantRevokeRoleParams;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Strings;
 import com.google.common.collect.Lists;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/HdfsCachingOp.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/HdfsCachingOp.java b/fe/src/main/java/org/apache/impala/analysis/HdfsCachingOp.java
index 1b6cff2..0ee274c 100644
--- a/fe/src/main/java/org/apache/impala/analysis/HdfsCachingOp.java
+++ b/fe/src/main/java/org/apache/impala/analysis/HdfsCachingOp.java
@@ -15,13 +15,13 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.math.BigDecimal;
 
-import com.cloudera.impala.catalog.HdfsCachePool;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.THdfsCachingOp;
+import org.apache.impala.catalog.HdfsCachePool;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.THdfsCachingOp;
 import com.google.common.base.Preconditions;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/HdfsUri.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/HdfsUri.java b/fe/src/main/java/org/apache/impala/analysis/HdfsUri.java
index 9fbe467..693c211 100644
--- a/fe/src/main/java/org/apache/impala/analysis/HdfsUri.java
+++ b/fe/src/main/java/org/apache/impala/analysis/HdfsUri.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.io.IOException;
 
@@ -23,12 +23,12 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsAction;
 
-import com.cloudera.impala.authorization.AuthorizeableUri;
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.authorization.PrivilegeRequest;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.FileSystemUtil;
-import com.cloudera.impala.util.FsPermissionChecker;
+import org.apache.impala.authorization.AuthorizeableUri;
+import org.apache.impala.authorization.Privilege;
+import org.apache.impala.authorization.PrivilegeRequest;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.common.FileSystemUtil;
+import org.apache.impala.util.FsPermissionChecker;
 import com.google.common.base.Preconditions;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/InPredicate.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/InPredicate.java b/fe/src/main/java/org/apache/impala/analysis/InPredicate.java
index 28d8f12..4598638 100644
--- a/fe/src/main/java/org/apache/impala/analysis/InPredicate.java
+++ b/fe/src/main/java/org/apache/impala/analysis/InPredicate.java
@@ -15,20 +15,20 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.ArrayList;
 import java.util.List;
 
-import com.cloudera.impala.catalog.Db;
-import com.cloudera.impala.catalog.Function.CompareMode;
-import com.cloudera.impala.catalog.PrimitiveType;
-import com.cloudera.impala.catalog.ScalarFunction;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.Reference;
-import com.cloudera.impala.thrift.TExprNode;
-import com.cloudera.impala.thrift.TExprNodeType;
+import org.apache.impala.catalog.Db;
+import org.apache.impala.catalog.Function.CompareMode;
+import org.apache.impala.catalog.PrimitiveType;
+import org.apache.impala.catalog.ScalarFunction;
+import org.apache.impala.catalog.Type;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.common.Reference;
+import org.apache.impala.thrift.TExprNode;
+import org.apache.impala.thrift.TExprNodeType;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/InlineViewRef.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/InlineViewRef.java b/fe/src/main/java/org/apache/impala/analysis/InlineViewRef.java
index a6c62b0..94f5e76 100644
--- a/fe/src/main/java/org/apache/impala/analysis/InlineViewRef.java
+++ b/fe/src/main/java/org/apache/impala/analysis/InlineViewRef.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.ArrayList;
 import java.util.HashSet;
@@ -24,11 +24,11 @@ import java.util.List;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.catalog.ColumnStats;
-import com.cloudera.impala.catalog.StructField;
-import com.cloudera.impala.catalog.StructType;
-import com.cloudera.impala.catalog.View;
-import com.cloudera.impala.common.AnalysisException;
+import org.apache.impala.catalog.ColumnStats;
+import org.apache.impala.catalog.StructField;
+import org.apache.impala.catalog.StructType;
+import org.apache.impala.catalog.View;
+import org.apache.impala.common.AnalysisException;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/InsertStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/InsertStmt.java b/fe/src/main/java/org/apache/impala/analysis/InsertStmt.java
index c5965db..250d2d2 100644
--- a/fe/src/main/java/org/apache/impala/analysis/InsertStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/InsertStmt.java
@@ -15,30 +15,30 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Set;
 
-import com.cloudera.impala.planner.TableSink;
+import org.apache.impala.planner.TableSink;
 import com.google.common.collect.ImmutableList;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.authorization.PrivilegeRequestBuilder;
-import com.cloudera.impala.catalog.Column;
-import com.cloudera.impala.catalog.HBaseTable;
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.catalog.KuduTable;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.catalog.View;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.FileSystemUtil;
-import com.cloudera.impala.planner.DataSink;
+import org.apache.impala.authorization.Privilege;
+import org.apache.impala.authorization.PrivilegeRequestBuilder;
+import org.apache.impala.catalog.Column;
+import org.apache.impala.catalog.HBaseTable;
+import org.apache.impala.catalog.HdfsTable;
+import org.apache.impala.catalog.KuduTable;
+import org.apache.impala.catalog.Table;
+import org.apache.impala.catalog.Type;
+import org.apache.impala.catalog.View;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.common.FileSystemUtil;
+import org.apache.impala.planner.DataSink;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/IsNotEmptyPredicate.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/IsNotEmptyPredicate.java b/fe/src/main/java/org/apache/impala/analysis/IsNotEmptyPredicate.java
index fd07a32..20c73b7 100644
--- a/fe/src/main/java/org/apache/impala/analysis/IsNotEmptyPredicate.java
+++ b/fe/src/main/java/org/apache/impala/analysis/IsNotEmptyPredicate.java
@@ -15,11 +15,11 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TExprNode;
-import com.cloudera.impala.thrift.TExprNodeType;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.TExprNode;
+import org.apache.impala.thrift.TExprNodeType;
 import com.google.common.base.Preconditions;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/IsNullPredicate.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/IsNullPredicate.java b/fe/src/main/java/org/apache/impala/analysis/IsNullPredicate.java
index 6a75b5f..9092a32 100644
--- a/fe/src/main/java/org/apache/impala/analysis/IsNullPredicate.java
+++ b/fe/src/main/java/org/apache/impala/analysis/IsNullPredicate.java
@@ -15,19 +15,19 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
-
-import com.cloudera.impala.catalog.Db;
-import com.cloudera.impala.catalog.Function;
-import com.cloudera.impala.catalog.Function.CompareMode;
-import com.cloudera.impala.catalog.ScalarFunction;
-import com.cloudera.impala.catalog.ScalarType;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.Reference;
-import com.cloudera.impala.thrift.TExprNode;
-import com.cloudera.impala.thrift.TExprNodeType;
+package org.apache.impala.analysis;
+
+import org.apache.impala.catalog.Db;
+import org.apache.impala.catalog.Function;
+import org.apache.impala.catalog.Function.CompareMode;
+import org.apache.impala.catalog.ScalarFunction;
+import org.apache.impala.catalog.ScalarType;
+import org.apache.impala.catalog.Table;
+import org.apache.impala.catalog.Type;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.common.Reference;
+import org.apache.impala.thrift.TExprNode;
+import org.apache.impala.thrift.TExprNodeType;
 import com.google.common.base.Objects;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/JoinOperator.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/JoinOperator.java b/fe/src/main/java/org/apache/impala/analysis/JoinOperator.java
index d50861b..f79e490 100644
--- a/fe/src/main/java/org/apache/impala/analysis/JoinOperator.java
+++ b/fe/src/main/java/org/apache/impala/analysis/JoinOperator.java
@@ -15,9 +15,9 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
-import com.cloudera.impala.thrift.TJoinOp;
+import org.apache.impala.thrift.TJoinOp;
 
 public enum JoinOperator {
   INNER_JOIN("INNER JOIN", TJoinOp.INNER_JOIN),

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/LikePredicate.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/LikePredicate.java b/fe/src/main/java/org/apache/impala/analysis/LikePredicate.java
index cb006bf..8a60af4 100644
--- a/fe/src/main/java/org/apache/impala/analysis/LikePredicate.java
+++ b/fe/src/main/java/org/apache/impala/analysis/LikePredicate.java
@@ -15,18 +15,18 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.regex.Pattern;
 import java.util.regex.PatternSyntaxException;
 
-import com.cloudera.impala.catalog.Db;
-import com.cloudera.impala.catalog.Function.CompareMode;
-import com.cloudera.impala.catalog.ScalarFunction;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TExprNode;
-import com.cloudera.impala.thrift.TExprNodeType;
+import org.apache.impala.catalog.Db;
+import org.apache.impala.catalog.Function.CompareMode;
+import org.apache.impala.catalog.ScalarFunction;
+import org.apache.impala.catalog.Type;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.TExprNode;
+import org.apache.impala.thrift.TExprNodeType;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/LimitElement.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/LimitElement.java b/fe/src/main/java/org/apache/impala/analysis/LimitElement.java
index 4de9501..30cb4dc 100644
--- a/fe/src/main/java/org/apache/impala/analysis/LimitElement.java
+++ b/fe/src/main/java/org/apache/impala/analysis/LimitElement.java
@@ -15,12 +15,12 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.service.FeSupport;
-import com.cloudera.impala.thrift.TColumnValue;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.common.InternalException;
+import org.apache.impala.service.FeSupport;
+import org.apache.impala.thrift.TColumnValue;
 import com.google.common.base.Preconditions;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/LiteralExpr.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/LiteralExpr.java b/fe/src/main/java/org/apache/impala/analysis/LiteralExpr.java
index f5eedbb..84368f5 100644
--- a/fe/src/main/java/org/apache/impala/analysis/LiteralExpr.java
+++ b/fe/src/main/java/org/apache/impala/analysis/LiteralExpr.java
@@ -15,20 +15,20 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.math.BigDecimal;
 import java.math.BigInteger;
 
-import com.cloudera.impala.catalog.ScalarType;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.common.NotImplementedException;
-import com.cloudera.impala.service.FeSupport;
-import com.cloudera.impala.thrift.TColumnValue;
-import com.cloudera.impala.thrift.TExprNode;
-import com.cloudera.impala.thrift.TQueryCtx;
+import org.apache.impala.catalog.ScalarType;
+import org.apache.impala.catalog.Type;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.common.InternalException;
+import org.apache.impala.common.NotImplementedException;
+import org.apache.impala.service.FeSupport;
+import org.apache.impala.thrift.TColumnValue;
+import org.apache.impala.thrift.TExprNode;
+import org.apache.impala.thrift.TQueryCtx;
 import com.google.common.base.Preconditions;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/LoadDataStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/LoadDataStmt.java b/fe/src/main/java/org/apache/impala/analysis/LoadDataStmt.java
index 109d70a..b995cf0 100644
--- a/fe/src/main/java/org/apache/impala/analysis/LoadDataStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/LoadDataStmt.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -27,18 +27,18 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.s3a.S3AFileSystem;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.catalog.HdfsFileFormat;
-import com.cloudera.impala.catalog.HdfsPartition;
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.FileSystemUtil;
-import com.cloudera.impala.thrift.ImpalaInternalServiceConstants;
-import com.cloudera.impala.thrift.TLoadDataReq;
-import com.cloudera.impala.thrift.TTableName;
-import com.cloudera.impala.util.TAccessLevelUtil;
-import com.cloudera.impala.util.FsPermissionChecker;
+import org.apache.impala.authorization.Privilege;
+import org.apache.impala.catalog.HdfsFileFormat;
+import org.apache.impala.catalog.HdfsPartition;
+import org.apache.impala.catalog.HdfsTable;
+import org.apache.impala.catalog.Table;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.common.FileSystemUtil;
+import org.apache.impala.thrift.ImpalaInternalServiceConstants;
+import org.apache.impala.thrift.TLoadDataReq;
+import org.apache.impala.thrift.TTableName;
+import org.apache.impala.util.TAccessLevelUtil;
+import org.apache.impala.util.FsPermissionChecker;
 import com.google.common.base.Preconditions;
 
 /*
@@ -85,7 +85,7 @@ public class LoadDataStmt extends StatementBase {
 
   /*
    * Print SQL syntax corresponding to this node.
-   * @see com.cloudera.impala.parser.ParseNode#toSql()
+   * @see org.apache.impala.parser.ParseNode#toSql()
    */
   @Override
   public String toSql() {

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/ModifyStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/ModifyStmt.java b/fe/src/main/java/org/apache/impala/analysis/ModifyStmt.java
index 66f97f5..24b8417 100644
--- a/fe/src/main/java/org/apache/impala/analysis/ModifyStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/ModifyStmt.java
@@ -15,21 +15,21 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.authorization.PrivilegeRequestBuilder;
-import com.cloudera.impala.catalog.Column;
-import com.cloudera.impala.catalog.KuduTable;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.Pair;
-import com.cloudera.impala.planner.DataSink;
+import org.apache.impala.authorization.Privilege;
+import org.apache.impala.authorization.PrivilegeRequestBuilder;
+import org.apache.impala.catalog.Column;
+import org.apache.impala.catalog.KuduTable;
+import org.apache.impala.catalog.Table;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.common.Pair;
+import org.apache.impala.planner.DataSink;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/NullLiteral.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/NullLiteral.java b/fe/src/main/java/org/apache/impala/analysis/NullLiteral.java
index 212d601..ef88154 100644
--- a/fe/src/main/java/org/apache/impala/analysis/NullLiteral.java
+++ b/fe/src/main/java/org/apache/impala/analysis/NullLiteral.java
@@ -15,11 +15,11 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.thrift.TExprNode;
-import com.cloudera.impala.thrift.TExprNodeType;
+import org.apache.impala.catalog.Type;
+import org.apache.impala.thrift.TExprNode;
+import org.apache.impala.thrift.TExprNodeType;
 import com.google.common.base.Objects;
 import com.google.common.base.Preconditions;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/NumericLiteral.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/NumericLiteral.java b/fe/src/main/java/org/apache/impala/analysis/NumericLiteral.java
index f3bc9da..038cd9b 100644
--- a/fe/src/main/java/org/apache/impala/analysis/NumericLiteral.java
+++ b/fe/src/main/java/org/apache/impala/analysis/NumericLiteral.java
@@ -15,20 +15,20 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.math.BigDecimal;
 import java.math.BigInteger;
 
-import com.cloudera.impala.catalog.ScalarType;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.NotImplementedException;
-import com.cloudera.impala.thrift.TDecimalLiteral;
-import com.cloudera.impala.thrift.TExprNode;
-import com.cloudera.impala.thrift.TExprNodeType;
-import com.cloudera.impala.thrift.TFloatLiteral;
-import com.cloudera.impala.thrift.TIntLiteral;
+import org.apache.impala.catalog.ScalarType;
+import org.apache.impala.catalog.Type;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.common.NotImplementedException;
+import org.apache.impala.thrift.TDecimalLiteral;
+import org.apache.impala.thrift.TExprNode;
+import org.apache.impala.thrift.TExprNodeType;
+import org.apache.impala.thrift.TFloatLiteral;
+import org.apache.impala.thrift.TIntLiteral;
 import com.google.common.base.Objects;
 import com.google.common.base.Preconditions;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/OrderByElement.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/OrderByElement.java b/fe/src/main/java/org/apache/impala/analysis/OrderByElement.java
index d430152..4dd90c1 100644
--- a/fe/src/main/java/org/apache/impala/analysis/OrderByElement.java
+++ b/fe/src/main/java/org/apache/impala/analysis/OrderByElement.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.List;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/ParseNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/ParseNode.java b/fe/src/main/java/org/apache/impala/analysis/ParseNode.java
index 6f54e26..d4ec21d 100644
--- a/fe/src/main/java/org/apache/impala/analysis/ParseNode.java
+++ b/fe/src/main/java/org/apache/impala/analysis/ParseNode.java
@@ -15,9 +15,9 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
-import com.cloudera.impala.common.AnalysisException;
+import org.apache.impala.common.AnalysisException;
 
 public interface ParseNode {
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/PartitionKeyValue.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/PartitionKeyValue.java b/fe/src/main/java/org/apache/impala/analysis/PartitionKeyValue.java
index 90c44f2..4289108 100644
--- a/fe/src/main/java/org/apache/impala/analysis/PartitionKeyValue.java
+++ b/fe/src/main/java/org/apache/impala/analysis/PartitionKeyValue.java
@@ -15,9 +15,9 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
-import com.cloudera.impala.common.AnalysisException;
+import org.apache.impala.common.AnalysisException;
 import com.google.common.base.Preconditions;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/PartitionListItem.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/PartitionListItem.java b/fe/src/main/java/org/apache/impala/analysis/PartitionListItem.java
index 1ffc51e..4870d28 100644
--- a/fe/src/main/java/org/apache/impala/analysis/PartitionListItem.java
+++ b/fe/src/main/java/org/apache/impala/analysis/PartitionListItem.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 /**
  * Representation of a single column:value element in the PARTITION (...) clause of an insert

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/PartitionSpec.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/PartitionSpec.java b/fe/src/main/java/org/apache/impala/analysis/PartitionSpec.java
index 92bf0ae..39cc1bb 100644
--- a/fe/src/main/java/org/apache/impala/analysis/PartitionSpec.java
+++ b/fe/src/main/java/org/apache/impala/analysis/PartitionSpec.java
@@ -15,20 +15,20 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.List;
 import java.util.Set;
 
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.catalog.Column;
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TPartitionKeyValue;
+import org.apache.impala.authorization.Privilege;
+import org.apache.impala.catalog.Column;
+import org.apache.impala.catalog.HdfsTable;
+import org.apache.impala.catalog.Table;
+import org.apache.impala.catalog.Type;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.TPartitionKeyValue;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableList;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/Path.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/Path.java b/fe/src/main/java/org/apache/impala/analysis/Path.java
index 03c601c..89f3409 100644
--- a/fe/src/main/java/org/apache/impala/analysis/Path.java
+++ b/fe/src/main/java/org/apache/impala/analysis/Path.java
@@ -15,18 +15,18 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.ArrayList;
 import java.util.List;
 
-import com.cloudera.impala.catalog.ArrayType;
-import com.cloudera.impala.catalog.Column;
-import com.cloudera.impala.catalog.MapType;
-import com.cloudera.impala.catalog.StructField;
-import com.cloudera.impala.catalog.StructType;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.catalog.Type;
+import org.apache.impala.catalog.ArrayType;
+import org.apache.impala.catalog.Column;
+import org.apache.impala.catalog.MapType;
+import org.apache.impala.catalog.StructField;
+import org.apache.impala.catalog.StructType;
+import org.apache.impala.catalog.Table;
+import org.apache.impala.catalog.Type;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/Predicate.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/Predicate.java b/fe/src/main/java/org/apache/impala/analysis/Predicate.java
index 4fadbce..eacf0c5 100644
--- a/fe/src/main/java/org/apache/impala/analysis/Predicate.java
+++ b/fe/src/main/java/org/apache/impala/analysis/Predicate.java
@@ -15,12 +15,12 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.Pair;
-import com.cloudera.impala.common.Reference;
+import org.apache.impala.catalog.Type;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.common.Pair;
+import org.apache.impala.common.Reference;
 
 public abstract class Predicate extends Expr {
   protected boolean isEqJoinConjunct_;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/PrivilegeSpec.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/PrivilegeSpec.java b/fe/src/main/java/org/apache/impala/analysis/PrivilegeSpec.java
index 2948e58..f064604 100644
--- a/fe/src/main/java/org/apache/impala/analysis/PrivilegeSpec.java
+++ b/fe/src/main/java/org/apache/impala/analysis/PrivilegeSpec.java
@@ -15,20 +15,20 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.List;
 
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.catalog.DataSourceTable;
-import com.cloudera.impala.catalog.RolePrivilege;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.catalog.TableLoadingException;
-import com.cloudera.impala.catalog.View;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TPrivilege;
-import com.cloudera.impala.thrift.TPrivilegeLevel;
-import com.cloudera.impala.thrift.TPrivilegeScope;
+import org.apache.impala.authorization.Privilege;
+import org.apache.impala.catalog.DataSourceTable;
+import org.apache.impala.catalog.RolePrivilege;
+import org.apache.impala.catalog.Table;
+import org.apache.impala.catalog.TableLoadingException;
+import org.apache.impala.catalog.View;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.TPrivilege;
+import org.apache.impala.thrift.TPrivilegeLevel;
+import org.apache.impala.thrift.TPrivilegeScope;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Strings;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/QueryStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/QueryStmt.java b/fe/src/main/java/org/apache/impala/analysis/QueryStmt.java
index 791305b..b02bc73 100644
--- a/fe/src/main/java/org/apache/impala/analysis/QueryStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/QueryStmt.java
@@ -15,16 +15,16 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.ArrayList;
 import java.util.List;
 import java.util.ListIterator;
 import java.util.Set;
 
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.TreeNode;
+import org.apache.impala.catalog.Type;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.common.TreeNode;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Predicates;
 import com.google.common.collect.Lists;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/ResetMetadataStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/ResetMetadataStmt.java b/fe/src/main/java/org/apache/impala/analysis/ResetMetadataStmt.java
index ac6c390..9d2249b 100644
--- a/fe/src/main/java/org/apache/impala/analysis/ResetMetadataStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/ResetMetadataStmt.java
@@ -15,14 +15,14 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.authorization.PrivilegeRequest;
-import com.cloudera.impala.authorization.PrivilegeRequestBuilder;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TResetMetadataRequest;
-import com.cloudera.impala.thrift.TTableName;
+import org.apache.impala.authorization.Privilege;
+import org.apache.impala.authorization.PrivilegeRequest;
+import org.apache.impala.authorization.PrivilegeRequestBuilder;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.TResetMetadataRequest;
+import org.apache.impala.thrift.TTableName;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Strings;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/SelectList.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/SelectList.java b/fe/src/main/java/org/apache/impala/analysis/SelectList.java
index 429d488..4297f94 100644
--- a/fe/src/main/java/org/apache/impala/analysis/SelectList.java
+++ b/fe/src/main/java/org/apache/impala/analysis/SelectList.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.List;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/SelectListItem.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/SelectListItem.java b/fe/src/main/java/org/apache/impala/analysis/SelectListItem.java
index 96831f3..80aee25 100644
--- a/fe/src/main/java/org/apache/impala/analysis/SelectListItem.java
+++ b/fe/src/main/java/org/apache/impala/analysis/SelectListItem.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.List;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/SelectStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/SelectStmt.java b/fe/src/main/java/org/apache/impala/analysis/SelectStmt.java
index 864dcc8..9585f47 100644
--- a/fe/src/main/java/org/apache/impala/analysis/SelectStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/SelectStmt.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.ArrayList;
 import java.util.Collections;
@@ -25,16 +25,16 @@ import java.util.Set;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.analysis.Path.PathType;
-import com.cloudera.impala.catalog.Column;
-import com.cloudera.impala.catalog.StructField;
-import com.cloudera.impala.catalog.StructType;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.catalog.TableLoadingException;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.ColumnAliasGenerator;
-import com.cloudera.impala.common.TableAliasGenerator;
-import com.cloudera.impala.common.TreeNode;
+import org.apache.impala.analysis.Path.PathType;
+import org.apache.impala.catalog.Column;
+import org.apache.impala.catalog.StructField;
+import org.apache.impala.catalog.StructType;
+import org.apache.impala.catalog.Table;
+import org.apache.impala.catalog.TableLoadingException;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.common.ColumnAliasGenerator;
+import org.apache.impala.common.TableAliasGenerator;
+import org.apache.impala.common.TreeNode;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Predicates;
 import com.google.common.collect.Iterables;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/SetStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/SetStmt.java b/fe/src/main/java/org/apache/impala/analysis/SetStmt.java
index d44f5b4..87c6727 100644
--- a/fe/src/main/java/org/apache/impala/analysis/SetStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/SetStmt.java
@@ -15,9 +15,9 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
-import com.cloudera.impala.thrift.TSetQueryOptionRequest;
+import org.apache.impala.thrift.TSetQueryOptionRequest;
 import com.google.common.base.Preconditions;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/ShowCreateFunctionStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/ShowCreateFunctionStmt.java b/fe/src/main/java/org/apache/impala/analysis/ShowCreateFunctionStmt.java
index e3ac551..37e32ae 100644
--- a/fe/src/main/java/org/apache/impala/analysis/ShowCreateFunctionStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/ShowCreateFunctionStmt.java
@@ -15,17 +15,17 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.List;
 
-import com.cloudera.impala.analysis.FunctionName;
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.catalog.Db;
-import com.cloudera.impala.catalog.Function;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TGetFunctionsParams;
-import com.cloudera.impala.thrift.TFunctionCategory;
+import org.apache.impala.analysis.FunctionName;
+import org.apache.impala.authorization.Privilege;
+import org.apache.impala.catalog.Db;
+import org.apache.impala.catalog.Function;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.TGetFunctionsParams;
+import org.apache.impala.thrift.TFunctionCategory;
 import com.google.common.base.Preconditions;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/ShowCreateTableStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/ShowCreateTableStmt.java b/fe/src/main/java/org/apache/impala/analysis/ShowCreateTableStmt.java
index 68b593c..13206c9 100644
--- a/fe/src/main/java/org/apache/impala/analysis/ShowCreateTableStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/ShowCreateTableStmt.java
@@ -15,14 +15,14 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.catalog.View;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TCatalogObjectType;
-import com.cloudera.impala.thrift.TTableName;
+import org.apache.impala.authorization.Privilege;
+import org.apache.impala.catalog.Table;
+import org.apache.impala.catalog.View;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.TCatalogObjectType;
+import org.apache.impala.thrift.TTableName;
 import com.google.common.base.Preconditions;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/ShowDataSrcsStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/ShowDataSrcsStmt.java b/fe/src/main/java/org/apache/impala/analysis/ShowDataSrcsStmt.java
index 291c467..aafad7d 100644
--- a/fe/src/main/java/org/apache/impala/analysis/ShowDataSrcsStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/ShowDataSrcsStmt.java
@@ -15,10 +15,10 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TShowDataSrcsParams;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.TShowDataSrcsParams;
 
 /**
  * Representation of a SHOW DATA SOURCES [pattern] statement.

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/ShowDbsStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/ShowDbsStmt.java b/fe/src/main/java/org/apache/impala/analysis/ShowDbsStmt.java
index 40c3cc8..1d35619 100644
--- a/fe/src/main/java/org/apache/impala/analysis/ShowDbsStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/ShowDbsStmt.java
@@ -15,10 +15,10 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TShowDbsParams;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.TShowDbsParams;
 
 /**
  * Representation of a SHOW DATABASES [pattern] statement.

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/ShowFilesStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/ShowFilesStmt.java b/fe/src/main/java/org/apache/impala/analysis/ShowFilesStmt.java
index 0bfc46b..bec1ed3 100644
--- a/fe/src/main/java/org/apache/impala/analysis/ShowFilesStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/ShowFilesStmt.java
@@ -15,17 +15,17 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.List;
 
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TShowFilesParams;
-import com.cloudera.impala.thrift.TPartitionKeyValue;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.thrift.TTableName;
+import org.apache.impala.authorization.Privilege;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.TShowFilesParams;
+import org.apache.impala.thrift.TPartitionKeyValue;
+import org.apache.impala.catalog.Table;
+import org.apache.impala.catalog.HdfsTable;
+import org.apache.impala.thrift.TTableName;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Joiner;
 import com.google.common.collect.Lists;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/ShowFunctionsStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/ShowFunctionsStmt.java b/fe/src/main/java/org/apache/impala/analysis/ShowFunctionsStmt.java
index 33129d2..dc6a461 100644
--- a/fe/src/main/java/org/apache/impala/analysis/ShowFunctionsStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/ShowFunctionsStmt.java
@@ -15,12 +15,12 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TFunctionCategory;
-import com.cloudera.impala.thrift.TShowFunctionsParams;
+import org.apache.impala.authorization.Privilege;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.TFunctionCategory;
+import org.apache.impala.thrift.TShowFunctionsParams;
 import com.google.common.base.Preconditions;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/ShowGrantRoleStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/ShowGrantRoleStmt.java b/fe/src/main/java/org/apache/impala/analysis/ShowGrantRoleStmt.java
index b8fe737..e59f168 100644
--- a/fe/src/main/java/org/apache/impala/analysis/ShowGrantRoleStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/ShowGrantRoleStmt.java
@@ -15,12 +15,12 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
-import com.cloudera.impala.catalog.Role;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.thrift.TShowGrantRoleParams;
+import org.apache.impala.catalog.Role;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.common.InternalException;
+import org.apache.impala.thrift.TShowGrantRoleParams;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Strings;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/ShowPartitionsStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/ShowPartitionsStmt.java b/fe/src/main/java/org/apache/impala/analysis/ShowPartitionsStmt.java
index 69cd7a1..9ade66b 100644
--- a/fe/src/main/java/org/apache/impala/analysis/ShowPartitionsStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/ShowPartitionsStmt.java
@@ -15,10 +15,10 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.common.AnalysisException;
+import org.apache.impala.catalog.HdfsTable;
+import org.apache.impala.common.AnalysisException;
 import com.google.common.base.Preconditions;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/ShowRolesStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/ShowRolesStmt.java b/fe/src/main/java/org/apache/impala/analysis/ShowRolesStmt.java
index 516edfe..b9f37bc 100644
--- a/fe/src/main/java/org/apache/impala/analysis/ShowRolesStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/ShowRolesStmt.java
@@ -15,12 +15,12 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
-import com.cloudera.impala.authorization.User;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.thrift.TShowRolesParams;
+import org.apache.impala.authorization.User;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.common.InternalException;
+import org.apache.impala.thrift.TShowRolesParams;
 import com.google.common.base.Preconditions;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/ShowStatsStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/ShowStatsStmt.java b/fe/src/main/java/org/apache/impala/analysis/ShowStatsStmt.java
index a421b71..79095ea 100644
--- a/fe/src/main/java/org/apache/impala/analysis/ShowStatsStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/ShowStatsStmt.java
@@ -15,13 +15,13 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.catalog.View;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TShowStatsParams;
+import org.apache.impala.authorization.Privilege;
+import org.apache.impala.catalog.Table;
+import org.apache.impala.catalog.View;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.TShowStatsParams;
 
 /**
  * Representation of a SHOW TABLE/COLUMN STATS statement for

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/ShowTablesStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/ShowTablesStmt.java b/fe/src/main/java/org/apache/impala/analysis/ShowTablesStmt.java
index 22767b5..a6a23c9 100644
--- a/fe/src/main/java/org/apache/impala/analysis/ShowTablesStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/ShowTablesStmt.java
@@ -15,11 +15,11 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TShowTablesParams;
+import org.apache.impala.authorization.Privilege;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.TShowTablesParams;
 import com.google.common.base.Preconditions;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/SingularRowSrcTableRef.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/SingularRowSrcTableRef.java b/fe/src/main/java/org/apache/impala/analysis/SingularRowSrcTableRef.java
index 0a5d331..be9753a 100644
--- a/fe/src/main/java/org/apache/impala/analysis/SingularRowSrcTableRef.java
+++ b/fe/src/main/java/org/apache/impala/analysis/SingularRowSrcTableRef.java
@@ -15,11 +15,11 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.List;
 
-import com.cloudera.impala.planner.PlanNode;
+import org.apache.impala.planner.PlanNode;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/SlotDescriptor.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/SlotDescriptor.java b/fe/src/main/java/org/apache/impala/analysis/SlotDescriptor.java
index 7850a0e..3a0fc06 100644
--- a/fe/src/main/java/org/apache/impala/analysis/SlotDescriptor.java
+++ b/fe/src/main/java/org/apache/impala/analysis/SlotDescriptor.java
@@ -15,16 +15,16 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.Collections;
 import java.util.List;
 
-import com.cloudera.impala.catalog.Column;
-import com.cloudera.impala.catalog.ColumnStats;
-import com.cloudera.impala.catalog.KuduColumn;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.thrift.TSlotDescriptor;
+import org.apache.impala.catalog.Column;
+import org.apache.impala.catalog.ColumnStats;
+import org.apache.impala.catalog.KuduColumn;
+import org.apache.impala.catalog.Type;
+import org.apache.impala.thrift.TSlotDescriptor;
 import com.google.common.base.Joiner;
 import com.google.common.base.Objects;
 import com.google.common.base.Preconditions;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/SlotId.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/SlotId.java b/fe/src/main/java/org/apache/impala/analysis/SlotId.java
index 36e88e7..c5ca1f4 100644
--- a/fe/src/main/java/org/apache/impala/analysis/SlotId.java
+++ b/fe/src/main/java/org/apache/impala/analysis/SlotId.java
@@ -15,10 +15,10 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
-import com.cloudera.impala.common.Id;
-import com.cloudera.impala.common.IdGenerator;
+import org.apache.impala.common.Id;
+import org.apache.impala.common.IdGenerator;
 
 public class SlotId extends Id<SlotId> {
   // Construction only allowed via an IdGenerator.

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/SlotRef.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/SlotRef.java b/fe/src/main/java/org/apache/impala/analysis/SlotRef.java
index 8a544d0..36771f3 100644
--- a/fe/src/main/java/org/apache/impala/analysis/SlotRef.java
+++ b/fe/src/main/java/org/apache/impala/analysis/SlotRef.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.ArrayList;
 import java.util.List;
@@ -24,14 +24,14 @@ import java.util.Set;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.analysis.Path.PathType;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.catalog.TableLoadingException;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TExprNode;
-import com.cloudera.impala.thrift.TExprNodeType;
-import com.cloudera.impala.thrift.TSlotRef;
+import org.apache.impala.analysis.Path.PathType;
+import org.apache.impala.catalog.Table;
+import org.apache.impala.catalog.TableLoadingException;
+import org.apache.impala.catalog.Type;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.TExprNode;
+import org.apache.impala.thrift.TExprNodeType;
+import org.apache.impala.thrift.TSlotRef;
 import com.google.common.base.Joiner;
 import com.google.common.base.Objects;
 import com.google.common.base.Preconditions;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/SortInfo.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/SortInfo.java b/fe/src/main/java/org/apache/impala/analysis/SortInfo.java
index 469f1e6..8d8f955 100644
--- a/fe/src/main/java/org/apache/impala/analysis/SortInfo.java
+++ b/fe/src/main/java/org/apache/impala/analysis/SortInfo.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.List;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/StatementBase.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/StatementBase.java b/fe/src/main/java/org/apache/impala/analysis/StatementBase.java
index 9a6cb1b..9faaddd 100644
--- a/fe/src/main/java/org/apache/impala/analysis/StatementBase.java
+++ b/fe/src/main/java/org/apache/impala/analysis/StatementBase.java
@@ -15,13 +15,13 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import org.apache.commons.lang.NotImplementedException;
 
-import com.cloudera.impala.catalog.Column;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
+import org.apache.impala.catalog.Column;
+import org.apache.impala.catalog.Type;
+import org.apache.impala.common.AnalysisException;
 
 /**
  * Base class for all Impala SQL statements.

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/StmtRewriter.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/StmtRewriter.java b/fe/src/main/java/org/apache/impala/analysis/StmtRewriter.java
index 8304c66..bc18dfd 100644
--- a/fe/src/main/java/org/apache/impala/analysis/StmtRewriter.java
+++ b/fe/src/main/java/org/apache/impala/analysis/StmtRewriter.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.ArrayList;
 import java.util.List;
@@ -23,9 +23,9 @@ import java.util.List;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.analysis.AnalysisContext.AnalysisResult;
-import com.cloudera.impala.analysis.UnionStmt.UnionOperand;
-import com.cloudera.impala.common.AnalysisException;
+import org.apache.impala.analysis.AnalysisContext.AnalysisResult;
+import org.apache.impala.analysis.UnionStmt.UnionOperand;
+import org.apache.impala.common.AnalysisException;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Predicates;
 import com.google.common.collect.Iterables;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/StringLiteral.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/StringLiteral.java b/fe/src/main/java/org/apache/impala/analysis/StringLiteral.java
index be4820c..4cca49e 100644
--- a/fe/src/main/java/org/apache/impala/analysis/StringLiteral.java
+++ b/fe/src/main/java/org/apache/impala/analysis/StringLiteral.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.io.IOException;
 import java.io.StringReader;
@@ -25,12 +25,12 @@ import java_cup.runtime.Symbol;
 
 import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
 
-import com.cloudera.impala.catalog.ScalarType;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TExprNode;
-import com.cloudera.impala.thrift.TExprNodeType;
-import com.cloudera.impala.thrift.TStringLiteral;
+import org.apache.impala.catalog.ScalarType;
+import org.apache.impala.catalog.Type;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.TExprNode;
+import org.apache.impala.thrift.TExprNodeType;
+import org.apache.impala.thrift.TStringLiteral;
 import com.google.common.base.Objects;
 import com.google.common.base.Preconditions;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/Subquery.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/Subquery.java b/fe/src/main/java/org/apache/impala/analysis/Subquery.java
index d0e1b30..f3dcb7c 100644
--- a/fe/src/main/java/org/apache/impala/analysis/Subquery.java
+++ b/fe/src/main/java/org/apache/impala/analysis/Subquery.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.ArrayList;
 
@@ -23,11 +23,11 @@ import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.catalog.ArrayType;
-import com.cloudera.impala.catalog.StructField;
-import com.cloudera.impala.catalog.StructType;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TExprNode;
+import org.apache.impala.catalog.ArrayType;
+import org.apache.impala.catalog.StructField;
+import org.apache.impala.catalog.StructType;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.TExprNode;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/TableName.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/TableName.java b/fe/src/main/java/org/apache/impala/analysis/TableName.java
index 5fc8f7e..297948e 100644
--- a/fe/src/main/java/org/apache/impala/analysis/TableName.java
+++ b/fe/src/main/java/org/apache/impala/analysis/TableName.java
@@ -15,14 +15,14 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.List;
 
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TTableName;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.TTableName;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/TableRef.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/TableRef.java b/fe/src/main/java/org/apache/impala/analysis/TableRef.java
index 0ff0575..c276dba 100644
--- a/fe/src/main/java/org/apache/impala/analysis/TableRef.java
+++ b/fe/src/main/java/org/apache/impala/analysis/TableRef.java
@@ -15,18 +15,18 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 import java.util.Set;
 
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.planner.JoinNode.DistributionMode;
-import com.cloudera.impala.thrift.TReplicaPreference;
+import org.apache.impala.catalog.HdfsTable;
+import org.apache.impala.catalog.Table;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.planner.JoinNode.DistributionMode;
+import org.apache.impala.thrift.TReplicaPreference;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/TimestampArithmeticExpr.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/TimestampArithmeticExpr.java b/fe/src/main/java/org/apache/impala/analysis/TimestampArithmeticExpr.java
index 5049064..8faeb27 100644
--- a/fe/src/main/java/org/apache/impala/analysis/TimestampArithmeticExpr.java
+++ b/fe/src/main/java/org/apache/impala/analysis/TimestampArithmeticExpr.java
@@ -15,16 +15,16 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.HashMap;
 import java.util.Map;
 
-import com.cloudera.impala.analysis.ArithmeticExpr.Operator;
-import com.cloudera.impala.catalog.Function.CompareMode;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TExprNode;
-import com.cloudera.impala.thrift.TExprNodeType;
+import org.apache.impala.analysis.ArithmeticExpr.Operator;
+import org.apache.impala.catalog.Function.CompareMode;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.TExprNode;
+import org.apache.impala.thrift.TExprNodeType;
 import com.google.common.base.Preconditions;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/ToSqlUtils.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/ToSqlUtils.java b/fe/src/main/java/org/apache/impala/analysis/ToSqlUtils.java
index 851e6ba..b125987 100644
--- a/fe/src/main/java/org/apache/impala/analysis/ToSqlUtils.java
+++ b/fe/src/main/java/org/apache/impala/analysis/ToSqlUtils.java
@@ -15,30 +15,30 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
-import com.cloudera.impala.catalog.KuduTable;
+import org.apache.impala.catalog.KuduTable;
 import org.antlr.runtime.ANTLRStringStream;
 import org.antlr.runtime.Token;
 import org.apache.commons.lang.StringEscapeUtils;
 import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.ql.parse.HiveLexer;
 
-import com.cloudera.impala.catalog.CatalogException;
-import com.cloudera.impala.catalog.Column;
-import com.cloudera.impala.catalog.Function;
-import com.cloudera.impala.catalog.HBaseTable;
-import com.cloudera.impala.catalog.HdfsCompression;
-import com.cloudera.impala.catalog.HdfsFileFormat;
-import com.cloudera.impala.catalog.RowFormat;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.catalog.View;
-import com.cloudera.impala.common.PrintUtils;
+import org.apache.impala.catalog.CatalogException;
+import org.apache.impala.catalog.Column;
+import org.apache.impala.catalog.Function;
+import org.apache.impala.catalog.HBaseTable;
+import org.apache.impala.catalog.HdfsCompression;
+import org.apache.impala.catalog.HdfsFileFormat;
+import org.apache.impala.catalog.RowFormat;
+import org.apache.impala.catalog.Table;
+import org.apache.impala.catalog.View;
+import org.apache.impala.common.PrintUtils;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Strings;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/TruncateStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/TruncateStmt.java b/fe/src/main/java/org/apache/impala/analysis/TruncateStmt.java
index 0ea930e..0f1c9ce 100644
--- a/fe/src/main/java/org/apache/impala/analysis/TruncateStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/TruncateStmt.java
@@ -15,18 +15,18 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.ArrayList;
 import java.util.List;
 
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TTableName;
-import com.cloudera.impala.thrift.TTruncateParams;
+import org.apache.impala.authorization.Privilege;
+import org.apache.impala.catalog.HdfsTable;
+import org.apache.impala.catalog.Table;
+import org.apache.impala.catalog.Type;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.TTableName;
+import org.apache.impala.thrift.TTruncateParams;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Joiner;
 import com.google.common.collect.Lists;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/TupleDescriptor.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/TupleDescriptor.java b/fe/src/main/java/org/apache/impala/analysis/TupleDescriptor.java
index 145a10b..cbfdaca 100644
--- a/fe/src/main/java/org/apache/impala/analysis/TupleDescriptor.java
+++ b/fe/src/main/java/org/apache/impala/analysis/TupleDescriptor.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.ArrayList;
 import java.util.Collections;
@@ -25,12 +25,12 @@ import java.util.Map;
 
 import org.apache.commons.lang.StringUtils;
 
-import com.cloudera.impala.catalog.ColumnStats;
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.catalog.StructType;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.catalog.View;
-import com.cloudera.impala.thrift.TTupleDescriptor;
+import org.apache.impala.catalog.ColumnStats;
+import org.apache.impala.catalog.HdfsTable;
+import org.apache.impala.catalog.StructType;
+import org.apache.impala.catalog.Table;
+import org.apache.impala.catalog.View;
+import org.apache.impala.thrift.TTupleDescriptor;
 import com.google.common.base.Joiner;
 import com.google.common.base.Objects;
 import com.google.common.base.Preconditions;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/TupleId.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/TupleId.java b/fe/src/main/java/org/apache/impala/analysis/TupleId.java
index 918671c..2464506 100644
--- a/fe/src/main/java/org/apache/impala/analysis/TupleId.java
+++ b/fe/src/main/java/org/apache/impala/analysis/TupleId.java
@@ -15,10 +15,10 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
-import com.cloudera.impala.common.Id;
-import com.cloudera.impala.common.IdGenerator;
+import org.apache.impala.common.Id;
+import org.apache.impala.common.IdGenerator;
 
 /**
  * Tuple identifier unique within a single query.

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/TupleIsNullPredicate.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/TupleIsNullPredicate.java b/fe/src/main/java/org/apache/impala/analysis/TupleIsNullPredicate.java
index 5a07328..49ebc4e 100644
--- a/fe/src/main/java/org/apache/impala/analysis/TupleIsNullPredicate.java
+++ b/fe/src/main/java/org/apache/impala/analysis/TupleIsNullPredicate.java
@@ -15,16 +15,16 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.List;
 import java.util.Set;
 
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.thrift.TExprNode;
-import com.cloudera.impala.thrift.TExprNodeType;
-import com.cloudera.impala.thrift.TTupleIsNullPredicate;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.common.InternalException;
+import org.apache.impala.thrift.TExprNode;
+import org.apache.impala.thrift.TExprNodeType;
+import org.apache.impala.thrift.TTupleIsNullPredicate;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/TypeDef.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/TypeDef.java b/fe/src/main/java/org/apache/impala/analysis/TypeDef.java
index bbbb1b4..76e6a8f 100644
--- a/fe/src/main/java/org/apache/impala/analysis/TypeDef.java
+++ b/fe/src/main/java/org/apache/impala/analysis/TypeDef.java
@@ -15,20 +15,20 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.Set;
 
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 
-import com.cloudera.impala.catalog.ArrayType;
-import com.cloudera.impala.catalog.MapType;
-import com.cloudera.impala.catalog.PrimitiveType;
-import com.cloudera.impala.catalog.ScalarType;
-import com.cloudera.impala.catalog.StructField;
-import com.cloudera.impala.catalog.StructType;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
+import org.apache.impala.catalog.ArrayType;
+import org.apache.impala.catalog.MapType;
+import org.apache.impala.catalog.PrimitiveType;
+import org.apache.impala.catalog.ScalarType;
+import org.apache.impala.catalog.StructField;
+import org.apache.impala.catalog.StructType;
+import org.apache.impala.catalog.Type;
+import org.apache.impala.common.AnalysisException;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Sets;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/TypesUtil.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/TypesUtil.java b/fe/src/main/java/org/apache/impala/analysis/TypesUtil.java
index 58bb960..4a61f38 100644
--- a/fe/src/main/java/org/apache/impala/analysis/TypesUtil.java
+++ b/fe/src/main/java/org/apache/impala/analysis/TypesUtil.java
@@ -15,13 +15,13 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.math.BigDecimal;
 
-import com.cloudera.impala.catalog.ScalarType;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
+import org.apache.impala.catalog.ScalarType;
+import org.apache.impala.catalog.Type;
+import org.apache.impala.common.AnalysisException;
 import com.google.common.base.Preconditions;
 
 // Utility class for handling types.



[45/61] [partial] incubator-impala git commit: IMPALA-3786: Replace "cloudera" with "apache" (part 1)

Posted by kw...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/ColumnLineageGraph.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/ColumnLineageGraph.java b/fe/src/main/java/com/cloudera/impala/analysis/ColumnLineageGraph.java
deleted file mode 100644
index a00bf53..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/ColumnLineageGraph.java
+++ /dev/null
@@ -1,680 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.text.SimpleDateFormat;
-import java.util.Collection;
-import java.util.Date;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.TreeSet;
-
-import org.json.simple.JSONArray;
-import org.json.simple.JSONObject;
-import org.json.simple.JSONValue;
-import org.json.simple.parser.JSONParser;
-import org.json.simple.parser.ParseException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.common.Id;
-import com.cloudera.impala.common.IdGenerator;
-import com.cloudera.impala.thrift.TEdgeType;
-import com.cloudera.impala.thrift.TQueryCtx;
-import com.cloudera.impala.thrift.TLineageGraph;
-import com.cloudera.impala.thrift.TMultiEdge;
-import com.cloudera.impala.thrift.TVertex;
-import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Strings;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import com.google.common.collect.Sets;
-import com.google.common.hash.Hasher;
-import com.google.common.hash.Hashing;
-
-/**
- * Represents a vertex in the column lineage graph. A Vertex may correspond to a base
- * table column, a column in the destination table (for the case of INSERT or CTAS
- * queries) or a result expr (labeled column of a query result set).
- */
-final class Vertex implements Comparable<Vertex> {
-  // Unique identifier of this vertex.
-  private final VertexId id_;
-
-  private final String type_ = "COLUMN";
-
-  // A fully-qualified column name or the label of a result expr
-  private final String label_;
-
-  public Vertex(VertexId id, String label) {
-    Preconditions.checkNotNull(id);
-    Preconditions.checkNotNull(label);
-    id_ = id;
-    label_ = label;
-  }
-  public VertexId getVertexId() { return id_; }
-  public String getLabel() { return label_; }
-  public String getType() { return type_; }
-
-  @Override
-  public String toString() { return "(" + id_ + ":" + type_ + ":" + label_ + ")"; }
-
-  /**
-   * Encodes this Vertex object into a JSON object represented by a Map.
-   */
-  public Map toJson() {
-    // Use a LinkedHashMap to generate a strict ordering of elements.
-    Map obj = new LinkedHashMap();
-    obj.put("id", id_.asInt());
-    obj.put("vertexType", type_);
-    obj.put("vertexId", label_);
-    return obj;
-  }
-
-  /**
-   * Constructs a Vertex object from a JSON object. The new object is returned.
-   */
-  public static Vertex fromJsonObj(JSONObject obj) {
-    int id = ((Long) obj.get("id")).intValue();
-    String label = (String) obj.get("vertexId");
-    return new Vertex(new VertexId(id), label);
-  }
-
-  /**
-   * Encodes this Vertex object into a thrift object
-   */
-  public TVertex toThrift() {
-    return new TVertex(id_.asInt(), label_);
-  }
-
-  /**
-   * Constructs a Vertex object from a thrift object.
-   */
-  public static Vertex fromThrift(TVertex vertex) {
-    int id = ((Long) vertex.id).intValue();
-    return new Vertex(new VertexId(id), vertex.label);
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (obj == null) return false;
-    if (obj.getClass() != this.getClass()) return false;
-    Vertex vertex = (Vertex) obj;
-    return this.id_.equals(vertex.id_) &&
-        this.label_.equals(vertex.label_);
-  }
-
-  public int compareTo(Vertex cmp) { return this.id_.compareTo(cmp.id_); }
-
-  @Override
-  public int hashCode() { return id_.hashCode(); }
-}
-
-/**
- * Represents the unique identifier of a Vertex.
- */
-class VertexId extends Id<VertexId> {
-  protected VertexId(int id) {
-    super(id);
-  }
-  public static IdGenerator<VertexId> createGenerator() {
-    return new IdGenerator<VertexId>() {
-      @Override
-      public VertexId getNextId() { return new VertexId(nextId_++); }
-      @Override
-      public VertexId getMaxId() { return new VertexId(nextId_ - 1); }
-    };
-  }
-}
-
-/**
- * Represents a set of uni-directional edges in the column lineage graph, one edge from
- * every source Vertex in 'sources_' to every target Vertex in 'targets_'. An edge
- * indicates a dependency between a source and a target Vertex. There are two types of
- * edges, PROJECTION and PREDICATE, that are described in the ColumnLineageGraph class.
- */
-final class MultiEdge {
-  public static enum EdgeType {
-    PROJECTION, PREDICATE
-  }
-  private final Set<Vertex> sources_;
-  private final Set<Vertex> targets_;
-  private final EdgeType edgeType_;
-
-  public MultiEdge(Set<Vertex> sources, Set<Vertex> targets, EdgeType type) {
-    sources_ = sources;
-    targets_ = targets;
-    edgeType_ = type;
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder builder = new StringBuilder();
-    Joiner joiner = Joiner.on(",");
-    builder.append("Sources: [");
-    builder.append(joiner.join(sources_) + "]\n");
-    builder.append("Targets: [");
-    builder.append(joiner.join(targets_) + "]\n");
-    builder.append("Type: " + edgeType_);
-    return builder.toString();
-  }
-
-  /**
-   * Encodes this MultiEdge object to a JSON object represented by a Map.
-   */
-  public Map toJson() {
-    Map obj = new LinkedHashMap();
-    // Add sources
-    JSONArray sourceIds = new JSONArray();
-    for (Vertex vertex: sources_) {
-      sourceIds.add(vertex.getVertexId());
-    }
-    obj.put("sources", sourceIds);
-    // Add targets
-    JSONArray targetIds = new JSONArray();
-    for (Vertex vertex: targets_) {
-      targetIds.add(vertex.getVertexId());
-    }
-    obj.put("targets", targetIds);
-    obj.put("edgeType", edgeType_.toString());
-    return obj;
-  }
-
-  /**
-   * Encodes this MultiEdge object to a thrift object
-   */
-  public TMultiEdge toThrift() {
-    List<TVertex> sources = Lists.newArrayList();
-    for (Vertex vertex: sources_) {
-      sources.add(vertex.toThrift());
-    }
-    List<TVertex> targets = Lists.newArrayList();
-    for (Vertex vertex: targets_) {
-      targets.add(vertex.toThrift());
-    }
-    if (edgeType_ == EdgeType.PROJECTION) {
-      return new TMultiEdge(sources, targets, TEdgeType.PROJECTION);
-    }
-    return new TMultiEdge(sources, targets, TEdgeType.PREDICATE);
-  }
-
-  /**
-   * Constructs a MultiEdge object from a thrift object
-   */
-  public static MultiEdge fromThrift(TMultiEdge obj){
-    Set<Vertex> sources = Sets.newHashSet();
-    for (TVertex vertex: obj.sources) {
-      sources.add(Vertex.fromThrift(vertex));
-    }
-    Set<Vertex> targets = Sets.newHashSet();
-    for (TVertex vertex: obj.targets) {
-      targets.add(Vertex.fromThrift(vertex));
-    }
-    if (obj.edgetype == TEdgeType.PROJECTION) {
-      return new MultiEdge(sources, targets, EdgeType.PROJECTION);
-    }
-    return new MultiEdge(sources, targets, EdgeType.PREDICATE);
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (obj == null) return false;
-    if (obj.getClass() != this.getClass()) return false;
-    MultiEdge edge = (MultiEdge) obj;
-    return edge.sources_.equals(this.sources_) &&
-        edge.targets_.equals(this.targets_) &&
-        edge.edgeType_ == this.edgeType_;
-  }
-}
-
-/**
- * Represents the column lineage graph of a query. This is a directional graph that is
- * used to track dependencies among the table/column entities that participate in
- * a query. There are two types of dependencies that are represented as edges in the
- * column lineage graph:
- * a) Projection dependency: This is a dependency between a set of source
- * columns (base table columns) and a single target (result expr or table column).
- * This dependency indicates that values of the target depend on the values of the source
- * columns.
- * b) Predicate dependency: This is a dependency between a set of target
- * columns (or exprs) and a set of source columns (base table columns). It indicates that
- * the source columns restrict the values of their targets (e.g. by participating in
- * WHERE clause predicates).
- *
- * The following dependencies are generated for a query:
- * - Exactly one projection dependency for every result expr / target column.
- * - Exactly one predicate dependency that targets all result exprs / target cols and
- *   depends on all columns participating in a conjunct in the query.
- * - Special case of analytic fns: One predicate dependency per result expr / target col
- *   whose value is directly or indirectly affected by an analytic function with a
- *   partition by and/or order by clause.
- */
-public class ColumnLineageGraph {
-  private final static Logger LOG = LoggerFactory.getLogger(ColumnLineageGraph.class);
-  // Query statement
-  private String queryStr_;
-
-  // Name of the user that issued this query
-  private String user_;
-
-  private final List<Expr> resultDependencyPredicates_ = Lists.newArrayList();
-
-  private final List<MultiEdge> edges_ = Lists.newArrayList();
-
-  // Timestamp in seconds since epoch (GMT) this query was submitted for execution.
-  private long timestamp_;
-
-  // Map of Vertex labels to Vertex objects.
-  private final Map<String, Vertex> vertices_ = Maps.newHashMap();
-
-  // Map of Vertex ids to Vertex objects. Used primarily during the construction of the
-  // ColumnLineageGraph from a serialized JSON object.
-  private final Map<VertexId, Vertex> idToVertexMap_ = Maps.newHashMap();
-
-  // For an INSERT or a CTAS, these are the columns of the
-  // destination table plus any partitioning columns (when dynamic partitioning is used).
-  // For a SELECT stmt, they are the labels of the result exprs.
-  private final List<String> targetColumnLabels_ = Lists.newArrayList();
-
-  // Repository for tuple and slot descriptors for this query. Use it to construct the
-  // column lineage graph.
-  private DescriptorTable descTbl_;
-
-  private final IdGenerator<VertexId> vertexIdGenerator = VertexId.createGenerator();
-
-  public ColumnLineageGraph() { }
-
-  /**
-   * Private c'tor, used only for testing.
-   */
-  private ColumnLineageGraph(String stmt, String user, long timestamp) {
-    queryStr_ = stmt;
-    user_ = user;
-    timestamp_ = timestamp;
-  }
-
-  private void setVertices(Set<Vertex> vertices) {
-    for (Vertex vertex: vertices) {
-      vertices_.put(vertex.getLabel(), vertex);
-      idToVertexMap_.put(vertex.getVertexId(), vertex);
-    }
-  }
-
-  /**
-   * Creates a new MultiEdge in the column lineage graph from the sets of 'sources' and
-   * 'targets' labels (representing column names or result expr labels). The new
-   * MultiEdge object is returned.
-   */
-  private MultiEdge createMultiEdge(Set<String> targets, Set<String> sources,
-      MultiEdge.EdgeType type) {
-    Set<Vertex> targetVertices = Sets.newHashSet();
-    for (String target: targets) {
-      targetVertices.add(createVertex(target));
-    }
-    Set<Vertex> sourceVertices = Sets.newHashSet();
-    for (String source: sources) {
-      sourceVertices.add(createVertex(source));
-    }
-    MultiEdge edge = new MultiEdge(sourceVertices, targetVertices, type);
-    edges_.add(edge);
-    return edge;
-  }
-
-  /**
-   * Creates a new vertex in the column lineage graph. The new Vertex object is
-   * returned. If a Vertex with the same label already exists, reuse it.
-   */
-  private Vertex createVertex(String label) {
-    Vertex newVertex = vertices_.get(label);
-    if (newVertex != null) return newVertex;
-    newVertex = new Vertex(vertexIdGenerator.getNextId(), label);
-    vertices_.put(newVertex.getLabel(), newVertex);
-    idToVertexMap_.put(newVertex.getVertexId(), newVertex);
-    return newVertex;
-  }
-
-  /**
-   * Computes the column lineage graph of a query from the list of query result exprs.
-   * 'rootAnalyzer' is the Analyzer that was used for the analysis of the query.
-   */
-  public void computeLineageGraph(List<Expr> resultExprs, Analyzer rootAnalyzer) {
-    init(rootAnalyzer);
-    computeProjectionDependencies(resultExprs);
-    computeResultPredicateDependencies(rootAnalyzer);
-  }
-
-  /**
-   * Initialize the ColumnLineageGraph from the root analyzer of a query.
-   */
-  private void init(Analyzer analyzer) {
-    Preconditions.checkNotNull(analyzer);
-    Preconditions.checkState(analyzer.isRootAnalyzer());
-    TQueryCtx queryCtx = analyzer.getQueryCtx();
-    if (queryCtx.request.isSetRedacted_stmt()) {
-      queryStr_ = queryCtx.request.redacted_stmt;
-    } else {
-      queryStr_ = queryCtx.request.stmt;
-    }
-    Preconditions.checkNotNull(queryStr_);
-    SimpleDateFormat df = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
-    try {
-      timestamp_ = df.parse(queryCtx.now_string).getTime() / 1000;
-    } catch (java.text.ParseException e) {
-      LOG.error("Error parsing timestamp value: " + queryCtx.now_string +
-          " " + e.getMessage());
-      timestamp_ = new Date().getTime() / 1000;
-    }
-    descTbl_ = analyzer.getDescTbl();
-    user_ = analyzer.getUser().getName();
-  }
-
-  private void computeProjectionDependencies(List<Expr> resultExprs) {
-    Preconditions.checkNotNull(resultExprs);
-    Preconditions.checkState(!resultExprs.isEmpty());
-    Preconditions.checkState(resultExprs.size() == targetColumnLabels_.size());
-    for (int i = 0; i < resultExprs.size(); ++i) {
-      Expr expr = resultExprs.get(i);
-      Set<String> sourceBaseCols = Sets.newHashSet();
-      List<Expr> dependentExprs = Lists.newArrayList();
-      getSourceBaseCols(expr, sourceBaseCols, dependentExprs, false);
-      Set<String> targets = Sets.newHashSet(targetColumnLabels_.get(i));
-      createMultiEdge(targets, sourceBaseCols, MultiEdge.EdgeType.PROJECTION);
-      if (!dependentExprs.isEmpty()) {
-        // We have additional exprs that 'expr' has a predicate dependency on.
-        // Gather the transitive predicate dependencies of 'expr' based on its direct
-        // predicate dependencies. For each direct predicate dependency p, 'expr' is
-        // transitively predicate dependent on all exprs that p is projection and
-        // predicate dependent on.
-        Set<String> predicateBaseCols = Sets.newHashSet();
-        for (Expr dependentExpr: dependentExprs) {
-          getSourceBaseCols(dependentExpr, predicateBaseCols, null, true);
-        }
-        createMultiEdge(targets, predicateBaseCols, MultiEdge.EdgeType.PREDICATE);
-      }
-    }
-  }
-
-  /**
-   * Compute predicate dependencies for the query result, i.e. exprs that affect the
-   * possible values of the result exprs / target columns, such as predicates in a WHERE
-   * clause.
-   */
-  private void computeResultPredicateDependencies(Analyzer analyzer) {
-    List<Expr> conjuncts = analyzer.getConjuncts();
-    for (Expr expr: conjuncts) {
-      if (expr.isAuxExpr()) continue;
-      resultDependencyPredicates_.add(expr);
-    }
-    Set<String> predicateBaseCols = Sets.newHashSet();
-    for (Expr expr: resultDependencyPredicates_) {
-      getSourceBaseCols(expr, predicateBaseCols, null, true);
-    }
-    if (predicateBaseCols.isEmpty()) return;
-    Set<String> targets = Sets.newHashSet(targetColumnLabels_);
-    createMultiEdge(targets, predicateBaseCols, MultiEdge.EdgeType.PREDICATE);
-  }
-
-  /**
-   * Identify the base table columns that 'expr' is connected to by recursively resolving
-   * all associated slots through inline views and materialization points to base-table
-   * slots. If 'directPredDeps' is not null, it is populated with the exprs that
-   * have a predicate dependency with 'expr' (e.g. partitioning and order by exprs for
-   * the case of an analytic function). If 'traversePredDeps' is false, not all the
-   * children exprs of 'expr' are used to identify the base columns that 'expr' is
-   * connected to. Which children are filtered depends on the type of 'expr' (e.g. for
-   * AnalyticFunctionExpr, grouping and sorting exprs are filtered out).
-   */
-  private void getSourceBaseCols(Expr expr, Set<String> sourceBaseCols,
-      List<Expr> directPredDeps, boolean traversePredDeps) {
-    List<Expr> exprsToTraverse = getProjectionDeps(expr);
-    List<Expr> predicateDepExprs = getPredicateDeps(expr);
-    if (directPredDeps != null) directPredDeps.addAll(predicateDepExprs);
-    if (traversePredDeps) exprsToTraverse.addAll(predicateDepExprs);
-    List<SlotId> slotIds = Lists.newArrayList();
-    for (Expr e: exprsToTraverse) {
-      e.getIds(null, slotIds);
-    }
-    for (SlotId slotId: slotIds) {
-      SlotDescriptor slotDesc = descTbl_.getSlotDesc(slotId);
-      List<Expr> sourceExprs = slotDesc.getSourceExprs();
-      if (sourceExprs.isEmpty() && slotDesc.isScanSlot() &&
-          slotDesc.getPath().isRootedAtTuple()) {
-        // slot should correspond to a materialized tuple of a table
-        Preconditions.checkState(slotDesc.getParent().isMaterialized());
-        List<String> path = slotDesc.getPath().getCanonicalPath();
-        sourceBaseCols.add(Joiner.on(".").join(path));
-      } else {
-        for (Expr sourceExpr: sourceExprs) {
-          getSourceBaseCols(sourceExpr, sourceBaseCols, directPredDeps,
-              traversePredDeps);
-        }
-      }
-    }
-  }
-
-  /**
-   * Retrieve the exprs that 'e' is directly projection dependent on.
-   * TODO Handle conditional exprs (e.g. CASE, IF).
-   */
-  private List<Expr> getProjectionDeps(Expr e) {
-    Preconditions.checkNotNull(e);
-    List<Expr> outputExprs = Lists.newArrayList();
-    if (e instanceof AnalyticExpr) {
-      AnalyticExpr analytic = (AnalyticExpr) e;
-      outputExprs.addAll(analytic.getChildren().subList(0,
-          analytic.getFnCall().getParams().size()));
-    } else {
-      outputExprs.add(e);
-    }
-    return outputExprs;
-  }
-
-  /**
-   * Retrieve the exprs that 'e' is directly predicate dependent on.
-   * TODO Handle conditional exprs (e.g. CASE, IF).
-   */
-  private List<Expr> getPredicateDeps(Expr e) {
-    Preconditions.checkNotNull(e);
-    List<Expr> outputExprs = Lists.newArrayList();
-    if (e instanceof AnalyticExpr) {
-      AnalyticExpr analyticExpr = (AnalyticExpr) e;
-      outputExprs.addAll(analyticExpr.getPartitionExprs());
-      for (OrderByElement orderByElem: analyticExpr.getOrderByElements()) {
-        outputExprs.add(orderByElem.getExpr());
-      }
-    }
-    return outputExprs;
-  }
-
-  public void addDependencyPredicates(Collection<Expr> exprs) {
-    resultDependencyPredicates_.addAll(exprs);
-  }
-
-  /**
-   * Encodes the ColumnLineageGraph object to JSON.
-   */
-  public String toJson() {
-    if (Strings.isNullOrEmpty(queryStr_)) return "";
-    Map obj = new LinkedHashMap();
-    obj.put("queryText", queryStr_);
-    obj.put("hash", getQueryHash(queryStr_));
-    obj.put("user", user_);
-    obj.put("timestamp", timestamp_);
-    // Add edges
-    JSONArray edges = new JSONArray();
-    for (MultiEdge edge: edges_) {
-      edges.add(edge.toJson());
-    }
-    obj.put("edges", edges);
-    // Add vertices
-    TreeSet<Vertex> sortedVertices = Sets.newTreeSet(vertices_.values());
-    JSONArray vertices = new JSONArray();
-    for (Vertex vertex: sortedVertices) {
-      vertices.add(vertex.toJson());
-    }
-    obj.put("vertices", vertices);
-    return JSONValue.toJSONString(obj);
-  }
-
-  /**
-   * Serializes the ColumnLineageGraph to a thrift object
-   */
-  public TLineageGraph toThrift() {
-    TLineageGraph graph = new TLineageGraph();
-    if (Strings.isNullOrEmpty(queryStr_)) return graph;
-    graph.setQuery_text(queryStr_);
-    graph.setHash(getQueryHash(queryStr_));
-    graph.setUser(user_);
-    graph.setStarted(timestamp_);
-    // Add edges
-    List<TMultiEdge> edges = Lists.newArrayList();
-    for (MultiEdge edge: edges_) {
-      edges.add(edge.toThrift());
-    }
-    graph.setEdges(edges);
-    // Add vertices
-    TreeSet<Vertex> sortedVertices = Sets.newTreeSet(vertices_.values());
-    List<TVertex> vertices = Lists.newArrayList();
-    for (Vertex vertex: sortedVertices) {
-      vertices.add(vertex.toThrift());
-    }
-    graph.setVertices(vertices);
-    return graph;
-  }
-
-  /**
-   * Creates a LineageGraph object from a thrift object
-   */
-  public static ColumnLineageGraph fromThrift(TLineageGraph obj) {
-    ColumnLineageGraph lineage =
-        new ColumnLineageGraph(obj.query_text, obj.user, obj.started);
-    TreeSet<Vertex> vertices = Sets.newTreeSet();
-    for (TVertex vertex: obj.vertices) {
-      vertices.add(Vertex.fromThrift(vertex));
-    }
-    lineage.setVertices(vertices);
-    for (TMultiEdge edge: obj.edges) {
-      MultiEdge e = MultiEdge.fromThrift(edge);
-      lineage.edges_.add(e);
-    }
-    return lineage;
-  }
-
-  private String getQueryHash(String queryStr) {
-    Hasher hasher = Hashing.md5().newHasher();
-    hasher.putString(queryStr);
-    return hasher.hash().toString();
-  }
-
-  /**
-   * Creates a ColumnLineageGraph object from a serialized JSON record. The new
-   * ColumnLineageGraph object is returned. Used only during testing.
-   */
-  public static ColumnLineageGraph createFromJSON(String json) {
-    if (json == null || json.isEmpty()) return null;
-    JSONParser parser = new JSONParser();
-    Object obj = null;
-    try {
-      obj = parser.parse(json);
-    } catch (ParseException e) {
-      LOG.error("Error parsing serialized column lineage graph: " + e.getMessage());
-      return null;
-    }
-    if (!(obj instanceof JSONObject)) return null;
-    JSONObject jsonObj = (JSONObject) obj;
-    String stmt = (String) jsonObj.get("queryText");
-    String hash = (String) jsonObj.get("hash");
-    String user = (String) jsonObj.get("user");
-    long timestamp = (Long) jsonObj.get("timestamp");
-    ColumnLineageGraph graph = new ColumnLineageGraph(stmt, user, timestamp);
-    JSONArray serializedVertices = (JSONArray) jsonObj.get("vertices");
-    Set<Vertex> vertices = Sets.newHashSet();
-    for (int i = 0; i < serializedVertices.size(); ++i) {
-      Vertex v = Vertex.fromJsonObj((JSONObject) serializedVertices.get(i));
-      vertices.add(v);
-    }
-    graph.setVertices(vertices);
-    JSONArray serializedEdges = (JSONArray) jsonObj.get("edges");
-    for (int i = 0; i < serializedEdges.size(); ++i) {
-      MultiEdge e =
-          graph.createMultiEdgeFromJSONObj((JSONObject) serializedEdges.get(i));
-      graph.edges_.add(e);
-    }
-    return graph;
-  }
-
-  private MultiEdge createMultiEdgeFromJSONObj(JSONObject jsonEdge) {
-    Preconditions.checkNotNull(jsonEdge);
-    JSONArray sources = (JSONArray) jsonEdge.get("sources");
-    Set<Vertex> sourceVertices = getVerticesFromJSONArray(sources);
-    JSONArray targets = (JSONArray) jsonEdge.get("targets");
-    Set<Vertex> targetVertices = getVerticesFromJSONArray(targets);
-    MultiEdge.EdgeType type =
-        MultiEdge.EdgeType.valueOf((String) jsonEdge.get("edgeType"));
-    return new MultiEdge(sourceVertices, targetVertices, type);
-  }
-
-  private Set<Vertex> getVerticesFromJSONArray(JSONArray vertexIdArray) {
-    Set<Vertex> vertices = Sets.newHashSet();
-    for (int i = 0; i < vertexIdArray.size(); ++i) {
-      int sourceId = ((Long) vertexIdArray.get(i)).intValue();
-      Vertex sourceVertex = idToVertexMap_.get(new VertexId(sourceId));
-      Preconditions.checkNotNull(sourceVertex);
-      vertices.add(sourceVertex);
-    }
-    return vertices;
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (obj == null) return false;
-    if (obj.getClass() != this.getClass()) return false;
-    ColumnLineageGraph g = (ColumnLineageGraph) obj;
-    if (!this.vertices_.equals(g.vertices_) ||
-        !this.edges_.equals(g.edges_)) {
-      return false;
-    }
-    return true;
-  }
-
-  public String debugString() {
-    StringBuilder builder = new StringBuilder();
-    for (MultiEdge edge: edges_) {
-      builder.append(edge.toString() + "\n");
-    }
-    builder.append(toJson());
-    return builder.toString();
-  }
-
-  public void addTargetColumnLabels(Collection<String> columnLabels) {
-    Preconditions.checkNotNull(columnLabels);
-    targetColumnLabels_.addAll(columnLabels);
-  }
-
-  public void addTargetColumnLabels(Table dstTable) {
-    Preconditions.checkNotNull(dstTable);
-    String tblFullName = dstTable.getFullName();
-    for (String columnName: dstTable.getColumnNames()) {
-      targetColumnLabels_.add(tblFullName + "." + columnName);
-    }
-  }
-}
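
For reference, a rough standalone sketch of reading a lineage record in the JSON shape that createFromJSON() consumes; only the top-level keys used above (queryText, user, timestamp, vertices, edges and the per-edge sources/targets/edgeType) are exercised, and the concrete values are invented:

import org.json.simple.JSONArray;
import org.json.simple.JSONObject;
import org.json.simple.parser.JSONParser;
import org.json.simple.parser.ParseException;

public class LineageJsonSketch {
  public static void main(String[] args) throws ParseException {
    // Hypothetical serialized lineage record; only the top-level keys read by
    // createFromJSON() are shown.
    String json = "{\"queryText\":\"select id from functional.alltypes\","
        + "\"user\":\"alice\",\"timestamp\":1475193600,"
        + "\"vertices\":[{\"id\":0},{\"id\":1}],"
        + "\"edges\":[{\"sources\":[0],\"targets\":[1],\"edgeType\":\"PROJECTION\"}]}";
    JSONObject obj = (JSONObject) new JSONParser().parse(json);
    System.out.println("query: " + obj.get("queryText"));
    System.out.println("vertices: " + ((JSONArray) obj.get("vertices")).size());
    for (Object e : (JSONArray) obj.get("edges")) {
      JSONObject edge = (JSONObject) e;
      // Numeric vertex ids are parsed as Long by json-simple, as in
      // getVerticesFromJSONArray().
      Long sourceId = (Long) ((JSONArray) edge.get("sources")).get(0);
      System.out.println("edge " + edge.get("edgeType") + " from vertex " + sourceId);
    }
  }
}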

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/CompoundPredicate.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/CompoundPredicate.java b/fe/src/main/java/com/cloudera/impala/analysis/CompoundPredicate.java
deleted file mode 100644
index 4869004..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/CompoundPredicate.java
+++ /dev/null
@@ -1,216 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import com.cloudera.impala.catalog.Db;
-import com.cloudera.impala.catalog.Function.CompareMode;
-import com.cloudera.impala.catalog.ScalarFunction;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TExprNode;
-import com.cloudera.impala.thrift.TExprNodeType;
-import com.google.common.base.Objects;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-/**
- * Compound predicates: AND (&&), OR (||) and NOT (!).
- */
-public class CompoundPredicate extends Predicate {
-  public enum Operator {
-    AND("AND"),
-    OR("OR"),
-    NOT("NOT");
-
-    private final String description;
-
-    private Operator(String description) {
-      this.description = description;
-    }
-
-    @Override
-    public String toString() {
-      return description;
-    }
-  }
-  private final Operator op_;
-
-  public static void initBuiltins(Db db) {
-    // AND and OR are implemented as custom exprs, so they do not have a function symbol.
-    db.addBuiltin(ScalarFunction.createBuiltinOperator(
-        Operator.AND.name(), "",
-        Lists.<Type>newArrayList(Type.BOOLEAN, Type.BOOLEAN), Type.BOOLEAN));
-    db.addBuiltin(ScalarFunction.createBuiltinOperator(
-        Operator.OR.name(), "",
-        Lists.<Type>newArrayList(Type.BOOLEAN, Type.BOOLEAN), Type.BOOLEAN));
-    db.addBuiltin(ScalarFunction.createBuiltinOperator(
-        Operator.NOT.name(), "impala::CompoundPredicate::Not",
-        Lists.<Type>newArrayList(Type.BOOLEAN), Type.BOOLEAN));
-  }
-
-  public CompoundPredicate(Operator op, Expr e1, Expr e2) {
-    super();
-    this.op_ = op;
-    Preconditions.checkNotNull(e1);
-    children_.add(e1);
-    Preconditions.checkArgument(op == Operator.NOT && e2 == null
-        || op != Operator.NOT && e2 != null);
-    if (e2 != null) children_.add(e2);
-  }
-
-  /**
-   * Copy c'tor used in clone().
-   */
-  protected CompoundPredicate(CompoundPredicate other) {
-    super(other);
-    op_ = other.op_;
-  }
-
-  public Operator getOp() { return op_; }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (!super.equals(obj)) return false;
-    return ((CompoundPredicate) obj).op_ == op_;
-  }
-
-  @Override
-  public String debugString() {
-    return Objects.toStringHelper(this)
-        .add("op", op_)
-        .addValue(super.debugString())
-        .toString();
-  }
-
-  @Override
-  public String toSqlImpl() {
-    if (children_.size() == 1) {
-      Preconditions.checkState(op_ == Operator.NOT);
-      return "NOT " + getChild(0).toSql();
-    } else {
-      return getChild(0).toSql() + " " + op_.toString() + " " + getChild(1).toSql();
-    }
-  }
-
-  @Override
-  protected void toThrift(TExprNode msg) {
-    msg.node_type = TExprNodeType.COMPOUND_PRED;
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    if (isAnalyzed_) return;
-    super.analyze(analyzer);
-
-    // Check that children are predicates.
-    for (Expr e: children_) {
-      if (!e.getType().isBoolean() && !e.getType().isNull()) {
-        throw new AnalysisException(String.format("Operand '%s' part of predicate " +
-            "'%s' should return type 'BOOLEAN' but returns type '%s'.",
-            e.toSql(), toSql(), e.getType().toSql()));
-      }
-    }
-
-    fn_ = getBuiltinFunction(analyzer, op_.toString(), collectChildReturnTypes(),
-        CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
-    Preconditions.checkState(fn_ != null);
-    Preconditions.checkState(fn_.getReturnType().isBoolean());
-    castForFunctionCall(false);
-    if (hasChildCosts()) evalCost_ = getChildCosts() + COMPOUND_PREDICATE_COST;
-
-    if (!getChild(0).hasSelectivity() ||
-        (children_.size() == 2 && !getChild(1).hasSelectivity())) {
-      // Give up if one of our children has an unknown selectivity.
-      selectivity_ = -1;
-      return;
-    }
-
-    switch (op_) {
-      case AND:
-        selectivity_ = getChild(0).selectivity_ * getChild(1).selectivity_;
-        break;
-      case OR:
-        selectivity_ = getChild(0).selectivity_ + getChild(1).selectivity_
-            - getChild(0).selectivity_ * getChild(1).selectivity_;
-        break;
-      case NOT:
-        selectivity_ = 1.0 - getChild(0).selectivity_;
-        break;
-    }
-    selectivity_ = Math.max(0.0, Math.min(1.0, selectivity_));
-  }
-
-  /**
-   * Retrieve the slots bound by BinaryPredicate, InPredicate and
-   * CompoundPredicates in the subtree rooted at 'this'.
-   */
-  public ArrayList<SlotRef> getBoundSlots() {
-    ArrayList<SlotRef> slots = Lists.newArrayList();
-    for (int i = 0; i < getChildren().size(); ++i) {
-      if (getChild(i) instanceof BinaryPredicate ||
-          getChild(i) instanceof InPredicate) {
-        slots.add(((Predicate)getChild(i)).getBoundSlot());
-      } else if (getChild(i) instanceof CompoundPredicate) {
-        slots.addAll(((CompoundPredicate)getChild(i)).getBoundSlots());
-      }
-    }
-    return slots;
-  }
-
-  /**
-   * Negates a CompoundPredicate.
-   */
-  @Override
-  public Expr negate() {
-    if (op_ == Operator.NOT) return getChild(0);
-    Expr negatedLeft = getChild(0).negate();
-    Expr negatedRight = getChild(1).negate();
-    Operator newOp = (op_ == Operator.OR) ? Operator.AND : Operator.OR;
-    return new CompoundPredicate(newOp, negatedLeft, negatedRight);
-  }
-
-  /**
-   * Creates a conjunctive predicate from a list of exprs.
-   */
-  public static Expr createConjunctivePredicate(List<Expr> conjuncts) {
-    Expr conjunctivePred = null;
-    for (Expr expr: conjuncts) {
-      if (conjunctivePred == null) {
-        conjunctivePred = expr;
-        continue;
-      }
-      conjunctivePred = new CompoundPredicate(CompoundPredicate.Operator.AND,
-          expr, conjunctivePred);
-    }
-    return conjunctivePred;
-  }
-
-  @Override
-  public Expr clone() { return new CompoundPredicate(this); }
-
-  // Create an AND predicate between two exprs, 'lhs' and 'rhs'. If
-  // 'rhs' is null, simply return 'lhs'.
-  public static Expr createConjunction(Expr lhs, Expr rhs) {
-    if (rhs == null) return lhs;
-    return new CompoundPredicate(Operator.AND, rhs, lhs);
-  }
-}
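
As a minimal standalone illustration of the selectivity rules applied in analyze() above (assuming independent child predicates with known selectivities), detached from the Expr hierarchy:

public class CompoundSelectivitySketch {
  // Same formulas as CompoundPredicate.analyze(): AND multiplies, OR uses
  // inclusion-exclusion, NOT complements; results are clamped to [0, 1].
  static double and(double a, double b) { return clamp(a * b); }
  static double or(double a, double b) { return clamp(a + b - a * b); }
  static double not(double a) { return clamp(1.0 - a); }
  static double clamp(double s) { return Math.max(0.0, Math.min(1.0, s)); }

  public static void main(String[] args) {
    double a = 0.1, b = 0.5;
    System.out.println("a AND b: " + and(a, b));  // 0.1 * 0.5 = 0.05
    System.out.println("a OR b:  " + or(a, b));   // 0.1 + 0.5 - 0.05 = 0.55
    System.out.println("NOT a:   " + not(a));     // 1 - 0.1 = 0.9
  }
}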

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/ComputeStatsStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/ComputeStatsStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/ComputeStatsStmt.java
deleted file mode 100644
index cd01713..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/ComputeStatsStmt.java
+++ /dev/null
@@ -1,553 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.Iterator;
-import java.util.List;
-
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.log4j.Logger;
-
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.catalog.Column;
-import com.cloudera.impala.catalog.HBaseTable;
-import com.cloudera.impala.catalog.HdfsPartition;
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.catalog.View;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.PrintUtils;
-import com.cloudera.impala.thrift.TComputeStatsParams;
-import com.cloudera.impala.thrift.TPartitionStats;
-import com.cloudera.impala.thrift.TTableName;
-import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-/**
- * Represents a COMPUTE STATS <table> or COMPUTE INCREMENTAL STATS <table> [PARTITION
- * <part_spec>] statement for statistics collection. The former statement gathers all
- * table and column stats for a given table and stores them in the Metastore via the
- * CatalogService. All existing stats for that table are replaced and no existing stats
- * are reused. The latter, incremental form, similarly computes stats for the whole table
- * but does so by re-using stats from partitions that already have 'valid' statistics.
- * Statistics are currently considered 'valid' if they exist; in the future they may be
- * expired based on recency, etc.
- *
- * TODO: Allow more coarse/fine grained (db, column)
- * TODO: Compute stats on complex types.
- */
-public class ComputeStatsStmt extends StatementBase {
-  private static final Logger LOG = Logger.getLogger(ComputeStatsStmt.class);
-
-  private static String AVRO_SCHEMA_MSG_PREFIX = "Cannot COMPUTE STATS on Avro table " +
-      "'%s' because its column definitions do not match those in the Avro schema.";
-  private static String AVRO_SCHEMA_MSG_SUFFIX = "Please re-create the table with " +
-          "column definitions, e.g., using the result of 'SHOW CREATE TABLE'";
-
-  protected final TableName tableName_;
-
-  // Set during analysis.
-  protected Table table_;
-
-  // The Null count is not currently being used in optimization or run-time,
-  // and compute stats runs 2x faster in many cases when not counting NULLs.
-  private static final boolean COUNT_NULLS = false;
-
-  // Query for getting the per-partition row count and the total row count.
-  // Set during analysis.
-  protected String tableStatsQueryStr_;
-
-  // Query for getting the per-column NDVs and number of NULLs.
-  // Set during analysis.
-  protected String columnStatsQueryStr_;
-
-  // If true, stats will be gathered incrementally per-partition.
-  private boolean isIncremental_ = false;
-
-  // If true, expect the compute stats process to produce output for all partitions in the
-  // target table (only meaningful, therefore, if partitioned). This is always true for
-  // non-incremental computations. If set, expectedPartitions_ will be empty; the point
-  // of this flag is to optimize the common case where all partitions are targeted.
-  private boolean expectAllPartitions_ = false;
-
-  // The list of valid partition statistics that can be used in an incremental computation
-  // without themselves being recomputed. Populated in analyze().
-  private final List<TPartitionStats> validPartStats_ = Lists.newArrayList();
-
-  // For incremental computations, the list of partitions (identified by list of partition
-  // column values) that we expect to receive results for. Used to ensure that even empty
-  // partitions emit results.
-  // TODO: Consider using partition IDs (and adding them to the child queries with a
-  // PARTITION_ID() builtin)
-  private final List<List<String>> expectedPartitions_ = Lists.newArrayList();
-
-  // If non-null, the partition that an incremental computation might apply to. Must be
-  // null if this is a non-incremental computation.
-  private PartitionSpec partitionSpec_ = null;
-
-  // The maximum number of partitions that may be explicitly selected by filter
-  // predicates. Any query that selects more than this automatically drops back to a full
-  // incremental stats recomputation.
-  // TODO: We can probably do better than this, e.g. running several queries, each of
-  // which selects up to MAX_INCREMENTAL_PARTITIONS partitions.
-  private static final int MAX_INCREMENTAL_PARTITIONS = 1000;
-
-  /**
-   * Constructor for the non-incremental form of COMPUTE STATS.
-   */
-  protected ComputeStatsStmt(TableName tableName) {
-    this(tableName, false, null);
-  }
-
-  /**
-   * Constructor for the incremental form of COMPUTE STATS. If isIncremental is true,
-   * statistics will be recomputed incrementally; if false they will be recomputed for the
-   * whole table. The partition spec partSpec can specify a single partition whose stats
-   * should be recomputed.
-   */
-  protected ComputeStatsStmt(TableName tableName, boolean isIncremental,
-      PartitionSpec partSpec) {
-    Preconditions.checkState(tableName != null && !tableName.isEmpty());
-    Preconditions.checkState(isIncremental || partSpec == null);
-    this.tableName_ = tableName;
-    this.table_ = null;
-    this.isIncremental_ = isIncremental;
-    this.partitionSpec_ = partSpec;
-    if (partitionSpec_ != null) {
-      partitionSpec_.setTableName(tableName);
-      partitionSpec_.setPrivilegeRequirement(Privilege.ALTER);
-    }
-  }
-
-  /**
-   * Utility method for constructing the child queries: adds the table's partition
-   * (clustering) columns to both a select list and a group-by list.
-   */
-  private void addPartitionCols(HdfsTable table, List<String> selectList,
-      List<String> groupByCols) {
-    for (int i = 0; i < table.getNumClusteringCols(); ++i) {
-      String colRefSql = ToSqlUtils.getIdentSql(table.getColumns().get(i).getName());
-      groupByCols.add(colRefSql);
-      // Add the partition column to the select list as well so that each result row
-      // can be mapped back to the partition it describes.
-      selectList.add(colRefSql);
-    }
-  }
-
-  private List<String> getBaseColumnStatsQuerySelectList(Analyzer analyzer) {
-    List<String> columnStatsSelectList = Lists.newArrayList();
-    // For Hdfs tables, exclude partition columns from stats gathering because Hive
-    // cannot store them as part of the non-partition column stats. For HBase tables,
-    // include the single clustering column (the row key).
-    int startColIdx = (table_ instanceof HBaseTable) ? 0 : table_.getNumClusteringCols();
-    final String ndvUda = isIncremental_ ? "NDV_NO_FINALIZE" : "NDV";
-
-    for (int i = startColIdx; i < table_.getColumns().size(); ++i) {
-      Column c = table_.getColumns().get(i);
-      Type type = c.getType();
-
-      // Ignore columns with an invalid/unsupported type. For example, complex types in
-      // an HBase-backed table will appear as invalid types.
-      if (!type.isValid() || !type.isSupported()
-          || c.getType().isComplexType()) {
-        continue;
-      }
-      // NDV approximation function. Add explicit alias for later identification when
-      // updating the Metastore.
-      String colRefSql = ToSqlUtils.getIdentSql(c.getName());
-      columnStatsSelectList.add(ndvUda + "(" + colRefSql + ") AS " + colRefSql);
-
-      if (COUNT_NULLS) {
-        // Count the number of NULL values.
-        columnStatsSelectList.add("COUNT(IF(" + colRefSql + " IS NULL, 1, NULL))");
-      } else {
-        // Using -1 to indicate "unknown". We need to cast to BIGINT because the backend
-        // expects an i64Val as the number of NULLs returned by the COMPUTE STATS column
-        // stats child query. See CatalogOpExecutor::SetColumnStats(). If we do not cast,
-        // the -1 will be treated as TINYINT, resulting in a 0 being placed in the #NULLs
-        // column (see IMPALA-1068).
-        columnStatsSelectList.add("CAST(-1 as BIGINT)");
-      }
-
-      // For STRING columns also compute the max and avg string length.
-      if (type.isStringType()) {
-        columnStatsSelectList.add("MAX(length(" + colRefSql + "))");
-        columnStatsSelectList.add("AVG(length(" + colRefSql + "))");
-      } else {
-        // For non-STRING columns we use the fixed size of the type.
-        // We store the same information for all types to avoid having to
-        // treat STRING columns specially in the BE CatalogOpExecutor.
-        Integer typeSize = type.getPrimitiveType().getSlotSize();
-        columnStatsSelectList.add(typeSize.toString());
-        columnStatsSelectList.add("CAST(" + typeSize.toString() + " as DOUBLE)");
-      }
-
-      if (isIncremental_) {
-        // Need the count in order to properly combine per-partition column stats
-        columnStatsSelectList.add("COUNT(" + colRefSql + ")");
-      }
-    }
-    return columnStatsSelectList;
-  }
-
-  /**
-   * Constructs two queries to compute statistics for 'tableName_', if that table exists
-   * (although if we can detect that no work needs to be done for either query, that query
-   * will be 'null' and not executed).
-   *
-   * The first query computes the number of rows (on a per-partition basis if the table is
-   * partitioned) and has the form "SELECT COUNT(*) FROM tbl GROUP BY part_col1,
-   * part_col2...", with an optional WHERE clause for incremental computation (see below).
-   *
-   * The second query computes the NDV estimate, the average width, the maximum width and,
-   * optionally, the number of nulls for each column. For non-partitioned tables (or
-   * non-incremental computations), the query is simple:
-   *
-   * SELECT NDV(col), COUNT(<nulls>), MAX(length(col)), AVG(length(col)) FROM tbl
-   *
-   * (For non-string columns, the widths are hard-coded as they are known at query
-   * construction time).
-   *
-   * If computation is incremental (i.e. the original statement was COMPUTE INCREMENTAL
-   * STATS.., and the underlying table is a partitioned HdfsTable), some modifications are
-   * made to the non-incremental per-column query. First, a different UDA,
-   * NDV_NO_FINALIZE() is used to retrieve and serialise the intermediate state from each
-   * column. Second, the results are grouped by partition, as with the row count query, so
-   * that the intermediate NDV computation state can be stored per-partition. The number
-   * of rows per partition is also recorded.
-   *
-   * For both the row count query and the column stats query, the WHERE clause is
-   * used to restrict execution only to partitions that actually require new statistics to
-   * be computed.
-   *
-   * SELECT NDV_NO_FINALIZE(col), <nulls, max, avg>, COUNT(col) FROM tbl
-   * WHERE ((part_col1 = p1_val1) AND (part_col2 = p1_val2)) OR
-   *       ((part_col1 = p2_val1) AND (part_col2 = p2_val2)) OR ...
-   * GROUP BY part_col1, part_col2, ...
-   */
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    table_ = analyzer.getTable(tableName_, Privilege.ALTER);
-    String sqlTableName = table_.getTableName().toSql();
-    if (table_ instanceof View) {
-      throw new AnalysisException(String.format(
-          "COMPUTE STATS not supported for view %s", sqlTableName));
-    }
-
-    if (!(table_ instanceof HdfsTable)) {
-      if (partitionSpec_ != null) {
-        throw new AnalysisException("COMPUTE INCREMENTAL ... PARTITION not supported " +
-            "for non-HDFS table " + table_.getTableName());
-      }
-      isIncremental_ = false;
-    }
-
-    // Ensure that we write an entry for every partition if this isn't incremental
-    if (!isIncremental_) expectAllPartitions_ = true;
-
-    HdfsTable hdfsTable = null;
-    if (table_ instanceof HdfsTable) {
-      hdfsTable = (HdfsTable)table_;
-      if (isIncremental_ && hdfsTable.getNumClusteringCols() == 0 &&
-          partitionSpec_ != null) {
-          throw new AnalysisException(String.format(
-              "Can't compute PARTITION stats on an unpartitioned table: %s",
-              sqlTableName));
-      } else if (partitionSpec_ != null) {
-          partitionSpec_.setPartitionShouldExist();
-          partitionSpec_.analyze(analyzer);
-          for (PartitionKeyValue kv: partitionSpec_.getPartitionSpecKeyValues()) {
-            // TODO: We could match the dynamic keys (i.e. as wildcards) as well, but that
-            // would involve looping over all partitions and seeing which match the
-            // partition spec.
-            if (!kv.isStatic()) {
-              throw new AnalysisException("All partition keys must have values: " +
-                  kv.toString());
-            }
-          }
-      }
-      // For incremental stats, estimate the size of intermediate stats and report an
-      // error if the estimate is greater than MAX_INCREMENTAL_STATS_SIZE_BYTES.
-      if (isIncremental_) {
-        long statsSizeEstimate = hdfsTable.getColumns().size() *
-            hdfsTable.getPartitions().size() * HdfsTable.STATS_SIZE_PER_COLUMN_BYTES;
-        if (statsSizeEstimate > HdfsTable.MAX_INCREMENTAL_STATS_SIZE_BYTES) {
-          LOG.error("Incremental stats size estimate for table " + hdfsTable.getName() +
-              " exceeded " + HdfsTable.MAX_INCREMENTAL_STATS_SIZE_BYTES + ", estimate = "
-              + statsSizeEstimate);
-          throw new AnalysisException("Incremental stats size estimate exceeds "
-              + PrintUtils.printBytes(HdfsTable.MAX_INCREMENTAL_STATS_SIZE_BYTES)
-              + ". Please try COMPUTE STATS instead.");
-        }
-      }
-    }
-
-    // Build partition filters that only select partitions without valid statistics for
-    // incremental computation.
-    List<String> filterPreds = Lists.newArrayList();
-    if (isIncremental_) {
-      if (partitionSpec_ == null) {
-        // If any column does not have stats, we recompute statistics for all partitions
-        // TODO: need a better way to invalidate stats for all partitions, so that we can
-        // use this logic to only recompute new / changed columns.
-        boolean tableIsMissingColStats = false;
-
-        // We'll warn the user if a column is missing stats (and therefore we rescan the
-        // whole table), but if all columns are missing stats, the table just doesn't have
-        // any stats and there's no need to warn.
-        boolean allColumnsMissingStats = true;
-        String exampleColumnMissingStats = null;
-        // Partition columns always have stats, so exclude them from this search
-        for (Column col: table_.getNonClusteringColumns()) {
-          if (!col.getStats().hasStats()) {
-            if (!tableIsMissingColStats) {
-              tableIsMissingColStats = true;
-              exampleColumnMissingStats = col.getName();
-            }
-          } else {
-            allColumnsMissingStats = false;
-          }
-        }
-
-        if (tableIsMissingColStats && !allColumnsMissingStats) {
-          analyzer.addWarning("Column " + exampleColumnMissingStats +
-              " does not have statistics, recomputing stats for the whole table");
-        }
-
-        for (HdfsPartition p: hdfsTable.getPartitions()) {
-          if (p.isDefaultPartition()) continue;
-          TPartitionStats partStats = p.getPartitionStats();
-          if (!p.hasIncrementalStats() || tableIsMissingColStats) {
-            if (partStats == null) LOG.trace(p.toString() + " does not have stats");
-            if (!tableIsMissingColStats) filterPreds.add(p.getConjunctSql());
-            List<String> partValues = Lists.newArrayList();
-            for (LiteralExpr partValue: p.getPartitionValues()) {
-              partValues.add(PartitionKeyValue.getPartitionKeyValueString(partValue,
-                  "NULL"));
-            }
-            expectedPartitions_.add(partValues);
-          } else {
-            LOG.trace(p.toString() + " does have statistics");
-            validPartStats_.add(partStats);
-          }
-        }
-        if (expectedPartitions_.size() == hdfsTable.getPartitions().size() - 1) {
-          expectedPartitions_.clear();
-          expectAllPartitions_ = true;
-        }
-      } else {
-        // Always compute stats on a particular partition when told to.
-        List<String> partitionConjuncts = Lists.newArrayList();
-        for (PartitionKeyValue kv: partitionSpec_.getPartitionSpecKeyValues()) {
-          partitionConjuncts.add(kv.toPredicateSql());
-        }
-        filterPreds.add("(" + Joiner.on(" AND ").join(partitionConjuncts) + ")");
-        HdfsPartition targetPartition =
-            hdfsTable.getPartition(partitionSpec_.getPartitionSpecKeyValues());
-        List<String> partValues = Lists.newArrayList();
-        for (LiteralExpr partValue: targetPartition.getPartitionValues()) {
-          partValues.add(PartitionKeyValue.getPartitionKeyValueString(partValue,
-              "NULL"));
-        }
-        expectedPartitions_.add(partValues);
-        for (HdfsPartition p: hdfsTable.getPartitions()) {
-          if (p.isDefaultPartition()) continue;
-          if (p == targetPartition) continue;
-          TPartitionStats partStats = p.getPartitionStats();
-          if (partStats != null) validPartStats_.add(partStats);
-        }
-      }
-
-      if (filterPreds.size() == 0 && validPartStats_.size() != 0) {
-        LOG.info("No partitions selected for incremental stats update");
-        analyzer.addWarning("No partitions selected for incremental stats update");
-        return;
-      }
-    }
-
-    if (filterPreds.size() > MAX_INCREMENTAL_PARTITIONS) {
-      // TODO: Consider simply running for MAX_INCREMENTAL_PARTITIONS partitions, and then
-      // advising the user to iterate.
-      analyzer.addWarning(
-          "Too many partitions selected, doing full recomputation of incremental stats");
-      filterPreds.clear();
-      validPartStats_.clear();
-    }
-
-    List<String> groupByCols = Lists.newArrayList();
-    List<String> partitionColsSelectList = Lists.newArrayList();
-    // Only add group by clause for HdfsTables.
-    if (hdfsTable != null) {
-      if (hdfsTable.isAvroTable()) checkIncompleteAvroSchema(hdfsTable);
-      addPartitionCols(hdfsTable, partitionColsSelectList, groupByCols);
-    }
-
-    // Query for getting the per-partition row count and the total row count.
-    StringBuilder tableStatsQueryBuilder = new StringBuilder("SELECT ");
-    List<String> tableStatsSelectList = Lists.newArrayList();
-    tableStatsSelectList.add("COUNT(*)");
-
-    tableStatsSelectList.addAll(partitionColsSelectList);
-    tableStatsQueryBuilder.append(Joiner.on(", ").join(tableStatsSelectList));
-    tableStatsQueryBuilder.append(" FROM " + sqlTableName);
-
-    // Query for getting the per-column NDVs and number of NULLs.
-    List<String> columnStatsSelectList = getBaseColumnStatsQuerySelectList(analyzer);
-
-    if (isIncremental_) columnStatsSelectList.addAll(partitionColsSelectList);
-
-    StringBuilder columnStatsQueryBuilder = new StringBuilder("SELECT ");
-    columnStatsQueryBuilder.append(Joiner.on(", ").join(columnStatsSelectList));
-    columnStatsQueryBuilder.append(" FROM " + sqlTableName);
-
-    // Add the WHERE clause to filter out partitions that we don't want to compute
-    // incremental stats for. While this is a win in most situations, we would like to
-    // avoid this where it does no useful work (i.e. it selects all rows). This happens
-    // when there are no existing valid partitions (so all partitions will have been
-    // selected in) and there is no partition spec (so no single partition was explicitly
-    // selected in).
-    if (filterPreds.size() > 0 &&
-        (validPartStats_.size() > 0 || partitionSpec_ != null)) {
-      String filterClause = " WHERE " + Joiner.on(" OR ").join(filterPreds);
-      columnStatsQueryBuilder.append(filterClause);
-      tableStatsQueryBuilder.append(filterClause);
-    }
-
-    if (groupByCols.size() > 0) {
-      String groupBy = " GROUP BY " + Joiner.on(", ").join(groupByCols);
-      if (isIncremental_) columnStatsQueryBuilder.append(groupBy);
-      tableStatsQueryBuilder.append(groupBy);
-    }
-
-    tableStatsQueryStr_ = tableStatsQueryBuilder.toString();
-    LOG.debug("Table stats query: " + tableStatsQueryStr_);
-
-    if (columnStatsSelectList.isEmpty()) {
-      // Table doesn't have any columns that we can compute stats for.
-      LOG.info("No supported column types in table " + table_.getTableName() +
-          ", no column statistics will be gathered.");
-      columnStatsQueryStr_ = null;
-      return;
-    }
-
-    columnStatsQueryStr_ = columnStatsQueryBuilder.toString();
-    LOG.debug("Column stats query: " + columnStatsQueryStr_);
-  }
-
-  /**
-   * Checks whether the column definitions from the CREATE TABLE stmt match the columns
-   * in the Avro schema. If there is a mismatch, then COMPUTE STATS cannot update the
-   * statistics in the Metastore's backend DB due to HIVE-6308. Throws an
-   * AnalysisException for such ill-created Avro tables. Does nothing if
-   * the column definitions match the Avro schema exactly.
-   */
-  private void checkIncompleteAvroSchema(HdfsTable table) throws AnalysisException {
-    Preconditions.checkState(table.isAvroTable());
-    org.apache.hadoop.hive.metastore.api.Table msTable = table.getMetaStoreTable();
-    // The column definitions from 'CREATE TABLE (column definitions) ...'
-    Iterator<FieldSchema> colDefs = msTable.getSd().getCols().iterator();
-    // The columns derived from the Avro schema file or literal schema.
-    // Inconsistencies between the Avro-schema columns and the column definitions
-    // are sometimes resolved in the CREATE TABLE, and sometimes not (see below).
-    Iterator<Column> avroSchemaCols = table.getColumns().iterator();
-    // Skip partition columns from 'table' since those are not present in
-    // the msTable field schemas.
-    for (int i = 0; i < table.getNumClusteringCols(); ++i) {
-      if (avroSchemaCols.hasNext()) avroSchemaCols.next();
-    }
-    int pos = 0;
-    while (colDefs.hasNext() || avroSchemaCols.hasNext()) {
-      if (colDefs.hasNext() && avroSchemaCols.hasNext()) {
-        FieldSchema colDef = colDefs.next();
-        Column avroSchemaCol = avroSchemaCols.next();
-        // Check that the column names are identical. Ignore mismatched types
-        // as those will either fail in the scan or succeed.
-        if (!colDef.getName().equalsIgnoreCase(avroSchemaCol.getName())) {
-          throw new AnalysisException(
-              String.format(AVRO_SCHEMA_MSG_PREFIX +
-                  "\nDefinition of column '%s' of type '%s' does not match " +
-                  "the Avro-schema column '%s' of type '%s' at position '%s'.\n" +
-                  AVRO_SCHEMA_MSG_SUFFIX,
-                  table.getName(), colDef.getName(), colDef.getType(),
-                  avroSchemaCol.getName(), avroSchemaCol.getType(), pos));
-        }
-      }
-      // The following two cases are typically not possible because Hive resolves
-      // inconsistencies between the column-definition list and the Avro schema if a
-      // column-definition list was given in the CREATE TABLE (having no column
-      // definitions at all results in HIVE-6308). Even so, we check these cases for
-      // extra safety. COMPUTE STATS could be made to succeed in special instances of
-      // the cases below but we chose to throw an AnalysisException to avoid confusion
-      // because this scenario "should" never arise as mentioned above.
-      if (colDefs.hasNext() && !avroSchemaCols.hasNext()) {
-        FieldSchema colDef = colDefs.next();
-        throw new AnalysisException(
-            String.format(AVRO_SCHEMA_MSG_PREFIX +
-                "\nMissing Avro-schema column corresponding to column " +
-                "definition '%s' of type '%s' at position '%s'.\n" +
-                AVRO_SCHEMA_MSG_SUFFIX,
-                table.getName(), colDef.getName(), colDef.getType(), pos));
-      }
-      if (!colDefs.hasNext() && avroSchemaCols.hasNext()) {
-        Column avroSchemaCol = avroSchemaCols.next();
-        throw new AnalysisException(
-            String.format(AVRO_SCHEMA_MSG_PREFIX +
-                "\nMissing column definition corresponding to Avro-schema " +
-                "column '%s' of type '%s' at position '%s'.\n" +
-                AVRO_SCHEMA_MSG_SUFFIX,
-                table.getName(), avroSchemaCol.getName(), avroSchemaCol.getType(), pos));
-      }
-      ++pos;
-    }
-  }
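
A simplified standalone sketch of the lock-step comparison done by checkIncompleteAvroSchema() above, using plain strings in place of FieldSchema/Column and reporting every mismatch instead of throwing on the first one; the column names are invented:

import java.util.Iterator;
import java.util.List;
import com.google.common.collect.Lists;

public class AvroSchemaMatchSketch {
  public static void main(String[] args) {
    List<String> colDefs = Lists.newArrayList("id", "name");
    List<String> avroCols = Lists.newArrayList("id", "username", "extra");
    Iterator<String> defs = colDefs.iterator();
    Iterator<String> avro = avroCols.iterator();
    int pos = 0;
    // Walk both column lists in lock-step and report name mismatches and
    // length differences, position by position.
    while (defs.hasNext() || avro.hasNext()) {
      if (defs.hasNext() && avro.hasNext()) {
        String d = defs.next();
        String a = avro.next();
        if (!d.equalsIgnoreCase(a)) {
          System.out.println("Mismatch at position " + pos + ": '" + d + "' vs '" + a + "'");
        }
      } else if (defs.hasNext()) {
        System.out.println("No Avro column for definition '" + defs.next() + "' at " + pos);
      } else {
        System.out.println("No definition for Avro column '" + avro.next() + "' at " + pos);
      }
      ++pos;
    }
  }
}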
-
-  public String getTblStatsQuery() { return tableStatsQueryStr_; }
-  public String getColStatsQuery() { return columnStatsQueryStr_; }
-
-  @Override
-  public String toSql() {
-    if (!isIncremental_) {
-      return "COMPUTE STATS " + tableName_.toSql();
-    } else {
-      // Parenthesize the conditional so it is not folded into the string concatenation.
-      return "COMPUTE INCREMENTAL STATS " + tableName_.toSql() +
-          (partitionSpec_ == null ? "" : partitionSpec_.toSql());
-    }
-  }
-
-  public TComputeStatsParams toThrift() {
-    TComputeStatsParams params = new TComputeStatsParams();
-    params.setTable_name(new TTableName(table_.getDb().getName(), table_.getName()));
-    params.setTbl_stats_query(tableStatsQueryStr_);
-    if (columnStatsQueryStr_ != null) {
-      params.setCol_stats_query(columnStatsQueryStr_);
-    } else {
-      params.setCol_stats_queryIsSet(false);
-    }
-
-    params.setIs_incremental(isIncremental_);
-    params.setExisting_part_stats(validPartStats_);
-    params.setExpect_all_partitions(expectAllPartitions_);
-    if (!expectAllPartitions_) params.setExpected_partitions(expectedPartitions_);
-    if (isIncremental_) {
-      params.setNum_partition_cols(((HdfsTable)table_).getNumClusteringCols());
-    }
-    return params;
-  }
-}
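
To make the query shapes described in the analyze() comment above concrete, a rough sketch that assembles both child queries with Joiner for a hypothetical table db.sales partitioned by (year, month) with a single STRING column item:

import java.util.List;
import com.google.common.base.Joiner;
import com.google.common.collect.Lists;

public class ComputeStatsQuerySketch {
  public static void main(String[] args) {
    String tbl = "db.sales";
    List<String> partCols = Lists.newArrayList("`year`", "`month`");
    String groupBy = " GROUP BY " + Joiner.on(", ").join(partCols);

    // Per-partition row count query (incremental form: grouped by partition columns).
    List<String> tableStatsSelect = Lists.newArrayList("COUNT(*)");
    tableStatsSelect.addAll(partCols);
    String tableStatsQuery =
        "SELECT " + Joiner.on(", ").join(tableStatsSelect) + " FROM " + tbl + groupBy;

    // Per-column stats query: NDV intermediate state, -1 as the "unknown" null count,
    // max/avg length for the STRING column, COUNT(col), plus the partition columns.
    List<String> colStatsSelect = Lists.newArrayList(
        "NDV_NO_FINALIZE(`item`) AS `item`", "CAST(-1 as BIGINT)",
        "MAX(length(`item`))", "AVG(length(`item`))", "COUNT(`item`)");
    colStatsSelect.addAll(partCols);
    String columnStatsQuery =
        "SELECT " + Joiner.on(", ").join(colStatsSelect) + " FROM " + tbl + groupBy;

    System.out.println(tableStatsQuery);
    System.out.println(columnStatsQuery);
  }
}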

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/CreateDataSrcStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/CreateDataSrcStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/CreateDataSrcStmt.java
deleted file mode 100644
index 1ee6fd4..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/CreateDataSrcStmt.java
+++ /dev/null
@@ -1,97 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
-
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.extdatasource.ApiVersion;
-import com.cloudera.impala.thrift.TCreateDataSourceParams;
-import com.cloudera.impala.thrift.TDataSource;
-import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
-
-/**
- * Represents a CREATE DATA SOURCE statement.
- */
-public class CreateDataSrcStmt extends StatementBase {
-  private final String dataSrcName_;
-  private final String className_;
-  private final String apiVersionString_;
-  private final HdfsUri location_;
-  private final boolean ifNotExists_;
-  private ApiVersion apiVersion_;
-
-  public CreateDataSrcStmt(String dataSrcName, HdfsUri location, String className,
-      String apiVersionString, boolean ifNotExists) {
-    Preconditions.checkNotNull(dataSrcName);
-    Preconditions.checkNotNull(className);
-    Preconditions.checkNotNull(apiVersionString);
-    Preconditions.checkNotNull(location);
-    dataSrcName_ = dataSrcName.toLowerCase();
-    location_ = location;
-    className_ = className;
-    apiVersionString_ = apiVersionString;
-    ifNotExists_ = ifNotExists;
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    if (!MetaStoreUtils.validateName(dataSrcName_)) {
-      throw new AnalysisException("Invalid data source name: " + dataSrcName_);
-    }
-    if (!ifNotExists_ && analyzer.getCatalog().getDataSource(dataSrcName_) != null) {
-      throw new AnalysisException(Analyzer.DATA_SRC_ALREADY_EXISTS_ERROR_MSG +
-          dataSrcName_);
-    }
-
-    apiVersion_ = ApiVersion.parseApiVersion(apiVersionString_);
-    if (apiVersion_ == null) {
-      throw new AnalysisException("Invalid API version: '" + apiVersionString_ +
-          "'. Valid API versions: " + Joiner.on(", ").join(ApiVersion.values()));
-    }
-
-    location_.analyze(analyzer, Privilege.ALL, FsAction.READ);
-    // TODO: Check class exists and implements API version
-    // TODO: authorization check
-  }
-
-  @Override
-  public String toSql() {
-    StringBuilder sb = new StringBuilder();
-    sb.append("CREATE DATA SOURCE ");
-    if (ifNotExists_) sb.append("IF NOT EXISTS ");
-    sb.append(dataSrcName_);
-    sb.append(" LOCATION '");
-    sb.append(location_.getLocation());
-    sb.append("' CLASS '");
-    sb.append(className_);
-    sb.append("' API_VERSION '");
-    sb.append(apiVersion_.name());
-    sb.append("'");
-    return sb.toString();
-  }
-
-  public TCreateDataSourceParams toThrift() {
-    return new TCreateDataSourceParams(
-        new TDataSource(dataSrcName_, location_.toString(), className_,
-            apiVersion_.name())).setIf_not_exists(ifNotExists_);
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/CreateDbStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/CreateDbStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/CreateDbStmt.java
deleted file mode 100644
index 3dedd8b..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/CreateDbStmt.java
+++ /dev/null
@@ -1,102 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
-
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.catalog.Db;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TCreateDbParams;
-
-/**
- * Represents a CREATE DATABASE statement
- */
-public class CreateDbStmt extends StatementBase {
-  private final String dbName_;
-  private final HdfsUri location_;
-  private final String comment_;
-  private final boolean ifNotExists_;
-
-  /**
-   * Creates a database with the given name.
-   */
-  public CreateDbStmt(String dbName) {
-    this(dbName, null, null, false);
-  }
-
-  /**
-   * Creates a database with the given name, comment, and HDFS table storage location.
-   * New tables created in the database inherit its location as their default storage
-   * location. Creating the database throws an error if a database with the same name
-   * already exists, unless ifNotExists is true.
-   */
-  public CreateDbStmt(String dbName, String comment, HdfsUri location,
-      boolean ifNotExists) {
-    this.dbName_ = dbName;
-    this.comment_ = comment;
-    this.location_ = location;
-    this.ifNotExists_ = ifNotExists;
-  }
-
-  public String getComment() { return comment_; }
-  public String getDb() { return dbName_; }
-  public boolean getIfNotExists() { return ifNotExists_; }
-  public HdfsUri getLocation() { return location_; }
-
-  @Override
-  public String toSql() {
-    StringBuilder sb = new StringBuilder("CREATE DATABASE");
-    if (ifNotExists_) sb.append(" IF NOT EXISTS");
-    sb.append(" ").append(dbName_);
-    if (comment_ != null) sb.append(" COMMENT '" + comment_ + "'");
-    if (location_ != null) sb.append(" LOCATION '" + location_ + "'");
-    return sb.toString();
-  }
-
-  public TCreateDbParams toThrift() {
-    TCreateDbParams params = new TCreateDbParams();
-    params.setDb(getDb());
-    params.setComment(getComment());
-    params.setLocation(location_ == null ? null : location_.toString());
-    params.setIf_not_exists(getIfNotExists());
-    return params;
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    // Check whether the db name meets the Metastore's requirements.
-    if (!MetaStoreUtils.validateName(dbName_)) {
-      throw new AnalysisException("Invalid database name: " + dbName_);
-    }
-
-    // Note: It is possible that a database with the same name was created external to
-    // this Impala instance. If that happens, the caller will not get an
-    // AnalysisException when creating the database, they will get a Hive
-    // AlreadyExistsException once the request has been sent to the metastore.
-    Db db = analyzer.getDb(getDb(), Privilege.CREATE, false);
-    if (db != null && !ifNotExists_) {
-      throw new AnalysisException(Analyzer.DB_ALREADY_EXISTS_ERROR_MSG + getDb());
-    }
-
-    if (location_ != null) {
-      location_.analyze(analyzer, Privilege.ALL, FsAction.READ_WRITE);
-    }
-  }
-}
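
A small sketch of the DDL string that toSql() above renders, with invented values for the name, comment and location:

public class CreateDbSqlSketch {
  public static void main(String[] args) {
    String dbName = "sales_db";                             // hypothetical
    String comment = "per-region sales data";               // hypothetical
    String location = "/user/hive/warehouse/sales_db.db";   // hypothetical
    boolean ifNotExists = true;

    StringBuilder sb = new StringBuilder("CREATE DATABASE");
    if (ifNotExists) sb.append(" IF NOT EXISTS");
    sb.append(" ").append(dbName);
    if (comment != null) sb.append(" COMMENT '" + comment + "'");
    if (location != null) sb.append(" LOCATION '" + location + "'");
    System.out.println(sb.toString());
    // Prints (on one line):
    // CREATE DATABASE IF NOT EXISTS sales_db COMMENT 'per-region sales data'
    //   LOCATION '/user/hive/warehouse/sales_db.db'
  }
}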

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/CreateDropRoleStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/CreateDropRoleStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/CreateDropRoleStmt.java
deleted file mode 100644
index ef90b8a..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/CreateDropRoleStmt.java
+++ /dev/null
@@ -1,63 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import com.cloudera.impala.catalog.Role;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TCreateDropRoleParams;
-import com.google.common.base.Preconditions;
-
-/**
- * Represents a "CREATE ROLE" or "DROP ROLE" statement.
- */
-public class CreateDropRoleStmt extends AuthorizationStmt {
-  private final String roleName_;
-  private final boolean isDropRole_;
-
-  // Set in analysis
-  private String user_;
-
-  public CreateDropRoleStmt(String roleName, boolean isDropRole) {
-    Preconditions.checkNotNull(roleName);
-    roleName_ = roleName;
-    isDropRole_ = isDropRole;
-  }
-
-  @Override
-  public String toSql() {
-    return String.format("%s ROLE %s", isDropRole_ ? "DROP" : "CREATE", roleName_);
-  }
-
-  public TCreateDropRoleParams toThrift() {
-    TCreateDropRoleParams params = new TCreateDropRoleParams();
-    params.setRole_name(roleName_);
-    params.setIs_drop(isDropRole_);
-    return params;
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    super.analyze(analyzer);
-    Role existingRole = analyzer.getCatalog().getAuthPolicy().getRole(roleName_);
-    if (isDropRole_ && existingRole == null) {
-      throw new AnalysisException(String.format("Role '%s' does not exist.", roleName_));
-    } else if (!isDropRole_ && existingRole != null) {
-      throw new AnalysisException(String.format("Role '%s' already exists.", roleName_));
-    }
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/CreateFunctionStmtBase.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/CreateFunctionStmtBase.java b/fe/src/main/java/com/cloudera/impala/analysis/CreateFunctionStmtBase.java
deleted file mode 100644
index ebfd7b6..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/CreateFunctionStmtBase.java
+++ /dev/null
@@ -1,206 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-
-import org.apache.hadoop.fs.permission.FsAction;
-
-import com.cloudera.impala.authorization.AuthorizeableFn;
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.authorization.PrivilegeRequest;
-import com.cloudera.impala.catalog.Catalog;
-import com.cloudera.impala.catalog.Db;
-import com.cloudera.impala.catalog.Function;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TCreateFunctionParams;
-import com.cloudera.impala.thrift.TFunctionBinaryType;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-/**
- * Base class for CREATE [AGGREGATE] FUNCTION statements.
- */
-public abstract class CreateFunctionStmtBase extends StatementBase {
-
-  // Enums for valid keys for optional arguments.
-  public enum OptArg {
-    COMMENT,
-    SYMBOL,           // Only used for Udfs
-    PREPARE_FN,       // Only used for Udfs
-    CLOSE_FN,         // Only used for Udfs
-    UPDATE_FN,        // Only used for Udas
-    INIT_FN,          // Only used for Udas
-    SERIALIZE_FN,     // Only used for Udas
-    MERGE_FN,         // Only used for Udas
-    FINALIZE_FN       // Only used for Udas
-  };
-
-  protected final FunctionName fnName_;
-  protected final FunctionArgs args_;
-  protected final TypeDef retTypeDef_;
-  protected final HdfsUri location_;
-  protected final HashMap<CreateFunctionStmtBase.OptArg, String> optArgs_;
-  protected final boolean ifNotExists_;
-
-  // Result of analysis.
-  protected Function fn_;
-
-  // Db object for function fn_. Set in analyze().
-  protected Db db_;
-
-  // Set in analyze()
-  protected String sqlString_;
-
-  protected CreateFunctionStmtBase(FunctionName fnName, FunctionArgs args,
-      TypeDef retTypeDef, HdfsUri location, boolean ifNotExists,
-      HashMap<CreateFunctionStmtBase.OptArg, String> optArgs) {
-    // The return and arg types must either be both null or non-null.
-    Preconditions.checkState(!(args == null ^ retTypeDef == null));
-    fnName_ = fnName;
-    args_ = args;
-    retTypeDef_ = retTypeDef;
-    location_ = location;
-    ifNotExists_ = ifNotExists;
-    optArgs_ = optArgs;
-  }
-
-  public String getComment() { return optArgs_.get(OptArg.COMMENT); }
-  public boolean getIfNotExists() { return ifNotExists_; }
-  public boolean hasSignature() { return args_ != null; }
-
-  public TCreateFunctionParams toThrift() {
-    TCreateFunctionParams params = new TCreateFunctionParams(fn_.toThrift());
-    params.setIf_not_exists(getIfNotExists());
-    params.setFn(fn_.toThrift());
-    return params;
-  }
-
-  // Returns optArg[key], first validating that it is set.
-  protected String checkAndGetOptArg(OptArg key)
-      throws AnalysisException {
-    if (!optArgs_.containsKey(key)) {
-      throw new AnalysisException("Argument '" + key + "' must be set.");
-    }
-    return optArgs_.get(key);
-  }
-
-  protected void checkOptArgNotSet(OptArg key)
-      throws AnalysisException {
-    if (optArgs_.containsKey(key)) {
-      throw new AnalysisException("Optional argument '" + key + "' should not be set.");
-    }
-  }
-
-  // Returns the function's binary type based on the path extension.
-  private TFunctionBinaryType getBinaryType() throws AnalysisException {
-    TFunctionBinaryType binaryType = null;
-    String binaryPath = fn_.getLocation().getLocation();
-    int suffixIndex = binaryPath.lastIndexOf(".");
-    if (suffixIndex != -1) {
-      String suffix = binaryPath.substring(suffixIndex + 1);
-      if (suffix.equalsIgnoreCase("jar")) {
-        binaryType = TFunctionBinaryType.JAVA;
-      } else if (suffix.equalsIgnoreCase("so")) {
-        binaryType = TFunctionBinaryType.NATIVE;
-      } else if (suffix.equalsIgnoreCase("ll")) {
-        binaryType = TFunctionBinaryType.IR;
-      }
-    }
-    if (binaryType == null) {
-      throw new AnalysisException("Unknown binary type: '" + binaryPath +
-          "'. Binary must end in .jar, .so or .ll");
-    }
-    return binaryType;
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    // Validate function name is legal
-    fnName_.analyze(analyzer);
-
-    if (hasSignature()) {
-      // Validate function arguments and return type.
-      args_.analyze(analyzer);
-      retTypeDef_.analyze(analyzer);
-      fn_ = createFunction(fnName_, args_.getArgTypes(), retTypeDef_.getType(),
-          args_.hasVarArgs());
-    } else {
-      fn_ = createFunction(fnName_, null, null, false);
-    }
-
-    // For now, if authorization is enabled, the user needs ALL on the server
-    // to create functions.
-    // TODO: this is not the right granularity but acceptable for now.
-    analyzer.registerPrivReq(new PrivilegeRequest(
-        new AuthorizeableFn(fn_.signatureString()), Privilege.ALL));
-
-    Db builtinsDb = analyzer.getCatalog().getDb(Catalog.BUILTINS_DB);
-    if (builtinsDb.containsFunction(fn_.getName())) {
-      throw new AnalysisException("Function cannot have the same name as a builtin: " +
-          fn_.getFunctionName().getFunction());
-    }
-
-    db_ = analyzer.getDb(fn_.dbName(), Privilege.CREATE);
-    Function existingFn = db_.getFunction(fn_, Function.CompareMode.IS_INDISTINGUISHABLE);
-    if (existingFn != null && !ifNotExists_) {
-      throw new AnalysisException(Analyzer.FN_ALREADY_EXISTS_ERROR_MSG +
-          existingFn.signatureString());
-    }
-
-    location_.analyze(analyzer, Privilege.CREATE, FsAction.READ);
-    fn_.setLocation(location_);
-
-    // Infer the function's binary type (JAVA, NATIVE or IR) from the file extension
-    // of the binary.
-    fn_.setBinaryType(getBinaryType());
-
-    // Forbid unsupported and complex types.
-    if (hasSignature()) {
-      List<Type> refdTypes = Lists.newArrayList(fn_.getReturnType());
-      refdTypes.addAll(Lists.newArrayList(fn_.getArgs()));
-      for (Type t: refdTypes) {
-        if (!t.isSupported() || t.isComplexType()) {
-          throw new AnalysisException(
-              String.format("Type '%s' is not supported in UDFs/UDAs.", t.toSql()));
-        }
-      }
-    } else if (fn_.getBinaryType() != TFunctionBinaryType.JAVA) {
-      throw new AnalysisException(
-          String.format("Native functions require a return type and/or " +
-              "argument types: %s", fn_.getFunctionName()));
-    }
-
-    // Check if the function can be persisted. We persist all native/IR functions
-    // and also JAVA functions added without signature. Only JAVA functions added
-    // with signatures aren't persisted.
-    if (getBinaryType() == TFunctionBinaryType.JAVA && hasSignature()) {
-      fn_.setIsPersistent(false);
-    } else {
-      fn_.setIsPersistent(true);
-    }
-  }
-
-  /**
-   * Creates a concrete function.
-   */
-  protected abstract Function createFunction(FunctionName fnName,
-      ArrayList<Type> argTypes, Type retType, boolean hasVarArgs);
-}
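
A self-contained sketch of the extension-based dispatch in getBinaryType() above; the local enum stands in for TFunctionBinaryType and the paths are invented:

public class BinaryTypeBySuffixSketch {
  enum BinaryType { JAVA, NATIVE, IR }

  // Map the binary's file extension to its function binary type, as in getBinaryType().
  static BinaryType binaryTypeFor(String binaryPath) {
    int suffixIndex = binaryPath.lastIndexOf('.');
    if (suffixIndex != -1) {
      String suffix = binaryPath.substring(suffixIndex + 1);
      if (suffix.equalsIgnoreCase("jar")) return BinaryType.JAVA;
      if (suffix.equalsIgnoreCase("so")) return BinaryType.NATIVE;
      if (suffix.equalsIgnoreCase("ll")) return BinaryType.IR;
    }
    throw new IllegalArgumentException("Unknown binary type: '" + binaryPath +
        "'. Binary must end in .jar, .so or .ll");
  }

  public static void main(String[] args) {
    System.out.println(binaryTypeFor("/udfs/my-udfs.jar"));  // JAVA
    System.out.println(binaryTypeFor("/udfs/libudf.so"));    // NATIVE
    System.out.println(binaryTypeFor("/udfs/udf.ll"));       // IR
  }
}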

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/CreateOrAlterViewStmtBase.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/CreateOrAlterViewStmtBase.java b/fe/src/main/java/com/cloudera/impala/analysis/CreateOrAlterViewStmtBase.java
deleted file mode 100644
index cc04b04..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/CreateOrAlterViewStmtBase.java
+++ /dev/null
@@ -1,209 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Set;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TCreateOrAlterViewParams;
-import com.cloudera.impala.thrift.TTableName;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Sets;
-
-/**
- * Base class for CREATE VIEW and ALTER VIEW AS SELECT statements.
- */
-public abstract class CreateOrAlterViewStmtBase extends StatementBase {
-  private final static Logger LOG =
-      LoggerFactory.getLogger(CreateOrAlterViewStmtBase.class);
-
-  protected final boolean ifNotExists_;
-  protected final TableName tableName_;
-  protected final ArrayList<ColumnDef> columnDefs_;
-  protected final String comment_;
-  protected final QueryStmt viewDefStmt_;
-
-  // Set during analysis
-  protected String dbName_;
-  protected String owner_;
-
-  // The original SQL-string given as view definition. Set during analysis.
-  // Corresponds to Hive's viewOriginalText.
-  protected String originalViewDef_;
-
-  // Query statement (as SQL string) that defines the View for view substitution.
-  // It is a transformation of the original view definition, e.g., to enforce the
-  // columnDefs even if the original view definition has explicit column aliases.
-  // If column definitions were given, then this "expanded" view definition
-  // wraps the original view definition in a select stmt as follows.
-  //
-  // SELECT viewName.origCol1 AS colDesc1, viewName.origCol2 AS colDesc2, ...
-  // FROM (originalViewDef) AS viewName
-  //
-  // Corresponds to Hive's viewExpandedText, but is not identical to the SQL
-  // Hive would produce in view creation.
-  protected String inlineViewDef_;
-
-  // Columns to use in the select list of the expanded SQL string and when registering
-  // this view in the metastore. Set in analysis.
-  protected ArrayList<ColumnDef> finalColDefs_;
-
-  public CreateOrAlterViewStmtBase(boolean ifNotExists, TableName tableName,
-      ArrayList<ColumnDef> columnDefs, String comment, QueryStmt viewDefStmt) {
-    Preconditions.checkNotNull(tableName);
-    Preconditions.checkNotNull(viewDefStmt);
-    this.ifNotExists_ = ifNotExists;
-    this.tableName_ = tableName;
-    this.columnDefs_ = columnDefs;
-    this.comment_ = comment;
-    this.viewDefStmt_ = viewDefStmt;
-  }
-
-  /**
-   * Sets the originalViewDef and the expanded inlineViewDef based on viewDefStmt.
-   * If columnDefs were given, checks that they do not contain duplicate column names
-   * and throws an exception if they do.
-   */
-  protected void createColumnAndViewDefs(Analyzer analyzer) throws AnalysisException {
-    Preconditions.checkNotNull(dbName_);
-    Preconditions.checkNotNull(owner_);
-
-    // Set the finalColDefs to reflect the given column definitions.
-    if (columnDefs_ != null) {
-      Preconditions.checkState(!columnDefs_.isEmpty());
-      if (columnDefs_.size() != viewDefStmt_.getColLabels().size()) {
-        String cmp =
-            (columnDefs_.size() > viewDefStmt_.getColLabels().size()) ? "more" : "fewer";
-        throw new AnalysisException(String.format("Column-definition list has " +
-            "%s columns (%s) than the view-definition query statement returns (%s).",
-            cmp, columnDefs_.size(), viewDefStmt_.getColLabels().size()));
-      }
-
-      finalColDefs_ = columnDefs_;
-      Preconditions.checkState(
-          columnDefs_.size() == viewDefStmt_.getBaseTblResultExprs().size());
-      for (int i = 0; i < columnDefs_.size(); ++i) {
-        // Set type in the column definition from the view-definition statement.
-        columnDefs_.get(i).setType(viewDefStmt_.getBaseTblResultExprs().get(i).getType());
-      }
-    } else {
-      // Create list of column definitions from the view-definition statement.
-      finalColDefs_ = Lists.newArrayList();
-      List<Expr> exprs = viewDefStmt_.getBaseTblResultExprs();
-      List<String> labels = viewDefStmt_.getColLabels();
-      Preconditions.checkState(exprs.size() == labels.size());
-      for (int i = 0; i < viewDefStmt_.getColLabels().size(); ++i) {
-        ColumnDef colDef = new ColumnDef(labels.get(i), null, null);
-        colDef.setType(exprs.get(i).getType());
-        finalColDefs_.add(colDef);
-      }
-    }
-
-    // Check that the column definitions have valid names, and that there are no
-    // duplicate column names.
-    Set<String> distinctColNames = Sets.newHashSet();
-    for (ColumnDef colDesc: finalColDefs_) {
-      colDesc.analyze();
-      if (!distinctColNames.add(colDesc.getColName().toLowerCase())) {
-        throw new AnalysisException("Duplicate column name: " + colDesc.getColName());
-      }
-    }
-
-    // Set original and expanded view-definition SQL strings.
-    originalViewDef_ = viewDefStmt_.toSql();
-
-    // If no column definitions were given, then the expanded view SQL is the same
-    // as the original one.
-    if (columnDefs_ == null) {
-      inlineViewDef_ = originalViewDef_;
-      return;
-    }
-
-    // Wrap the original view-definition statement into a SELECT to enforce the
-    // given column definitions.
-    StringBuilder sb = new StringBuilder();
-    sb.append("SELECT ");
-    for (int i = 0; i < finalColDefs_.size(); ++i) {
-      String colRef = ToSqlUtils.getIdentSql(viewDefStmt_.getColLabels().get(i));
-      String colAlias = ToSqlUtils.getIdentSql(finalColDefs_.get(i).getColName());
-      sb.append(String.format("%s.%s AS %s", tableName_.getTbl(), colRef, colAlias));
-      sb.append((i+1 != finalColDefs_.size()) ? ", " : "");
-    }
-    // Do not use 'AS' for table aliases because Hive only accepts them without 'AS'.
-    sb.append(String.format(" FROM (%s) %s", originalViewDef_, tableName_.getTbl()));
-    inlineViewDef_ = sb.toString();
-  }
-
-  /**
-   * Computes the column lineage graph for a create/alter view statement.
-   */
-  protected void computeLineageGraph(Analyzer analyzer) {
-    ColumnLineageGraph graph = analyzer.getColumnLineageGraph();
-    List<String> colDefs = Lists.newArrayList();
-    for (ColumnDef colDef: finalColDefs_) {
-      colDefs.add(dbName_ + "." + getTbl() + "." + colDef.getColName());
-    }
-    graph.addTargetColumnLabels(colDefs);
-    graph.computeLineageGraph(viewDefStmt_.getResultExprs(), analyzer);
-    LOG.trace("lineage: " + graph.debugString());
-  }
-
-  public TCreateOrAlterViewParams toThrift() {
-    TCreateOrAlterViewParams params = new TCreateOrAlterViewParams();
-    params.setView_name(new TTableName(getDb(), getTbl()));
-    for (ColumnDef col: finalColDefs_) {
-      params.addToColumns(col.toThrift());
-    }
-    params.setOwner(getOwner());
-    params.setIf_not_exists(getIfNotExists());
-    params.setOriginal_view_def(originalViewDef_);
-    params.setExpanded_view_def(inlineViewDef_);
-    if (comment_ != null) params.setComment(comment_);
-    return params;
-  }
-
-  /**
-   * Can only be called after analysis, returns the name of the database the table will
-   * be created within.
-   */
-  public String getDb() {
-    Preconditions.checkNotNull(dbName_);
-    return dbName_;
-  }
-
-  /**
-   * Can only be called after analysis, returns the owner of the view to be created.
-   */
-  public String getOwner() {
-    Preconditions.checkNotNull(owner_);
-    return owner_;
-  }
-
-  public List<ColumnDef> getColumnDescs() { return columnDefs_; }
-  public String getComment() { return comment_; }
-  public boolean getIfNotExists() { return ifNotExists_; }
-  public String getOriginalViewDef() { return originalViewDef_; }
-  public String getInlineViewDef() { return inlineViewDef_; }
-  public String getTbl() { return tableName_.getTbl(); }
-}
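
As a concrete sketch of the expanded view definition built in createColumnAndViewDefs() above (hypothetical view, table, and column names), a view created as CREATE VIEW v (a, b) AS SELECT c1, c2 FROM t would be stored with the expanded definition:

  SELECT v.c1 AS a, v.c2 AS b FROM (SELECT c1, c2 FROM t) v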



http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/AlterTableSetColumnStats.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetColumnStats.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetColumnStats.java
new file mode 100644
index 0000000..e9c9a14
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetColumnStats.java
@@ -0,0 +1,158 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import com.cloudera.impala.catalog.Column;
+import com.cloudera.impala.catalog.ColumnStats;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.thrift.TAlterTableParams;
+import com.cloudera.impala.thrift.TAlterTableType;
+import com.cloudera.impala.thrift.TAlterTableUpdateStatsParams;
+import com.google.common.base.Joiner;
+import com.google.common.base.Preconditions;
+
+/**
+* Represents an ALTER TABLE [<dbName>.]<tableName> SET COLUMN STATS <colName>
+* ('statsKey'='val','statsKey2'='val2') statement.
+*
+* The keys as well as the values are specified as string literals to be consistent
+* with the existing DDL for setting TBLPROPERTIES/SERDEPROPERTIES, in particular,
+* setting the 'numRows' table/partition property.
+*
+* Stats key comparisons are case-insensitive.
+*/
+public class AlterTableSetColumnStats extends AlterTableStmt {
+  private final String colName_;
+  private final HashMap<String, String> statsMap_;
+
+  // Complete column stats reflecting this alteration. Existing stats values
+  // are preserved. Result of analysis.
+  private ColumnStats colStats_;
+
+  public AlterTableSetColumnStats(TableName tableName, String colName,
+      HashMap<String, String> statsMap) {
+    super(tableName);
+    colName_ = colName;
+    statsMap_ = statsMap;
+  }
+
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    super.analyze(analyzer);
+
+    Column col = getTargetTable().getColumn(colName_);
+    if (col == null) {
+      throw new AnalysisException(
+          String.format("Column '%s' does not exist in table: %s",
+              colName_, getTargetTable().getFullName()));
+    }
+    // Cannot update stats on partition columns because the HMS has no entries
+    // for them, and the stats can be computed directly from the metadata.
+    if (col.getPosition() < getTargetTable().getNumClusteringCols()) {
+      throw new AnalysisException(
+          "Updating the stats of a partition column is not allowed: " + colName_);
+    }
+    // Cannot update the stats if they are not supported for the column's type.
+    if (!ColumnStats.isSupportedColType(col.getType())) {
+      throw new AnalysisException(String.format(
+          "Statistics for column '%s' are not supported because " +
+          "it has type '%s'.", col.getName(), col.getType().toSql()));
+    }
+
+    // Copy the existing stats and then change the values according to the
+    // stats map of this stmt. The existing stats are first copied to preserve
+    // those stats values that are not changed by this stmt because all stats
+    // values are updated when altering the stats in the HMS.
+    colStats_ = col.getStats().clone();
+    for (Map.Entry<String, String> entry: statsMap_.entrySet()) {
+      ColumnStats.StatsKey statsKey = ColumnStats.StatsKey.fromString(entry.getKey());
+      if (statsKey == null) {
+        throw new AnalysisException(String.format(
+            "Invalid column stats key: %s\nValid keys are: %s",
+            entry.getKey(), Joiner.on(',').join(ColumnStats.StatsKey.values())));
+      }
+      setStatsValue(statsKey, entry.getValue(), col, colStats_);
+    }
+  }
+
+  /**
+   * Updates the given column stats based on statsKey and statsValue.
+   * Throws an AnalysisException if the statsValue is invalid or not applicable to the
+   * column (e.g., trying to update the avg/max size of a fixed-length column).
+   */
+  private void setStatsValue(ColumnStats.StatsKey statsKey, String statsValue,
+      Column col, ColumnStats stats) throws AnalysisException {
+    // Updating max/avg size is only allowed for variable length columns.
+    if (col.getType().isFixedLengthType()
+        && (statsKey == ColumnStats.StatsKey.AVG_SIZE
+            || statsKey == ColumnStats.StatsKey.MAX_SIZE)) {
+      throw new AnalysisException(String.format(
+          "Cannot update the '%s' stats of column '%s' with type '%s'.\n" +
+          "Changing '%s' is only allowed for variable-length columns.",
+          statsKey, col.getName(), col.getType().toSql(), statsKey));
+    }
+
+    if (statsKey == ColumnStats.StatsKey.NUM_DISTINCT_VALUES ||
+        statsKey == ColumnStats.StatsKey.NUM_NULLS ||
+        statsKey == ColumnStats.StatsKey.MAX_SIZE) {
+      Long statsVal = null;
+      try {
+        statsVal = Long.parseLong(statsValue);
+      } catch (Exception e) {
+      }
+      if (statsVal == null || statsVal < -1) {
+        throw new AnalysisException(String.format(
+            "Invalid stats value '%s' for column stats key: %s\n" +
+            "Expected a positive integer or -1 for unknown.",
+            statsValue, statsKey));
+      }
+      stats.update(statsKey, statsVal);
+    } else if (statsKey == ColumnStats.StatsKey.AVG_SIZE) {
+      Float statsVal = null;
+      try {
+        statsVal = Float.parseFloat(statsValue);
+      } catch (Exception e) {
+      }
+      if (statsVal == null || (statsVal < 0 && statsVal != -1) ||
+          statsVal.isNaN() || statsVal.isInfinite()) {
+        throw new AnalysisException(String.format(
+            "Invalid stats value '%s' for column stats key: %s\n" +
+            "Expected a positive floating-point number or -1 for unknown.",
+            statsValue, statsKey));
+      }
+      stats.update(statsKey, statsVal);
+    } else {
+      Preconditions.checkState(false, "Unhandled StatsKey value: " + statsKey);
+    }
+  }
+
+  @Override
+  public TAlterTableParams toThrift() {
+   TAlterTableParams params = super.toThrift();
+   params.setAlter_type(TAlterTableType.UPDATE_STATS);
+   TAlterTableUpdateStatsParams updateStatsParams =
+       new TAlterTableUpdateStatsParams();
+   updateStatsParams.setTable_name(getTargetTable().getTableName().toThrift());
+   updateStatsParams.putToColumn_stats(colName_.toString(), colStats_.toThrift());
+   params.setUpdate_stats_params(updateStatsParams);
+   return params;
+  }
+}
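
For illustration, a statement of the form this class analyzes might look like the following sketch (table, column, and values are hypothetical; the valid stats keys are those defined by ColumnStats.StatsKey):

  ALTER TABLE sales SET COLUMN STATS customer_name ('numDVs'='10000','numNulls'='0');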

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/AlterTableSetFileFormatStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetFileFormatStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetFileFormatStmt.java
new file mode 100644
index 0000000..7e18aa6
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetFileFormatStmt.java
@@ -0,0 +1,57 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.thrift.TAlterTableParams;
+import com.cloudera.impala.thrift.TAlterTableSetFileFormatParams;
+import com.cloudera.impala.thrift.TAlterTableType;
+import com.cloudera.impala.thrift.THdfsFileFormat;
+
+/**
+ * Represents an ALTER TABLE [PARTITION partitionSpec] SET FILEFORMAT statement.
+ */
+public class AlterTableSetFileFormatStmt extends AlterTableSetStmt {
+  private final THdfsFileFormat fileFormat_;
+
+  public AlterTableSetFileFormatStmt(TableName tableName,
+      PartitionSpec partitionSpec, THdfsFileFormat fileFormat) {
+    super(tableName, partitionSpec);
+    this.fileFormat_ = fileFormat;
+  }
+
+  public THdfsFileFormat getFileFormat() { return fileFormat_; }
+
+  @Override
+  public TAlterTableParams toThrift() {
+    TAlterTableParams params = super.toThrift();
+    params.setAlter_type(TAlterTableType.SET_FILE_FORMAT);
+    TAlterTableSetFileFormatParams fileFormatParams =
+        new TAlterTableSetFileFormatParams(fileFormat_);
+    if (getPartitionSpec() != null) {
+      fileFormatParams.setPartition_spec(getPartitionSpec().toThrift());
+    }
+    params.setSet_file_format_params(fileFormatParams);
+    return params;
+  }
+
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    super.analyze(analyzer);
+  }
+}
\ No newline at end of file
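
A hypothetical example of the statement form handled above:

  ALTER TABLE logs PARTITION (year=2016, month=9) SET FILEFORMAT PARQUET;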

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/AlterTableSetLocationStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetLocationStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetLocationStmt.java
new file mode 100644
index 0000000..fcc9b53
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetLocationStmt.java
@@ -0,0 +1,86 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import com.cloudera.impala.authorization.Privilege;
+import com.cloudera.impala.catalog.HdfsPartition;
+import com.cloudera.impala.catalog.HdfsTable;
+import com.cloudera.impala.catalog.Table;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.thrift.TAlterTableParams;
+import com.cloudera.impala.thrift.TAlterTableSetLocationParams;
+import com.cloudera.impala.thrift.TAlterTableType;
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.fs.permission.FsAction;
+
+/**
+ * Represents an ALTER TABLE [PARTITION partitionSpec] SET LOCATION statement.
+ */
+public class AlterTableSetLocationStmt extends AlterTableSetStmt {
+  private final HdfsUri location_;
+
+  public AlterTableSetLocationStmt(TableName tableName,
+      PartitionSpec partitionSpec, HdfsUri location) {
+    super(tableName, partitionSpec);
+    Preconditions.checkNotNull(location);
+    this.location_ = location;
+  }
+
+  public HdfsUri getLocation() { return location_; }
+
+  @Override
+  public TAlterTableParams toThrift() {
+    TAlterTableParams params = super.toThrift();
+    params.setAlter_type(TAlterTableType.SET_LOCATION);
+    TAlterTableSetLocationParams locationParams =
+        new TAlterTableSetLocationParams(location_.toString());
+    if (getPartitionSpec() != null) {
+      locationParams.setPartition_spec(getPartitionSpec().toThrift());
+    }
+    params.setSet_location_params(locationParams);
+    return params;
+  }
+
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    super.analyze(analyzer);
+    location_.analyze(analyzer, Privilege.ALL, FsAction.READ_WRITE);
+
+    Table table = getTargetTable();
+    Preconditions.checkNotNull(table);
+    if (table instanceof HdfsTable) {
+      HdfsTable hdfsTable = (HdfsTable) table;
+      if (getPartitionSpec() != null) {
+        // Targeting a partition rather than a table.
+        PartitionSpec partitionSpec = getPartitionSpec();
+        HdfsPartition partition = hdfsTable.getPartition(
+            partitionSpec.getPartitionSpecKeyValues());
+        Preconditions.checkNotNull(partition);
+        if (partition.isMarkedCached()) {
+          throw new AnalysisException(String.format("Target partition is cached, " +
+              "please uncache before changing the location using: ALTER TABLE %s %s " +
+              "SET UNCACHED", table.getFullName(), partitionSpec.toSql()));
+        }
+      } else if (hdfsTable.isMarkedCached()) {
+        throw new AnalysisException(String.format("Target table is cached, please " +
+            "uncache before changing the location using: ALTER TABLE %s SET UNCACHED",
+            table.getFullName()));
+      }
+    }
+  }
+}
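
An illustrative statement of the form this class analyzes (hypothetical table and path); note that the analysis above rejects cached tables and partitions:

  ALTER TABLE logs PARTITION (year=2016, month=9) SET LOCATION 'hdfs://nameservice1/data/logs/2016/09';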

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/AlterTableSetStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetStmt.java
new file mode 100644
index 0000000..2857211
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetStmt.java
@@ -0,0 +1,58 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import com.cloudera.impala.authorization.Privilege;
+import com.cloudera.impala.catalog.HBaseTable;
+import com.cloudera.impala.catalog.Table;
+import com.cloudera.impala.common.AnalysisException;
+
+/**
+ * Base class for all ALTER TABLE ... SET statements
+ */
+public class AlterTableSetStmt extends AlterTableStmt {
+  protected final PartitionSpec partitionSpec_;
+
+  public AlterTableSetStmt(TableName tableName, PartitionSpec partitionSpec) {
+    super(tableName);
+    partitionSpec_ = partitionSpec;
+    if (partitionSpec_ != null) partitionSpec_.setTableName(tableName);
+  }
+
+  public PartitionSpec getPartitionSpec() { return partitionSpec_; }
+
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    super.analyze(analyzer);
+    Table t = getTargetTable();
+    // TODO: Support ALTER TABLE SET on HBase tables. Requires validating changes
+    // to the SERDEPROPERTIES and TBLPROPERTIES to ensure the table metadata does not
+    // become invalid.
+    if (t instanceof HBaseTable) {
+      throw new AnalysisException("ALTER TABLE SET not currently supported on " +
+          "HBase tables.");
+    }
+
+    // Altering the table rather than the partition.
+    if (partitionSpec_ == null) return;
+
+    partitionSpec_.setPartitionShouldExist();
+    partitionSpec_.setPrivilegeRequirement(Privilege.ALTER);
+    partitionSpec_.analyze(analyzer);
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/AlterTableSetTblProperties.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetTblProperties.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetTblProperties.java
new file mode 100644
index 0000000..da92267
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetTblProperties.java
@@ -0,0 +1,150 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.avro.SchemaParseException;
+import org.apache.hadoop.hive.serde2.avro.AvroSerdeUtils;
+
+import com.cloudera.impala.catalog.HdfsFileFormat;
+import com.cloudera.impala.catalog.HdfsTable;
+import com.cloudera.impala.catalog.Table;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.thrift.*;
+import com.cloudera.impala.util.AvroSchemaParser;
+import com.cloudera.impala.util.AvroSchemaUtils;
+import com.cloudera.impala.util.MetaStoreUtil;
+
+import com.google.common.base.Preconditions;
+import com.google.common.base.Strings;
+import com.google.common.collect.Lists;
+
+/**
+* Represents an ALTER TABLE SET [PARTITION ('k1'='a', 'k2'='b'...)]
+* TBLPROPERTIES|SERDEPROPERTIES ('p1'='v1', ...) statement.
+*/
+public class AlterTableSetTblProperties extends AlterTableSetStmt {
+  private final TTablePropertyType targetProperty_;
+  private final HashMap<String, String> tblProperties_;
+
+  public AlterTableSetTblProperties(TableName tableName, PartitionSpec partitionSpec,
+      TTablePropertyType targetProperty, HashMap<String, String> tblProperties) {
+    super(tableName, partitionSpec);
+    Preconditions.checkNotNull(tblProperties);
+    Preconditions.checkNotNull(targetProperty);
+    targetProperty_ = targetProperty;
+    tblProperties_ = tblProperties;
+    CreateTableStmt.unescapeProperties(tblProperties_);
+  }
+
+  public HashMap<String, String> getTblProperties() { return tblProperties_; }
+
+  @Override
+  public TAlterTableParams toThrift() {
+   TAlterTableParams params = super.toThrift();
+   params.setAlter_type(TAlterTableType.SET_TBL_PROPERTIES);
+   TAlterTableSetTblPropertiesParams tblPropertyParams =
+       new TAlterTableSetTblPropertiesParams();
+   tblPropertyParams.setTarget(targetProperty_);
+   tblPropertyParams.setProperties(tblProperties_);
+   if (partitionSpec_ != null) {
+     tblPropertyParams.setPartition_spec(partitionSpec_.toThrift());
+   }
+   params.setSet_tbl_properties_params(tblPropertyParams);
+   return params;
+  }
+
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    super.analyze(analyzer);
+
+    MetaStoreUtil.checkShortPropertyMap("Property", tblProperties_);
+
+    // Check avro schema when it is set in avro.schema.url or avro.schema.literal to
+    // avoid potential metadata corruption (see IMPALA-2042).
+    // If both properties are set then only check avro.schema.literal and ignore
+    // avro.schema.url.
+    if (tblProperties_.containsKey(
+            AvroSerdeUtils.AvroTableProperties.SCHEMA_LITERAL.getPropName()) ||
+        tblProperties_.containsKey(
+            AvroSerdeUtils.AvroTableProperties.SCHEMA_URL.getPropName())) {
+      analyzeAvroSchema(analyzer);
+    }
+
+    // Analyze 'skip.header.line.count' property.
+    analyzeSkipHeaderLineCount(getTargetTable(), tblProperties_);
+  }
+
+  /**
+   * Check that Avro schema provided in avro.schema.url or avro.schema.literal is valid
+   * Json and contains only supported Impala types. If both properties are set, then
+   * avro.schema.url is ignored.
+   */
+  private void analyzeAvroSchema(Analyzer analyzer)
+      throws AnalysisException {
+    List<Map<String, String>> schemaSearchLocations = Lists.newArrayList();
+    schemaSearchLocations.add(tblProperties_);
+
+    String avroSchema = AvroSchemaUtils.getAvroSchema(schemaSearchLocations);
+    avroSchema = Strings.nullToEmpty(avroSchema);
+    if (avroSchema.isEmpty()) {
+      throw new AnalysisException("Avro schema is null or empty: " +
+          table_.getFullName());
+    }
+
+    // Check if the schema is valid and is supported by Impala
+    try {
+      AvroSchemaParser.parse(avroSchema);
+    } catch (SchemaParseException e) {
+      throw new AnalysisException(String.format(
+          "Error parsing Avro schema for table '%s': %s", table_.getFullName(),
+          e.getMessage()));
+    }
+  }
+
+  /**
+   * Analyze the 'skip.header.line.count' property to make sure it is set to a valid
+   * value. It is looked up in 'tblProperties', which must not be null.
+   */
+  public static void analyzeSkipHeaderLineCount(Map<String, String> tblProperties)
+      throws AnalysisException {
+    analyzeSkipHeaderLineCount(null, tblProperties);
+  }
+
+  /**
+   * Analyze the 'skip.header.line.count' property to make sure it is set to a valid
+   * value. It is looked up in 'tblProperties', which must not be null. If 'table' is not
+   * null, then the method ensures that 'skip.header.line.count' is supported for its
+   * table type. If it is null, then this check is omitted.
+   */
+  public static void analyzeSkipHeaderLineCount(Table table,
+      Map<String, String> tblProperties) throws AnalysisException {
+    if (tblProperties.containsKey(HdfsTable.TBL_PROP_SKIP_HEADER_LINE_COUNT)) {
+      if (table != null && !(table instanceof HdfsTable)) {
+        throw new AnalysisException(String.format("Table property " +
+            "'skip.header.line.count' is only supported for HDFS tables."));
+      }
+      StringBuilder error = new StringBuilder();
+      HdfsTable.parseSkipHeaderLineCount(tblProperties, error);
+      if (error.length() > 0) throw new AnalysisException(error.toString());
+    }
+  }
+}
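
Illustrative statements of the forms validated above (hypothetical table names, path, and values):

  ALTER TABLE csv_data SET TBLPROPERTIES ('skip.header.line.count'='1');
  ALTER TABLE avro_data SET TBLPROPERTIES ('avro.schema.url'='hdfs:///schemas/avro_data.avsc');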

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/AlterTableStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableStmt.java
new file mode 100644
index 0000000..aebd009
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableStmt.java
@@ -0,0 +1,91 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import com.cloudera.impala.authorization.Privilege;
+import com.cloudera.impala.catalog.DataSourceTable;
+import com.cloudera.impala.catalog.KuduTable;
+import com.cloudera.impala.catalog.Table;
+import com.cloudera.impala.catalog.View;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.thrift.TAlterTableParams;
+import com.cloudera.impala.thrift.TTableName;
+import com.google.common.base.Preconditions;
+
+/**
+ * Base class for all ALTER TABLE statements.
+ */
+public abstract class AlterTableStmt extends StatementBase {
+  protected final TableName tableName_;
+
+  // Set during analysis.
+  protected Table table_;
+
+  protected AlterTableStmt(TableName tableName) {
+    Preconditions.checkState(tableName != null && !tableName.isEmpty());
+    tableName_ = tableName;
+    table_ = null;
+  }
+
+  public String getTbl() { return tableName_.getTbl(); }
+
+  /**
+   * Can only be called after analysis, returns the parent database name of the target
+   * table for this ALTER TABLE statement.
+   */
+  public String getDb() {
+    return getTargetTable().getDb().getName();
+  }
+
+  /**
+   * Can only be called after analysis, returns the Table object of the target of this
+   * ALTER TABLE statement.
+   */
+  protected Table getTargetTable() {
+    Preconditions.checkNotNull(table_);
+    return table_;
+  }
+
+  public TAlterTableParams toThrift() {
+    TAlterTableParams params = new TAlterTableParams();
+    params.setTable_name(new TTableName(getDb(), getTbl()));
+    return params;
+  }
+
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    table_ = analyzer.getTable(tableName_, Privilege.ALTER);
+    if (table_ instanceof KuduTable
+        && !(this instanceof AlterTableSetTblProperties)
+        && !(this instanceof AlterTableSetColumnStats)
+        && !(this instanceof AlterTableOrViewRenameStmt)) {
+      throw new AnalysisException(String.format(
+          "ALTER TABLE not allowed on Kudu table: %s", table_.getFullName()));
+    }
+    if (table_ instanceof View) {
+      throw new AnalysisException(String.format(
+          "ALTER TABLE not allowed on a view: %s", table_.getFullName()));
+    }
+    if (table_ instanceof DataSourceTable
+        && !(this instanceof AlterTableSetColumnStats)) {
+      throw new AnalysisException(String.format(
+          "ALTER TABLE not allowed on a table produced by a data source: %s",
+          table_.getFullName()));
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/AlterViewStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterViewStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterViewStmt.java
new file mode 100644
index 0000000..640b3a1
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterViewStmt.java
@@ -0,0 +1,71 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import com.cloudera.impala.authorization.Privilege;
+import com.cloudera.impala.catalog.Table;
+import com.cloudera.impala.catalog.View;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.common.RuntimeEnv;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Represents an ALTER VIEW AS statement.
+ */
+public class AlterViewStmt extends CreateOrAlterViewStmtBase {
+
+  public AlterViewStmt(TableName tableName, QueryStmt viewDefStmt) {
+    super(false, tableName, null, null, viewDefStmt);
+  }
+
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    // Enforce Hive column labels for view compatibility.
+    analyzer.setUseHiveColLabels(true);
+    viewDefStmt_.analyze(analyzer);
+
+    Preconditions.checkState(tableName_ != null && !tableName_.isEmpty());
+    dbName_ = analyzer.getTargetDbName(tableName_);
+    owner_ = analyzer.getUser().getName();
+
+    Table table = analyzer.getTable(tableName_, Privilege.ALTER);
+    Preconditions.checkNotNull(table);
+    if (!(table instanceof View)) {
+      throw new AnalysisException(String.format(
+          "ALTER VIEW not allowed on a table: %s.%s", dbName_, getTbl()));
+    }
+
+    createColumnAndViewDefs(analyzer);
+    if (RuntimeEnv.INSTANCE.computeLineage() || RuntimeEnv.INSTANCE.isTestEnv()) {
+      computeLineageGraph(analyzer);
+    }
+  }
+
+  @Override
+  public String toSql() {
+    StringBuilder sb = new StringBuilder();
+    sb.append("ALTER VIEW ");
+    if (tableName_.getDb() != null) {
+      sb.append(tableName_.getDb() + ".");
+    }
+    sb.append(tableName_.getTbl());
+    sb.append(" AS " + viewDefStmt_.toSql());
+    return sb.toString();
+  }
+}
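
An illustrative ALTER VIEW AS statement of the kind analyzed above (hypothetical view, table, and column names):

  ALTER VIEW reporting.active_users AS SELECT id, name FROM db1.users WHERE active = TRUE;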

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/AnalysisContext.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AnalysisContext.java b/fe/src/main/java/org/apache/impala/analysis/AnalysisContext.java
new file mode 100644
index 0000000..006474d
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/AnalysisContext.java
@@ -0,0 +1,544 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import java.io.StringReader;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.cloudera.impala.authorization.AuthorizationChecker;
+import com.cloudera.impala.authorization.AuthorizationConfig;
+import com.cloudera.impala.authorization.AuthorizeableColumn;
+import com.cloudera.impala.authorization.AuthorizeableTable;
+import com.cloudera.impala.authorization.Privilege;
+import com.cloudera.impala.authorization.PrivilegeRequest;
+import com.cloudera.impala.catalog.AuthorizationException;
+import com.cloudera.impala.catalog.Db;
+import com.cloudera.impala.catalog.ImpaladCatalog;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.common.InternalException;
+import com.cloudera.impala.common.Pair;
+import com.cloudera.impala.thrift.TAccessEvent;
+import com.cloudera.impala.thrift.TLineageGraph;
+import com.cloudera.impala.thrift.TQueryCtx;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+
+/**
+ * Wrapper class for parser and analyzer.
+ */
+public class AnalysisContext {
+  private final static Logger LOG = LoggerFactory.getLogger(AnalysisContext.class);
+  private final ImpaladCatalog catalog_;
+  private final TQueryCtx queryCtx_;
+  private final AuthorizationConfig authzConfig_;
+
+  // Set in analyze()
+  private AnalysisResult analysisResult_;
+
+  public AnalysisContext(ImpaladCatalog catalog, TQueryCtx queryCtx,
+      AuthorizationConfig authzConfig) {
+    catalog_ = catalog;
+    queryCtx_ = queryCtx;
+    authzConfig_ = authzConfig;
+  }
+
+  static public class AnalysisResult {
+    private StatementBase stmt_;
+    private Analyzer analyzer_;
+    private CreateTableStmt tmpCreateTableStmt_;
+
+    public boolean isAlterTableStmt() { return stmt_ instanceof AlterTableStmt; }
+    public boolean isAlterViewStmt() { return stmt_ instanceof AlterViewStmt; }
+    public boolean isComputeStatsStmt() { return stmt_ instanceof ComputeStatsStmt; }
+    public boolean isQueryStmt() { return stmt_ instanceof QueryStmt; }
+    public boolean isInsertStmt() { return stmt_ instanceof InsertStmt; }
+    public boolean isDropDbStmt() { return stmt_ instanceof DropDbStmt; }
+    public boolean isDropTableOrViewStmt() {
+      return stmt_ instanceof DropTableOrViewStmt;
+    }
+    public boolean isDropFunctionStmt() { return stmt_ instanceof DropFunctionStmt; }
+    public boolean isDropDataSrcStmt() { return stmt_ instanceof DropDataSrcStmt; }
+    public boolean isDropStatsStmt() { return stmt_ instanceof DropStatsStmt; }
+    public boolean isCreateTableLikeStmt() {
+      return stmt_ instanceof CreateTableLikeStmt;
+    }
+    public boolean isCreateViewStmt() { return stmt_ instanceof CreateViewStmt; }
+    public boolean isCreateTableAsSelectStmt() {
+      return stmt_ instanceof CreateTableAsSelectStmt;
+    }
+    public boolean isCreateTableStmt() { return stmt_ instanceof CreateTableStmt; }
+    public boolean isCreateDbStmt() { return stmt_ instanceof CreateDbStmt; }
+    public boolean isCreateUdfStmt() { return stmt_ instanceof CreateUdfStmt; }
+    public boolean isCreateUdaStmt() { return stmt_ instanceof CreateUdaStmt; }
+    public boolean isCreateDataSrcStmt() { return stmt_ instanceof CreateDataSrcStmt; }
+    public boolean isLoadDataStmt() { return stmt_ instanceof LoadDataStmt; }
+    public boolean isUseStmt() { return stmt_ instanceof UseStmt; }
+    public boolean isSetStmt() { return stmt_ instanceof SetStmt; }
+    public boolean isShowTablesStmt() { return stmt_ instanceof ShowTablesStmt; }
+    public boolean isShowDbsStmt() { return stmt_ instanceof ShowDbsStmt; }
+    public boolean isShowDataSrcsStmt() { return stmt_ instanceof ShowDataSrcsStmt; }
+    public boolean isShowStatsStmt() { return stmt_ instanceof ShowStatsStmt; }
+    public boolean isShowFunctionsStmt() { return stmt_ instanceof ShowFunctionsStmt; }
+    public boolean isShowCreateTableStmt() {
+      return stmt_ instanceof ShowCreateTableStmt;
+    }
+    public boolean isShowCreateFunctionStmt() {
+      return stmt_ instanceof ShowCreateFunctionStmt;
+    }
+    public boolean isShowFilesStmt() { return stmt_ instanceof ShowFilesStmt; }
+    public boolean isDescribeDbStmt() { return stmt_ instanceof DescribeDbStmt; }
+    public boolean isDescribeTableStmt() { return stmt_ instanceof DescribeTableStmt; }
+    public boolean isResetMetadataStmt() { return stmt_ instanceof ResetMetadataStmt; }
+    public boolean isExplainStmt() { return stmt_.isExplain(); }
+    public boolean isShowRolesStmt() { return stmt_ instanceof ShowRolesStmt; }
+    public boolean isShowGrantRoleStmt() { return stmt_ instanceof ShowGrantRoleStmt; }
+    public boolean isCreateDropRoleStmt() { return stmt_ instanceof CreateDropRoleStmt; }
+    public boolean isGrantRevokeRoleStmt() {
+      return stmt_ instanceof GrantRevokeRoleStmt;
+    }
+    public boolean isGrantRevokePrivStmt() {
+      return stmt_ instanceof GrantRevokePrivStmt;
+    }
+    public boolean isTruncateStmt() { return stmt_ instanceof TruncateStmt; }
+    public boolean isUpdateStmt() { return stmt_ instanceof UpdateStmt; }
+    public UpdateStmt getUpdateStmt() { return (UpdateStmt) stmt_; }
+    public boolean isDeleteStmt() { return stmt_ instanceof DeleteStmt; }
+    public DeleteStmt getDeleteStmt() { return (DeleteStmt) stmt_; }
+
+    public boolean isCatalogOp() {
+      return isUseStmt() || isViewMetadataStmt() || isDdlStmt();
+    }
+
+    private boolean isDdlStmt() {
+      return isCreateTableLikeStmt() || isCreateTableStmt() ||
+          isCreateViewStmt() || isCreateDbStmt() || isDropDbStmt() ||
+          isDropTableOrViewStmt() || isResetMetadataStmt() || isAlterTableStmt() ||
+          isAlterViewStmt() || isComputeStatsStmt() || isCreateUdfStmt() ||
+          isCreateUdaStmt() || isDropFunctionStmt() || isCreateTableAsSelectStmt() ||
+          isCreateDataSrcStmt() || isDropDataSrcStmt() || isDropStatsStmt() ||
+          isCreateDropRoleStmt() || isGrantRevokeStmt() || isTruncateStmt();
+    }
+
+    private boolean isViewMetadataStmt() {
+      return isShowFilesStmt() || isShowTablesStmt() || isShowDbsStmt() ||
+          isShowFunctionsStmt() || isShowRolesStmt() || isShowGrantRoleStmt() ||
+          isShowCreateTableStmt() || isShowDataSrcsStmt() || isShowStatsStmt() ||
+          isDescribeTableStmt() || isDescribeDbStmt() || isShowCreateFunctionStmt();
+    }
+
+    private boolean isGrantRevokeStmt() {
+      return isGrantRevokeRoleStmt() || isGrantRevokePrivStmt();
+    }
+
+    public boolean isDmlStmt() {
+      return isInsertStmt();
+    }
+
+    public AlterTableStmt getAlterTableStmt() {
+      Preconditions.checkState(isAlterTableStmt());
+      return (AlterTableStmt) stmt_;
+    }
+
+    public AlterViewStmt getAlterViewStmt() {
+      Preconditions.checkState(isAlterViewStmt());
+      return (AlterViewStmt) stmt_;
+    }
+
+    public ComputeStatsStmt getComputeStatsStmt() {
+      Preconditions.checkState(isComputeStatsStmt());
+      return (ComputeStatsStmt) stmt_;
+    }
+
+    public CreateTableLikeStmt getCreateTableLikeStmt() {
+      Preconditions.checkState(isCreateTableLikeStmt());
+      return (CreateTableLikeStmt) stmt_;
+    }
+
+    public CreateViewStmt getCreateViewStmt() {
+      Preconditions.checkState(isCreateViewStmt());
+      return (CreateViewStmt) stmt_;
+    }
+
+    public CreateTableAsSelectStmt getCreateTableAsSelectStmt() {
+      Preconditions.checkState(isCreateTableAsSelectStmt());
+      return (CreateTableAsSelectStmt) stmt_;
+    }
+
+    public CreateTableStmt getCreateTableStmt() {
+      Preconditions.checkState(isCreateTableStmt());
+      return (CreateTableStmt) stmt_;
+    }
+
+    public CreateTableStmt getTmpCreateTableStmt() {
+      return tmpCreateTableStmt_;
+    }
+
+    public CreateDbStmt getCreateDbStmt() {
+      Preconditions.checkState(isCreateDbStmt());
+      return (CreateDbStmt) stmt_;
+    }
+
+    public CreateUdfStmt getCreateUdfStmt() {
+      Preconditions.checkState(isCreateUdfStmt());
+      return (CreateUdfStmt) stmt_;
+    }
+
+    public CreateUdaStmt getCreateUdaStmt() {
+      Preconditions.checkState(isCreateUdaStmt());
+      return (CreateUdaStmt) stmt_;
+    }
+
+    public DropDbStmt getDropDbStmt() {
+      Preconditions.checkState(isDropDbStmt());
+      return (DropDbStmt) stmt_;
+    }
+
+    public DropTableOrViewStmt getDropTableOrViewStmt() {
+      Preconditions.checkState(isDropTableOrViewStmt());
+      return (DropTableOrViewStmt) stmt_;
+    }
+
+    public TruncateStmt getTruncateStmt() {
+      Preconditions.checkState(isTruncateStmt());
+      return (TruncateStmt) stmt_;
+    }
+
+    public DropFunctionStmt getDropFunctionStmt() {
+      Preconditions.checkState(isDropFunctionStmt());
+      return (DropFunctionStmt) stmt_;
+    }
+
+    public LoadDataStmt getLoadDataStmt() {
+      Preconditions.checkState(isLoadDataStmt());
+      return (LoadDataStmt) stmt_;
+    }
+
+    public QueryStmt getQueryStmt() {
+      Preconditions.checkState(isQueryStmt());
+      return (QueryStmt) stmt_;
+    }
+
+    public InsertStmt getInsertStmt() {
+      if (isCreateTableAsSelectStmt()) {
+        return getCreateTableAsSelectStmt().getInsertStmt();
+      } else {
+        Preconditions.checkState(isInsertStmt());
+        return (InsertStmt) stmt_;
+      }
+    }
+
+    public UseStmt getUseStmt() {
+      Preconditions.checkState(isUseStmt());
+      return (UseStmt) stmt_;
+    }
+
+    public SetStmt getSetStmt() {
+      Preconditions.checkState(isSetStmt());
+      return (SetStmt) stmt_;
+    }
+
+    public ShowTablesStmt getShowTablesStmt() {
+      Preconditions.checkState(isShowTablesStmt());
+      return (ShowTablesStmt) stmt_;
+    }
+
+    public ShowDbsStmt getShowDbsStmt() {
+      Preconditions.checkState(isShowDbsStmt());
+      return (ShowDbsStmt) stmt_;
+    }
+
+    public ShowDataSrcsStmt getShowDataSrcsStmt() {
+      Preconditions.checkState(isShowDataSrcsStmt());
+      return (ShowDataSrcsStmt) stmt_;
+    }
+
+    public ShowStatsStmt getShowStatsStmt() {
+      Preconditions.checkState(isShowStatsStmt());
+      return (ShowStatsStmt) stmt_;
+    }
+
+    public ShowFunctionsStmt getShowFunctionsStmt() {
+      Preconditions.checkState(isShowFunctionsStmt());
+      return (ShowFunctionsStmt) stmt_;
+    }
+
+    public ShowFilesStmt getShowFilesStmt() {
+      Preconditions.checkState(isShowFilesStmt());
+      return (ShowFilesStmt) stmt_;
+    }
+
+    public DescribeDbStmt getDescribeDbStmt() {
+      Preconditions.checkState(isDescribeDbStmt());
+      return (DescribeDbStmt) stmt_;
+    }
+
+    public DescribeTableStmt getDescribeTableStmt() {
+      Preconditions.checkState(isDescribeTableStmt());
+      return (DescribeTableStmt) stmt_;
+    }
+
+    public ShowCreateTableStmt getShowCreateTableStmt() {
+      Preconditions.checkState(isShowCreateTableStmt());
+      return (ShowCreateTableStmt) stmt_;
+    }
+
+    public ShowCreateFunctionStmt getShowCreateFunctionStmt() {
+      Preconditions.checkState(isShowCreateFunctionStmt());
+      return (ShowCreateFunctionStmt) stmt_;
+    }
+
+    public StatementBase getStmt() { return stmt_; }
+    public Analyzer getAnalyzer() { return analyzer_; }
+    public Set<TAccessEvent> getAccessEvents() { return analyzer_.getAccessEvents(); }
+    public boolean requiresRewrite() {
+      return analyzer_.containsSubquery() && !(stmt_ instanceof CreateViewStmt)
+          && !(stmt_ instanceof AlterViewStmt);
+    }
+    public TLineageGraph getThriftLineageGraph() {
+      return analyzer_.getThriftSerializedLineageGraph();
+    }
+  }
+
+  /**
+   * Parse and analyze 'stmt'. If 'stmt' is a nested query (i.e. query that
+   * contains subqueries), it is also rewritten by performing subquery unnesting.
+   * The transformed stmt is then re-analyzed in a new analysis context.
+   *
+   * The result of analysis can be retrieved by calling
+   * getAnalysisResult().
+   *
+   * @throws AnalysisException
+   *           On any other error, including parsing errors. Also thrown when any
+   *           missing tables are detected as a result of running analysis.
+   */
+  public void analyze(String stmt) throws AnalysisException {
+    Analyzer analyzer = new Analyzer(catalog_, queryCtx_, authzConfig_);
+    analyze(stmt, analyzer);
+  }
+
+  /**
+   * Parse and analyze 'stmt' using a specified Analyzer.
+   */
+  public void analyze(String stmt, Analyzer analyzer) throws AnalysisException {
+    SqlScanner input = new SqlScanner(new StringReader(stmt));
+    SqlParser parser = new SqlParser(input);
+    try {
+      analysisResult_ = new AnalysisResult();
+      analysisResult_.analyzer_ = analyzer;
+      if (analysisResult_.analyzer_ == null) {
+        analysisResult_.analyzer_ = new Analyzer(catalog_, queryCtx_, authzConfig_);
+      }
+      analysisResult_.stmt_ = (StatementBase) parser.parse().value;
+      if (analysisResult_.stmt_ == null) return;
+
+      // For CTAS, we copy the create statement in case we have to create a new CTAS
+      // statement after a query rewrite.
+      if (analysisResult_.stmt_ instanceof CreateTableAsSelectStmt) {
+        analysisResult_.tmpCreateTableStmt_ =
+            ((CreateTableAsSelectStmt)analysisResult_.stmt_).getCreateStmt().clone();
+      }
+
+      analysisResult_.stmt_.analyze(analysisResult_.analyzer_);
+      boolean isExplain = analysisResult_.isExplainStmt();
+
+      // Check if we need to rewrite the statement.
+      if (analysisResult_.requiresRewrite()) {
+        StatementBase rewrittenStmt = StmtRewriter.rewrite(analysisResult_);
+        // Re-analyze the rewritten statement.
+        Preconditions.checkNotNull(rewrittenStmt);
+        analysisResult_ = new AnalysisResult();
+        analysisResult_.analyzer_ = new Analyzer(catalog_, queryCtx_, authzConfig_);
+        analysisResult_.stmt_ = rewrittenStmt;
+        analysisResult_.stmt_.analyze(analysisResult_.analyzer_);
+        LOG.trace("rewrittenStmt: " + rewrittenStmt.toSql());
+        if (isExplain) analysisResult_.stmt_.setIsExplain();
+        Preconditions.checkState(!analysisResult_.requiresRewrite());
+      }
+    } catch (AnalysisException e) {
+      // Don't wrap AnalysisExceptions in another AnalysisException
+      throw e;
+    } catch (Exception e) {
+      throw new AnalysisException(parser.getErrorMsg(stmt), e);
+    }
+  }
+
+  /**
+   * Authorize an analyzed statement.
+   * analyze() must have already been called. Throws an AuthorizationException if the
+   * user doesn't have sufficient privileges to run this statement.
+   */
+  public void authorize(AuthorizationChecker authzChecker)
+      throws AuthorizationException, InternalException {
+    Preconditions.checkNotNull(analysisResult_);
+    Analyzer analyzer = getAnalyzer();
+    // Process statements for which column-level privilege requests may be registered
+    // except for DESCRIBE TABLE or REFRESH/INVALIDATE statements
+    if (analysisResult_.isQueryStmt() || analysisResult_.isInsertStmt() ||
+        analysisResult_.isUpdateStmt() || analysisResult_.isDeleteStmt() ||
+        analysisResult_.isCreateTableAsSelectStmt() ||
+        analysisResult_.isCreateViewStmt() || analysisResult_.isAlterViewStmt()) {
+      // Map of table name to a list of privilege requests associated with that table.
+      // These include both table-level and column-level privilege requests.
+      Map<String, List<PrivilegeRequest>> tablePrivReqs = Maps.newHashMap();
+      // Privilege requests that are not column or table-level.
+      List<PrivilegeRequest> otherPrivReqs = Lists.newArrayList();
+      // Group the registered privilege requests based on the table they reference.
+      for (PrivilegeRequest privReq: analyzer.getPrivilegeReqs()) {
+        String tableName = privReq.getAuthorizeable().getFullTableName();
+        if (tableName == null) {
+          otherPrivReqs.add(privReq);
+        } else {
+          List<PrivilegeRequest> requests = tablePrivReqs.get(tableName);
+          if (requests == null) {
+            requests = Lists.newArrayList();
+            tablePrivReqs.put(tableName, requests);
+          }
+          // The table-level SELECT must be the first table-level request, and it
+          // must precede all column-level privilege requests.
+          Preconditions.checkState((requests.isEmpty() ||
+              !(privReq.getAuthorizeable() instanceof AuthorizeableColumn)) ||
+              (requests.get(0).getAuthorizeable() instanceof AuthorizeableTable &&
+              requests.get(0).getPrivilege() == Privilege.SELECT));
+          requests.add(privReq);
+        }
+      }
+
+      // Check any non-table, non-column privilege requests first.
+      for (PrivilegeRequest request: otherPrivReqs) {
+        authorizePrivilegeRequest(authzChecker, request);
+      }
+
+      // Authorize table accesses, one table at a time, by considering both table and
+      // column-level privilege requests.
+      for (Map.Entry<String, List<PrivilegeRequest>> entry: tablePrivReqs.entrySet()) {
+        authorizeTableAccess(authzChecker, entry.getValue());
+      }
+    } else {
+      for (PrivilegeRequest privReq: analyzer.getPrivilegeReqs()) {
+        Preconditions.checkState(
+            !(privReq.getAuthorizeable() instanceof AuthorizeableColumn) ||
+            analysisResult_.isDescribeTableStmt() ||
+            analysisResult_.isResetMetadataStmt());
+        authorizePrivilegeRequest(authzChecker, privReq);
+      }
+    }
+
+    // Check any masked requests.
+    for (Pair<PrivilegeRequest, String> maskedReq: analyzer.getMaskedPrivilegeReqs()) {
+      if (!authzChecker.hasAccess(analyzer.getUser(), maskedReq.first)) {
+        throw new AuthorizationException(maskedReq.second);
+      }
+    }
+  }
+
+  /**
+   * Authorize a privilege request.
+   * Throws an AuthorizationException if the user doesn't have sufficient privileges for
+   * this request. Also, checks if the request references a system database.
+   */
+  private void authorizePrivilegeRequest(AuthorizationChecker authzChecker,
+    PrivilegeRequest request) throws AuthorizationException, InternalException {
+    Preconditions.checkNotNull(request);
+    String dbName = null;
+    if (request.getAuthorizeable() != null) {
+      dbName = request.getAuthorizeable().getDbName();
+    }
+    // If this is a system database, some actions should always be allowed
+    // or disabled, regardless of what is in the auth policy.
+    if (dbName != null && checkSystemDbAccess(dbName, request.getPrivilege())) {
+      return;
+    }
+    authzChecker.checkAccess(getAnalyzer().getUser(), request);
+  }
+
+  /**
+   * Authorize a list of privilege requests associated with a single table.
+   * It checks if the user has sufficient table-level privileges and if that is
+   * not the case, it falls back on checking column-level privileges, if any. This
+   * function requires 'SELECT' requests to be ordered by table and then by column
+   * privilege requests. Throws an AuthorizationException if the user doesn't have
+   * sufficient privileges.
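+   * For example, if a query reads two columns of a table and the user lacks
+   * table-level SELECT on that table, authorization succeeds only if the user has
+   * column-level SELECT on both referenced columns; the error raised on failure
+   * names only the table, never a column.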
+   */
+  private void authorizeTableAccess(AuthorizationChecker authzChecker,
+      List<PrivilegeRequest> requests)
+      throws AuthorizationException, InternalException {
+    Preconditions.checkState(!requests.isEmpty());
+    Analyzer analyzer = getAnalyzer();
+    boolean hasTableSelectPriv = true;
+    boolean hasColumnSelectPriv = false;
+    for (PrivilegeRequest request: requests) {
+      if (request.getAuthorizeable() instanceof AuthorizeableTable) {
+        try {
+          authorizePrivilegeRequest(authzChecker, request);
+        } catch (AuthorizationException e) {
+          // Authorization fails if we fail to authorize any table-level request that is
+          // not a SELECT privilege (e.g. INSERT).
+          if (request.getPrivilege() != Privilege.SELECT) throw e;
+          hasTableSelectPriv = false;
+        }
+      } else {
+        Preconditions.checkState(
+            request.getAuthorizeable() instanceof AuthorizeableColumn);
+        if (hasTableSelectPriv) continue;
+        if (authzChecker.hasAccess(analyzer.getUser(), request)) {
+          hasColumnSelectPriv = true;
+          continue;
+        }
+        // Make sure we don't reveal any column names in the error message.
+        throw new AuthorizationException(String.format("User '%s' does not have " +
+          "privileges to execute '%s' on: %s", analyzer.getUser().getName(),
+          request.getPrivilege().toString(),
+          request.getAuthorizeable().getFullTableName()));
+      }
+    }
+    if (!hasTableSelectPriv && !hasColumnSelectPriv) {
+       throw new AuthorizationException(String.format("User '%s' does not have " +
+          "privileges to execute 'SELECT' on: %s", analyzer.getUser().getName(),
+          requests.get(0).getAuthorizeable().getFullTableName()));
+    }
+  }
+
+  /**
+   * Throws an AuthorizationException if the dbName is a system db
+   * and the user is trying to modify it.
+   * Returns true if this is a system db and the action is allowed.
+   */
+  private boolean checkSystemDbAccess(String dbName, Privilege privilege)
+      throws AuthorizationException {
+    Db db = catalog_.getDb(dbName);
+    if (db != null && db.isSystemDb()) {
+      switch (privilege) {
+        case VIEW_METADATA:
+        case ANY:
+          return true;
+        default:
+          throw new AuthorizationException("Cannot modify system database.");
+      }
+    }
+    return false;
+  }
+
+  public AnalysisResult getAnalysisResult() { return analysisResult_; }
+  public Analyzer getAnalyzer() { return getAnalysisResult().getAnalyzer(); }
+}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/AnalyticExpr.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AnalyticExpr.java b/fe/src/main/java/org/apache/impala/analysis/AnalyticExpr.java
new file mode 100644
index 0000000..9abd82d
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/AnalyticExpr.java
@@ -0,0 +1,839 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import java.math.BigDecimal;
+import java.math.BigInteger;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.cloudera.impala.analysis.AnalyticWindow.Boundary;
+import com.cloudera.impala.analysis.AnalyticWindow.BoundaryType;
+import com.cloudera.impala.catalog.AggregateFunction;
+import com.cloudera.impala.catalog.Function;
+import com.cloudera.impala.catalog.ScalarType;
+import com.cloudera.impala.catalog.Type;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.common.InternalException;
+import com.cloudera.impala.common.TreeNode;
+import com.cloudera.impala.service.FeSupport;
+import com.cloudera.impala.thrift.TColumnValue;
+import com.cloudera.impala.thrift.TExprNode;
+import com.cloudera.impala.util.TColumnValueUtil;
+import com.google.common.base.Joiner;
+import com.google.common.base.Objects;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+
+/**
+ * Representation of an analytic function call with OVER clause.
+ * All "subexpressions" (such as the actual function call parameters as well as the
+ * partition/ordering exprs, etc.) are embedded as children in order to allow expr
+ * substitution:
+ *   function call params: children 0 .. #params - 1
+ *   partition exprs: children #params .. #params + #partition-exprs - 1
+ *   ordering exprs:
+ *     children #params + #partition-exprs ..
+ *       #params + #partition-exprs + #order-by-elements - 1
+ *   exprs in windowing clause: remaining children
+ *
+ * Note that it's wrong to embed the FunctionCallExpr itself as a child,
+ * because in 'COUNT(..) OVER (..)' the 'COUNT(..)' is not part of a standard aggregate
+ * computation and must not be substituted as such. However, the parameters of the
+ * analytic function call might reference the output of an aggregate computation
+ * and need to be substituted as such; example: COUNT(COUNT(..)) OVER (..)
+ */
+public class AnalyticExpr extends Expr {
+  private final static Logger LOG = LoggerFactory.getLogger(AnalyticExpr.class);
+
+  private FunctionCallExpr fnCall_;
+  private final List<Expr> partitionExprs_;
+  // These elements are modified to point to the corresponding child exprs to keep them
+  // in sync through expr substitutions.
+  private List<OrderByElement> orderByElements_ = Lists.newArrayList();
+  private AnalyticWindow window_;
+
+  // If set, requires the window to be set to null in resetAnalysisState(). Required for
+  // proper substitution/cloning because standardization may set a window that is illegal
+  // in SQL, and hence, will fail analysis().
+  private boolean resetWindow_ = false;
+
+  // SQL string of this AnalyticExpr before standardization. Returned in toSqlImpl().
+  private String sqlString_;
+
+  private static String LEAD = "lead";
+  private static String LAG = "lag";
+  private static String FIRST_VALUE = "first_value";
+  private static String LAST_VALUE = "last_value";
+  private static String FIRST_VALUE_IGNORE_NULLS = "first_value_ignore_nulls";
+  private static String LAST_VALUE_IGNORE_NULLS = "last_value_ignore_nulls";
+  private static String RANK = "rank";
+  private static String DENSERANK = "dense_rank";
+  private static String ROWNUMBER = "row_number";
+  private static String MIN = "min";
+  private static String MAX = "max";
+  private static String PERCENT_RANK = "percent_rank";
+  private static String CUME_DIST = "cume_dist";
+  private static String NTILE = "ntile";
+
+  // Internal function used to implement FIRST_VALUE with a window rewrite and
+  // additional null handling in the backend.
+  public static String FIRST_VALUE_REWRITE = "first_value_rewrite";
+
+  public AnalyticExpr(FunctionCallExpr fnCall, List<Expr> partitionExprs,
+      List<OrderByElement> orderByElements, AnalyticWindow window) {
+    Preconditions.checkNotNull(fnCall);
+    fnCall_ = fnCall;
+    partitionExprs_ = partitionExprs != null ? partitionExprs : new ArrayList<Expr>();
+    if (orderByElements != null) orderByElements_.addAll(orderByElements);
+    window_ = window;
+    setChildren();
+  }
+
+  /**
+   * clone() c'tor
+   */
+  protected AnalyticExpr(AnalyticExpr other) {
+    super(other);
+    fnCall_ = (FunctionCallExpr) other.fnCall_.clone();
+    for (OrderByElement e: other.orderByElements_) {
+      orderByElements_.add(e.clone());
+    }
+    partitionExprs_ = Expr.cloneList(other.partitionExprs_);
+    window_ = (other.window_ != null ? other.window_.clone() : null);
+    resetWindow_ = other.resetWindow_;
+    sqlString_ = other.sqlString_;
+    setChildren();
+  }
+
+  public FunctionCallExpr getFnCall() { return fnCall_; }
+  public List<Expr> getPartitionExprs() { return partitionExprs_; }
+  public List<OrderByElement> getOrderByElements() { return orderByElements_; }
+  public AnalyticWindow getWindow() { return window_; }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (!super.equals(obj)) return false;
+    AnalyticExpr o = (AnalyticExpr)obj;
+    if (!fnCall_.equals(o.getFnCall())) return false;
+    if ((window_ == null) != (o.window_ == null)) return false;
+    if (window_ != null) {
+      if (!window_.equals(o.window_)) return false;
+    }
+    return orderByElements_.equals(o.orderByElements_);
+  }
+
+  /**
+   * Analytic exprs cannot be constant.
+   */
+  @Override
+  public boolean isConstant() { return false; }
+
+  @Override
+  public Expr clone() { return new AnalyticExpr(this); }
+
+  @Override
+  public String toSqlImpl() {
+    if (sqlString_ != null) return sqlString_;
+    StringBuilder sb = new StringBuilder();
+    sb.append(fnCall_.toSql()).append(" OVER (");
+    boolean needsSpace = false;
+    if (!partitionExprs_.isEmpty()) {
+      sb.append("PARTITION BY ").append(Expr.toSql(partitionExprs_));
+      needsSpace = true;
+    }
+    if (!orderByElements_.isEmpty()) {
+      List<String> orderByStrings = Lists.newArrayList();
+      for (OrderByElement e: orderByElements_) {
+        orderByStrings.add(e.toSql());
+      }
+      if (needsSpace) sb.append(" ");
+      sb.append("ORDER BY ").append(Joiner.on(", ").join(orderByStrings));
+      needsSpace = true;
+    }
+    if (window_ != null) {
+      if (needsSpace) sb.append(" ");
+      sb.append(window_.toSql());
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  @Override
+  public String debugString() {
+    return Objects.toStringHelper(this)
+        .add("fn", getFnCall())
+        .add("window", window_)
+        .addValue(super.debugString())
+        .toString();
+  }
+
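+  // Left empty: analytic exprs are replaced by slot refs during planning, so they
+  // are never serialized to the backend directly.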
+  @Override
+  protected void toThrift(TExprNode msg) {
+  }
+
+  private static boolean isAnalyticFn(Function fn) {
+    return fn instanceof AggregateFunction
+        && ((AggregateFunction) fn).isAnalyticFn();
+  }
+
+  private static boolean isAnalyticFn(Function fn, String fnName) {
+    return isAnalyticFn(fn) && fn.functionName().equals(fnName);
+  }
+
+  public static boolean isAggregateFn(Function fn) {
+    return fn instanceof AggregateFunction
+        && ((AggregateFunction) fn).isAggregateFn();
+  }
+
+  public static boolean isPercentRankFn(Function fn) {
+    return isAnalyticFn(fn, PERCENT_RANK);
+  }
+
+  public static boolean isCumeDistFn(Function fn) {
+    return isAnalyticFn(fn, CUME_DIST);
+  }
+
+  public static boolean isNtileFn(Function fn) {
+    return isAnalyticFn(fn, NTILE);
+  }
+
+  static private boolean isOffsetFn(Function fn) {
+    return isAnalyticFn(fn, LEAD) || isAnalyticFn(fn, LAG);
+  }
+
+  static private boolean isMinMax(Function fn) {
+    return isAnalyticFn(fn, MIN) || isAnalyticFn(fn, MAX);
+  }
+
+  static private boolean isRankingFn(Function fn) {
+    return isAnalyticFn(fn, RANK) || isAnalyticFn(fn, DENSERANK) ||
+        isAnalyticFn(fn, ROWNUMBER);
+  }
+
+  /**
+   * Rewrite the following analytic functions:
+   * percent_rank(), cume_dist() and ntile()
+   *
+   * Returns a new Expr if the analytic expr is rewritten, returns null if it's not one
+   * that we want to rewrite.
+   */
+  public static Expr rewrite(AnalyticExpr analyticExpr) {
+    Function fn = analyticExpr.getFnCall().getFn();
+    if (AnalyticExpr.isPercentRankFn(fn)) {
+      return createPercentRank(analyticExpr);
+    } else if (AnalyticExpr.isCumeDistFn(fn)) {
+      return createCumeDist(analyticExpr);
+    } else if (AnalyticExpr.isNtileFn(fn)) {
+      return createNtile(analyticExpr);
+    }
+    return null;
+  }
+
+  /**
+   * Rewrite percent_rank() to the following:
+   *
+   * percent_rank() over([partition by clause] order by clause)
+   *    = (Count == 1) ? 0:(Rank - 1)/(Count - 1)
+   * where,
+   *  Rank = rank() over([partition by clause] order by clause)
+   *  Count = count() over([partition by clause])
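+   *
+   * For example, over a partition of four rows with distinct ordering values,
+   * rank() produces 1, 2, 3, 4 and count() produces 4, so the rewrite yields
+   * 0, 1/3, 2/3 and 1; a single-row partition yields 0.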
+   */
+  private static Expr createPercentRank(AnalyticExpr analyticExpr) {
+    Preconditions.checkState(
+        AnalyticExpr.isPercentRankFn(analyticExpr.getFnCall().getFn()));
+
+    NumericLiteral zero = new NumericLiteral(BigInteger.valueOf(0), ScalarType.BIGINT);
+    NumericLiteral one = new NumericLiteral(BigInteger.valueOf(1), ScalarType.BIGINT);
+    AnalyticExpr countExpr = create("count", analyticExpr, false, false);
+    AnalyticExpr rankExpr = create("rank", analyticExpr, true, false);
+
+    ArithmeticExpr arithmeticRewrite =
+      new ArithmeticExpr(ArithmeticExpr.Operator.DIVIDE,
+        new ArithmeticExpr(ArithmeticExpr.Operator.SUBTRACT, rankExpr, one),
+        new ArithmeticExpr(ArithmeticExpr.Operator.SUBTRACT, countExpr, one));
+
+    List<Expr> ifParams = Lists.newArrayList();
+    ifParams.add(
+      new BinaryPredicate(BinaryPredicate.Operator.EQ, one, countExpr));
+    ifParams.add(zero);
+    ifParams.add(arithmeticRewrite);
+    FunctionCallExpr resultantRewrite = new FunctionCallExpr("if", ifParams);
+
+    return resultantRewrite;
+  }
+
+  /**
+   * Rewrite cume_dist() to the following:
+   *
+   * cume_dist() over([partition by clause] order by clause)
+   *    = ((Count - Rank) + 1)/Count
+   * where,
+   *  Rank = rank() over([partition by clause] order by clause DESC)
+   *  Count = count() over([partition by clause])
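+   *
+   * For example, over a partition of four rows with distinct ordering values,
+   * the descending rank() produces 4, 3, 2, 1 and count() produces 4, so the
+   * rewrite yields 0.25, 0.5, 0.75 and 1.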
+   */
+  private static Expr createCumeDist(AnalyticExpr analyticExpr) {
+    Preconditions.checkState(
+        AnalyticExpr.isCumeDistFn(analyticExpr.getFnCall().getFn()));
+    AnalyticExpr rankExpr = create("rank", analyticExpr, true, true);
+    AnalyticExpr countExpr = create("count", analyticExpr, false, false);
+    NumericLiteral one = new NumericLiteral(BigInteger.valueOf(1), ScalarType.BIGINT);
+    ArithmeticExpr arithmeticRewrite =
+        new ArithmeticExpr(ArithmeticExpr.Operator.DIVIDE,
+          new ArithmeticExpr(ArithmeticExpr.Operator.ADD,
+            new ArithmeticExpr(ArithmeticExpr.Operator.SUBTRACT, countExpr, rankExpr),
+          one),
+        countExpr);
+    return arithmeticRewrite;
+  }
+
+  /**
+   * Rewrite ntile() to the following:
+   *
+   * ntile(B) over([partition by clause] order by clause)
+   *    = floor(min(Count, B) * (RowNumber - 1)/Count) + 1
+   * where,
+   *  RowNumber = row_number() over([partition by clause] order by clause)
+   *  Count = count() over([partition by clause])
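+   *
+   * For example, ntile(2) over a partition of five rows assigns buckets
+   * 1, 1, 1, 2, 2: min(Count, B) = 2, and floor(2 * (RowNumber - 1) / 5) + 1
+   * evaluates to 1 for the first three rows and to 2 for the last two.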
+   */
+  private static Expr createNtile(AnalyticExpr analyticExpr) {
+    Preconditions.checkState(
+        AnalyticExpr.isNtileFn(analyticExpr.getFnCall().getFn()));
+    Expr bucketExpr = analyticExpr.getChild(0);
+    AnalyticExpr rowNumExpr = create("row_number", analyticExpr, true, false);
+    AnalyticExpr countExpr = create("count", analyticExpr, false, false);
+
+    List<Expr> ifParams = Lists.newArrayList();
+    ifParams.add(
+        new BinaryPredicate(BinaryPredicate.Operator.LT, bucketExpr, countExpr));
+    ifParams.add(bucketExpr);
+    ifParams.add(countExpr);
+
+    NumericLiteral one = new NumericLiteral(BigInteger.valueOf(1), ScalarType.BIGINT);
+    ArithmeticExpr minMultiplyRowMinusOne =
+        new ArithmeticExpr(ArithmeticExpr.Operator.MULTIPLY,
+          new ArithmeticExpr(ArithmeticExpr.Operator.SUBTRACT, rowNumExpr, one),
+          new FunctionCallExpr("if", ifParams));
+    ArithmeticExpr divideAddOne =
+        new ArithmeticExpr(ArithmeticExpr.Operator.ADD,
+          new ArithmeticExpr(ArithmeticExpr.Operator.INT_DIVIDE,
+            minMultiplyRowMinusOne, countExpr),
+        one);
+    return divideAddOne;
+  }
+
+  /**
+   * Create a new Analytic Expr and associate it with a new function.
+   * Takes a reference analytic expression and clones the partition expressions and the
+   * order by expressions if 'copyOrderBy' is set and optionally reverses it if
+   * 'reverseOrderBy' is set. The new function that it will be associated with is
+   * specified by fnName.
+   */
+  private static AnalyticExpr create(String fnName,
+      AnalyticExpr referenceExpr, boolean copyOrderBy, boolean reverseOrderBy) {
+    FunctionCallExpr fnExpr = new FunctionCallExpr(fnName, new ArrayList<Expr>());
+    fnExpr.setIsAnalyticFnCall(true);
+    List<OrderByElement> orderByElements = null;
+    if (copyOrderBy) {
+      if (reverseOrderBy) {
+        orderByElements = OrderByElement.reverse(referenceExpr.getOrderByElements());
+      } else {
+        orderByElements = Lists.newArrayList();
+        for (OrderByElement elem: referenceExpr.getOrderByElements()) {
+          orderByElements.add(elem.clone());
+        }
+      }
+    }
+    AnalyticExpr analyticExpr = new AnalyticExpr(fnExpr,
+        Expr.cloneList(referenceExpr.getPartitionExprs()), orderByElements, null);
+    return analyticExpr;
+  }
+
+  /**
+   * Checks that the value expr of an offset boundary of a RANGE window is compatible
+   * with orderingExprs (and that there's only a single ordering expr).
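+   * For example, 'ORDER BY some_int_col RANGE 10 PRECEDING' is accepted because 10
+   * is implicitly castable to the ordering type, whereas the same window with two
+   * ORDER BY expressions, or with an offset that cannot be cast, is rejected.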
+   */
+  private void checkRangeOffsetBoundaryExpr(AnalyticWindow.Boundary boundary)
+      throws AnalysisException {
+    Preconditions.checkState(boundary.getType().isOffset());
+    if (orderByElements_.size() > 1) {
+      throw new AnalysisException("Only one ORDER BY expression allowed if used with "
+          + "a RANGE window with PRECEDING/FOLLOWING: " + toSql());
+    }
+    Expr rangeExpr = boundary.getExpr();
+    if (!Type.isImplicitlyCastable(
+        rangeExpr.getType(), orderByElements_.get(0).getExpr().getType(), false)) {
+      throw new AnalysisException(
+          "The value expression of a PRECEDING/FOLLOWING clause of a RANGE window must "
+            + "be implicitly convertable to the ORDER BY expression's type: "
+            + rangeExpr.toSql() + " cannot be implicitly converted to "
+            + orderByElements_.get(0).getExpr().getType().toSql());
+    }
+  }
+
+  /**
+   * Checks offset of lag()/lead().
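+   * The offset (second argument) must be a constant positive integer, so
+   * lead(x, 2) is accepted while lead(x, 0) or lead(x, some_col) is rejected.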
+   */
+  void checkOffset(Analyzer analyzer) throws AnalysisException {
+    Preconditions.checkState(isOffsetFn(getFnCall().getFn()));
+    Preconditions.checkState(getFnCall().getChildren().size() > 1);
+    Expr offset = getFnCall().getChild(1);
+    Preconditions.checkState(offset.getType().isIntegerType());
+    boolean isPosConstant = true;
+    if (!offset.isConstant()) {
+      isPosConstant = false;
+    } else {
+      try {
+        TColumnValue val = FeSupport.EvalConstExpr(offset, analyzer.getQueryCtx());
+        if (TColumnValueUtil.getNumericVal(val) <= 0) isPosConstant = false;
+      } catch (InternalException exc) {
+        throw new AnalysisException(
+            "Couldn't evaluate LEAD/LAG offset: " + exc.getMessage());
+      }
+    }
+    if (!isPosConstant) {
+      throw new AnalysisException(
+          "The offset parameter of LEAD/LAG must be a constant positive integer: "
+            + getFnCall().toSql());
+    }
+  }
+
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    if (isAnalyzed_) return;
+    fnCall_.analyze(analyzer);
+    super.analyze(analyzer);
+    type_ = getFnCall().getType();
+
+    for (Expr e: partitionExprs_) {
+      if (e.isConstant()) {
+        throw new AnalysisException(
+            "Expressions in the PARTITION BY clause must not be constant: "
+              + e.toSql() + " (in " + toSql() + ")");
+      } else if (e.getType().isComplexType()) {
+        throw new AnalysisException(String.format("PARTITION BY expression '%s' with " +
+            "complex type '%s' is not supported.", e.toSql(),
+            e.getType().toSql()));
+      }
+    }
+    for (OrderByElement e: orderByElements_) {
+      if (e.getExpr().isConstant()) {
+        throw new AnalysisException(
+            "Expressions in the ORDER BY clause must not be constant: "
+              + e.getExpr().toSql() + " (in " + toSql() + ")");
+      } else if (e.getExpr().getType().isComplexType()) {
+        throw new AnalysisException(String.format("ORDER BY expression '%s' with " +
+            "complex type '%s' is not supported.", e.getExpr().toSql(),
+            e.getExpr().getType().toSql()));
+      }
+    }
+
+    if (getFnCall().getParams().isDistinct()) {
+      throw new AnalysisException(
+          "DISTINCT not allowed in analytic function: " + getFnCall().toSql());
+    }
+
+    if (getFnCall().getParams().isIgnoreNulls()) {
+      String fnName = getFnCall().getFnName().getFunction();
+      if (!fnName.equals(LAST_VALUE) && !fnName.equals(FIRST_VALUE)) {
+        throw new AnalysisException("Function " + fnName.toUpperCase()
+            + " does not accept the keyword IGNORE NULLS.");
+      }
+    }
+
+    // check for correct composition of analytic expr
+    Function fn = getFnCall().getFn();
+    if (!(fn instanceof AggregateFunction)) {
+        throw new AnalysisException(
+            "OVER clause requires aggregate or analytic function: "
+              + getFnCall().toSql());
+    }
+
+    // check for non-analytic aggregate functions
+    if (!isAnalyticFn(fn)) {
+      throw new AnalysisException(
+          String.format("Aggregate function '%s' not supported with OVER clause.",
+              getFnCall().toSql()));
+    }
+
+    if (isAnalyticFn(fn) && !isAggregateFn(fn)) {
+      if (orderByElements_.isEmpty()) {
+        throw new AnalysisException(
+            "'" + getFnCall().toSql() + "' requires an ORDER BY clause");
+      }
+      if ((isRankingFn(fn) || isOffsetFn(fn)) && window_ != null) {
+        throw new AnalysisException(
+            "Windowing clause not allowed with '" + getFnCall().toSql() + "'");
+      }
+      if (isOffsetFn(fn) && getFnCall().getChildren().size() > 1) {
+        checkOffset(analyzer);
+        // check the default, which needs to be a constant at the moment
+        // TODO: remove this check when the backend can handle non-constants
+        if (getFnCall().getChildren().size() > 2) {
+          if (!getFnCall().getChild(2).isConstant()) {
+            throw new AnalysisException(
+                "The default parameter (parameter 3) of LEAD/LAG must be a constant: "
+                  + getFnCall().toSql());
+          }
+        }
+      }
+      if (isNtileFn(fn)) {
+        // TODO: IMPALA-2171:Remove this when ntile() can handle a non-constant argument.
+        if (!getFnCall().getChild(0).isConstant()) {
+          throw new AnalysisException("NTILE() requires a constant argument");
+        }
+        // Check if argument value is zero or negative and throw an exception if found.
+        try {
+          TColumnValue bucketValue =
+              FeSupport.EvalConstExpr(getFnCall().getChild(0), analyzer.getQueryCtx());
+          Long arg = bucketValue.getLong_val();
+          if (arg <= 0) {
+            throw new AnalysisException("NTILE() requires a positive argument: " + arg);
+          }
+        } catch (InternalException e) {
+          throw new AnalysisException(e.toString());
+        }
+      }
+    }
+
+    if (window_ != null) {
+      if (orderByElements_.isEmpty()) {
+        throw new AnalysisException("Windowing clause requires ORDER BY clause: "
+            + toSql());
+      }
+      window_.analyze(analyzer);
+
+      if (!orderByElements_.isEmpty()
+          && window_.getType() == AnalyticWindow.Type.RANGE) {
+        // check that preceding/following ranges match ordering
+        if (window_.getLeftBoundary().getType().isOffset()) {
+          checkRangeOffsetBoundaryExpr(window_.getLeftBoundary());
+        }
+        if (window_.getRightBoundary() != null
+            && window_.getRightBoundary().getType().isOffset()) {
+          checkRangeOffsetBoundaryExpr(window_.getRightBoundary());
+        }
+      }
+    }
+
+    // check nesting
+    if (TreeNode.contains(getChildren(), AnalyticExpr.class)) {
+      throw new AnalysisException(
+          "Nesting of analytic expressions is not allowed: " + toSql());
+    }
+    sqlString_ = toSql();
+
+    standardize(analyzer);
+
+    // min/max is not currently supported on sliding windows (i.e. start bound is not
+    // unbounded).
+    if (window_ != null && isMinMax(fn) &&
+        window_.getLeftBoundary().getType() != BoundaryType.UNBOUNDED_PRECEDING) {
+      throw new AnalysisException(
+          "'" + getFnCall().toSql() + "' is only supported with an "
+            + "UNBOUNDED PRECEDING start bound.");
+    }
+
+    setChildren();
+  }
+
+  /**
+   * If necessary, rewrites the analytic function, window, and/or order-by elements into
+   * a standard format for the purpose of simpler backend execution, as follows:
+   * 1. row_number():
+   *    Set a window from UNBOUNDED PRECEDING to CURRENT_ROW.
+   * 2. lead()/lag():
+   *    Explicitly set the default arguments to lead()/lag() for BE simplicity.
+   *    Set a window for lead(): UNBOUNDED PRECEDING to OFFSET FOLLOWING.
+   *    Set a window for lag(): UNBOUNDED PRECEDING to OFFSET PRECEDING.
+   * 3. FIRST_VALUE without UNBOUNDED PRECEDING or IGNORE NULLS gets rewritten to use a
+   *    different window and function. There are a few cases:
+   *     a) Start bound is X FOLLOWING or CURRENT ROW (X=0):
+   *        Use 'last_value' with a window where both bounds are X FOLLOWING (or
+   *        CURRENT ROW). Setting the start bound to X following is necessary because the
+   *        X rows at the end of a partition have no rows in their window. Note that X
+   *        FOLLOWING could be rewritten as lead(X) but that would not work for CURRENT
+   *        ROW.
+   *     b) Start bound is X PRECEDING and end bound is CURRENT ROW or FOLLOWING:
+   *        Use 'first_value_rewrite' and a window with an end bound X PRECEDING. An
+   *        extra parameter '-1' is added to indicate to the backend that NULLs should
+   *        not be added for the first X rows.
+   *     c) Start bound is X PRECEDING and end bound is Y PRECEDING:
+   *        Use 'first_value_rewrite' and a window with an end bound X PRECEDING. The
+   *        first Y rows in a partition have empty windows and should be NULL. An extra
+   *        parameter with the integer constant Y is added to indicate to the backend
+   *        that NULLs should be added for the first Y rows.
+   *    The performance optimizations here and in 5. below cannot be applied in the
+   *    case of IGNORE NULLS because they change which values appear in the window;
+   *    in the IGNORE NULLS case the correct value to return might then not be in
+   *    the window at all, e.g. if all of the values in the rewritten window are
+   *    NULL but one of the values in the original window isn't.
+   * 4. Start bound is not UNBOUNDED PRECEDING and either the end bound is UNBOUNDED
+   *    FOLLOWING or the function is first_value(... ignore nulls):
+   *    Reverse the ordering and window, and flip first_value() and last_value().
+   * 5. first_value() with UNBOUNDED PRECEDING and not IGNORE NULLS:
+   *    Set the end boundary to CURRENT_ROW.
+   * 6. Rewrite IGNORE NULLS as regular FunctionCallExprs with '_ignore_nulls'
+   *    appended to the function name, because the BE implements them as different
+   *    functions.
+   * 7. Explicitly set the default window if no window was given but there
+   *    are order-by elements.
+   * 8. first/last_value() with RANGE window:
+   *    Rewrite as a ROWS window.
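+   *
+   * For example, under 2. the call lag(x) OVER (ORDER BY y) becomes
+   * lag(x, 1, NULL) with a ROWS window from UNBOUNDED PRECEDING to 1 PRECEDING,
+   * and under 3.b the call first_value(x) OVER (ORDER BY y ROWS BETWEEN
+   * 2 PRECEDING AND CURRENT ROW) becomes first_value_rewrite(x, -1) with a ROWS
+   * window from UNBOUNDED PRECEDING to 2 PRECEDING.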
+   */
+  private void standardize(Analyzer analyzer) {
+    FunctionName analyticFnName = getFnCall().getFnName();
+
+    // 1. Set a window from UNBOUNDED PRECEDING to CURRENT_ROW for row_number().
+    if (analyticFnName.getFunction().equals(ROWNUMBER)) {
+      Preconditions.checkState(window_ == null, "Unexpected window set for row_number()");
+      window_ = new AnalyticWindow(AnalyticWindow.Type.ROWS,
+          new Boundary(BoundaryType.UNBOUNDED_PRECEDING, null),
+          new Boundary(BoundaryType.CURRENT_ROW, null));
+      resetWindow_ = true;
+      return;
+    }
+
+    // 2. Explicitly set the default arguments to lead()/lag() for BE simplicity.
+    // Set a window for lead(): UNBOUNDED PRECEDING to OFFSET FOLLOWING,
+    // Set a window for lag(): UNBOUNDED PRECEDING to OFFSET PRECEDING.
+    if (isOffsetFn(getFnCall().getFn())) {
+      Preconditions.checkState(window_ == null);
+
+      // If necessary, create a new fn call with the default args explicitly set.
+      List<Expr> newExprParams = null;
+      if (getFnCall().getChildren().size() == 1) {
+        newExprParams = Lists.newArrayListWithExpectedSize(3);
+        newExprParams.addAll(getFnCall().getChildren());
+        // Default offset is 1.
+        newExprParams.add(new NumericLiteral(BigDecimal.valueOf(1)));
+        // Default default value is NULL.
+        newExprParams.add(new NullLiteral());
+      } else if (getFnCall().getChildren().size() == 2) {
+        newExprParams = Lists.newArrayListWithExpectedSize(3);
+        newExprParams.addAll(getFnCall().getChildren());
+        // Default default value is NULL.
+        newExprParams.add(new NullLiteral());
+      } else  {
+        Preconditions.checkState(getFnCall().getChildren().size() == 3);
+      }
+      if (newExprParams != null) {
+        fnCall_ = new FunctionCallExpr(getFnCall().getFnName(),
+            new FunctionParams(newExprParams));
+        fnCall_.setIsAnalyticFnCall(true);
+        fnCall_.analyzeNoThrow(analyzer);
+      }
+
+      // Set the window.
+      BoundaryType rightBoundaryType = BoundaryType.FOLLOWING;
+      if (analyticFnName.getFunction().equals(LAG)) {
+        rightBoundaryType = BoundaryType.PRECEDING;
+      }
+      window_ = new AnalyticWindow(AnalyticWindow.Type.ROWS,
+          new Boundary(BoundaryType.UNBOUNDED_PRECEDING, null),
+          new Boundary(rightBoundaryType, getOffsetExpr(getFnCall())));
+      try {
+        window_.analyze(analyzer);
+      } catch (AnalysisException e) {
+        throw new IllegalStateException(e);
+      }
+      resetWindow_ = true;
+      return;
+    }
+
+    // 3.
+    if (analyticFnName.getFunction().equals(FIRST_VALUE)
+        && window_ != null
+        && window_.getLeftBoundary().getType() != BoundaryType.UNBOUNDED_PRECEDING
+        && !getFnCall().getParams().isIgnoreNulls()) {
+      if (window_.getLeftBoundary().getType() != BoundaryType.PRECEDING) {
+        window_ = new AnalyticWindow(window_.getType(), window_.getLeftBoundary(),
+            window_.getLeftBoundary());
+        fnCall_ = new FunctionCallExpr(new FunctionName(LAST_VALUE),
+            getFnCall().getParams());
+      } else {
+        List<Expr> paramExprs = Expr.cloneList(getFnCall().getParams().exprs());
+        if (window_.getRightBoundary().getType() == BoundaryType.PRECEDING) {
+          // The number of rows preceding for the end bound determines the number of
+          // rows at the beginning of each partition that should have a NULL value.
+          paramExprs.add(new NumericLiteral(window_.getRightBoundary().getOffsetValue(),
+              Type.BIGINT));
+        } else {
+          // -1 indicates that no NULL values are inserted even though we set the end
+          // bound to the start bound (which is PRECEDING) below; this is different from
+          // the default behavior of windows with an end bound PRECEDING.
+          paramExprs.add(new NumericLiteral(BigInteger.valueOf(-1), Type.BIGINT));
+        }
+
+        window_ = new AnalyticWindow(window_.getType(),
+            new Boundary(BoundaryType.UNBOUNDED_PRECEDING, null),
+            window_.getLeftBoundary());
+        fnCall_ = new FunctionCallExpr(new FunctionName(FIRST_VALUE_REWRITE),
+            new FunctionParams(paramExprs));
+        fnCall_.setIsInternalFnCall(true);
+      }
+      fnCall_.setIsAnalyticFnCall(true);
+      fnCall_.analyzeNoThrow(analyzer);
+      // Use getType() instead of getReturnType() because wildcard decimals
+      // have only been resolved in the former.
+      type_ = fnCall_.getType();
+      analyticFnName = getFnCall().getFnName();
+    }
+
+    // 4. Reverse the ordering and window for windows not starting with UNBOUNDED
+    // PRECEDING and either: ending with UNBOUNDED FOLLOWING or
+    // first_value(... ignore nulls)
+    if (window_ != null
+        && window_.getLeftBoundary().getType() != BoundaryType.UNBOUNDED_PRECEDING
+        && (window_.getRightBoundary().getType() == BoundaryType.UNBOUNDED_FOLLOWING
+            || (analyticFnName.getFunction().equals(FIRST_VALUE)
+                && getFnCall().getParams().isIgnoreNulls()))) {
+      orderByElements_ = OrderByElement.reverse(orderByElements_);
+      window_ = window_.reverse();
+
+      // Also flip first_value()/last_value(). For other analytic functions there is no
+      // need to also change the function.
+      FunctionName reversedFnName = null;
+      if (analyticFnName.getFunction().equals(FIRST_VALUE)) {
+        reversedFnName = new FunctionName(LAST_VALUE);
+      } else if (analyticFnName.getFunction().equals(LAST_VALUE)) {
+        reversedFnName = new FunctionName(FIRST_VALUE);
+      }
+      if (reversedFnName != null) {
+        fnCall_ = new FunctionCallExpr(reversedFnName, getFnCall().getParams());
+        fnCall_.setIsAnalyticFnCall(true);
+        fnCall_.analyzeNoThrow(analyzer);
+      }
+      analyticFnName = getFnCall().getFnName();
+    }
+
+    // 5. Set the start boundary to CURRENT_ROW for first_value() if the end boundary
+    // is UNBOUNDED_PRECEDING and IGNORE NULLS is not set.
+    if (analyticFnName.getFunction().equals(FIRST_VALUE)
+        && window_ != null
+        && window_.getLeftBoundary().getType() == BoundaryType.UNBOUNDED_PRECEDING
+        && window_.getRightBoundary().getType() != BoundaryType.PRECEDING
+        && !getFnCall().getParams().isIgnoreNulls()) {
+      window_.setRightBoundary(new Boundary(BoundaryType.CURRENT_ROW, null));
+    }
+
+    // 6. Set the default window.
+    if (!orderByElements_.isEmpty() && window_ == null) {
+      window_ = AnalyticWindow.DEFAULT_WINDOW;
+      resetWindow_ = true;
+    }
+
+    // 7. Change first_value/last_value RANGE windows to ROWS.
+    if ((analyticFnName.getFunction().equals(FIRST_VALUE)
+         || analyticFnName.getFunction().equals(LAST_VALUE))
+        && window_ != null
+        && window_.getType() == AnalyticWindow.Type.RANGE) {
+      window_ = new AnalyticWindow(AnalyticWindow.Type.ROWS, window_.getLeftBoundary(),
+          window_.getRightBoundary());
+    }
+
+    // 8. Append IGNORE NULLS to fn name if set.
+    if (getFnCall().getParams().isIgnoreNulls()) {
+      if (analyticFnName.getFunction().equals(LAST_VALUE)) {
+        fnCall_ = new FunctionCallExpr(new FunctionName(LAST_VALUE_IGNORE_NULLS),
+            getFnCall().getParams());
+      } else {
+        Preconditions.checkState(analyticFnName.getFunction().equals(FIRST_VALUE));
+        fnCall_ = new FunctionCallExpr(new FunctionName(FIRST_VALUE_IGNORE_NULLS),
+            getFnCall().getParams());
+      }
+
+      fnCall_.setIsAnalyticFnCall(true);
+      fnCall_.setIsInternalFnCall(true);
+      fnCall_.analyzeNoThrow(analyzer);
+      analyticFnName = getFnCall().getFnName();
+      Preconditions.checkState(type_.equals(fnCall_.getType()));
+    }
+  }
+
+  /**
+   * Returns the explicit or implicit offset of an analytic function call.
+   */
+  private Expr getOffsetExpr(FunctionCallExpr offsetFnCall) {
+    Preconditions.checkState(isOffsetFn(getFnCall().getFn()));
+    if (offsetFnCall.getChild(1) != null) return offsetFnCall.getChild(1);
+    // The default offset is 1.
+    return new NumericLiteral(BigDecimal.valueOf(1));
+  }
+
+  /**
+   * Keep fnCall_, partitionExprs_ and orderByElements_ in sync with children_.
+   */
+  private void syncWithChildren() {
+    int numArgs = fnCall_.getChildren().size();
+    for (int i = 0; i < numArgs; ++i) {
+      fnCall_.setChild(i, getChild(i));
+    }
+    int numPartitionExprs = partitionExprs_.size();
+    for (int i = 0; i < numPartitionExprs; ++i) {
+      partitionExprs_.set(i, getChild(numArgs + i));
+    }
+    for (int i = 0; i < orderByElements_.size(); ++i) {
+      orderByElements_.get(i).setExpr(getChild(numArgs + numPartitionExprs + i));
+    }
+  }
+
+  /**
+   * Populate children_ from fnCall_, partitionExprs_, orderByElements_
+   */
+  private void setChildren() {
+    getChildren().clear();
+    addChildren(fnCall_.getChildren());
+    addChildren(partitionExprs_);
+    for (OrderByElement e: orderByElements_) {
+      addChild(e.getExpr());
+    }
+    if (window_ != null) {
+      if (window_.getLeftBoundary().getExpr() != null) {
+        addChild(window_.getLeftBoundary().getExpr());
+      }
+      if (window_.getRightBoundary() != null
+          && window_.getRightBoundary().getExpr() != null) {
+        addChild(window_.getRightBoundary().getExpr());
+      }
+    }
+  }
+
+  @Override
+  protected void resetAnalysisState() {
+    super.resetAnalysisState();
+    fnCall_.resetAnalysisState();
+    if (resetWindow_) window_ = null;
+    resetWindow_ = false;
+    // sync with children, now that they've been reset
+    syncWithChildren();
+  }
+
+  @Override
+  protected Expr substituteImpl(ExprSubstitutionMap smap, Analyzer analyzer)
+      throws AnalysisException {
+    Expr e = super.substituteImpl(smap, analyzer);
+    if (!(e instanceof AnalyticExpr)) return e;
+    // Re-sync state after possible child substitution.
+    ((AnalyticExpr) e).syncWithChildren();
+    return e;
+  }
+}
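
A minimal, self-contained sketch of the arithmetic generated by the percent_rank(),
cume_dist() and ntile() rewrites above, assuming rank(), the descending rank(),
row_number() and count() have already been evaluated per partition (class and method
names are illustrative only, not Impala frontend APIs):

// Stand-alone illustration of the rewritten arithmetic; not part of the commit.
public class AnalyticRewriteSketch {
  // percent_rank() = (count == 1) ? 0 : (rank - 1) / (count - 1)
  static double percentRank(long rank, long count) {
    return count == 1 ? 0.0 : (rank - 1) / (double) (count - 1);
  }

  // cume_dist() = ((count - rankDesc) + 1) / count, where rankDesc is rank()
  // over the reversed ordering.
  static double cumeDist(long rankDesc, long count) {
    return (count - rankDesc + 1) / (double) count;
  }

  // ntile(b) = floor(min(count, b) * (rowNum - 1) / count) + 1
  static long ntile(long b, long rowNum, long count) {
    // Integer division of non-negative operands is the floor in the formula above.
    return Math.min(count, b) * (rowNum - 1) / count + 1;
  }

  public static void main(String[] args) {
    // A single partition of five rows, ordered ascending, no ties.
    for (long i = 1; i <= 5; ++i) {
      System.out.printf("row %d: percent_rank=%.2f cume_dist=%.2f ntile(2)=%d%n",
          i, percentRank(i, 5), cumeDist(5 - i + 1, 5), ntile(2, i, 5));
    }
  }
}

For the five-row partition this prints percent_rank 0.00 through 1.00 in steps of 0.25,
cume_dist 0.20 through 1.00 in steps of 0.20, and ntile(2) buckets 1, 1, 1, 2, 2,
matching the values the rewritten analytic exprs produce.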



[48/61] [partial] incubator-impala git commit: IMPALA-3786: Replace "cloudera" with "apache" (part 1)

Posted by kw...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/AnalyticExpr.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/AnalyticExpr.java b/fe/src/main/java/com/cloudera/impala/analysis/AnalyticExpr.java
deleted file mode 100644
index 9abd82d..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/AnalyticExpr.java
+++ /dev/null
@@ -1,839 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.math.BigDecimal;
-import java.math.BigInteger;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.analysis.AnalyticWindow.Boundary;
-import com.cloudera.impala.analysis.AnalyticWindow.BoundaryType;
-import com.cloudera.impala.catalog.AggregateFunction;
-import com.cloudera.impala.catalog.Function;
-import com.cloudera.impala.catalog.ScalarType;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.common.TreeNode;
-import com.cloudera.impala.service.FeSupport;
-import com.cloudera.impala.thrift.TColumnValue;
-import com.cloudera.impala.thrift.TExprNode;
-import com.cloudera.impala.util.TColumnValueUtil;
-import com.google.common.base.Joiner;
-import com.google.common.base.Objects;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-/**
- * Representation of an analytic function call with OVER clause.
- * All "subexpressions" (such as the actual function call parameters as well as the
- * partition/ordering exprs, etc.) are embedded as children in order to allow expr
- * substitution:
- *   function call params: child 0 .. #params
- *   partition exprs: children #params + 1 .. #params + #partition-exprs
- *   ordering exprs:
- *     children #params + #partition-exprs + 1 ..
- *       #params + #partition-exprs + #order-by-elements
- *   exprs in windowing clause: remaining children
- *
- * Note that it's wrong to embed the FunctionCallExpr itself as a child,
- * because in 'COUNT(..) OVER (..)' the 'COUNT(..)' is not part of a standard aggregate
- * computation and must not be substituted as such. However, the parameters of the
- * analytic function call might reference the output of an aggregate computation
- * and need to be substituted as such; example: COUNT(COUNT(..)) OVER (..)
- */
-public class AnalyticExpr extends Expr {
-  private final static Logger LOG = LoggerFactory.getLogger(AnalyticExpr.class);
-
-  private FunctionCallExpr fnCall_;
-  private final List<Expr> partitionExprs_;
-  // These elements are modified to point to the corresponding child exprs to keep them
-  // in sync through expr substitutions.
-  private List<OrderByElement> orderByElements_ = Lists.newArrayList();
-  private AnalyticWindow window_;
-
-  // If set, requires the window to be set to null in resetAnalysisState(). Required for
-  // proper substitution/cloning because standardization may set a window that is illegal
-  // in SQL, and hence, will fail analysis().
-  private boolean resetWindow_ = false;
-
-  // SQL string of this AnalyticExpr before standardization. Returned in toSqlImpl().
-  private String sqlString_;
-
-  private static String LEAD = "lead";
-  private static String LAG = "lag";
-  private static String FIRST_VALUE = "first_value";
-  private static String LAST_VALUE = "last_value";
-  private static String FIRST_VALUE_IGNORE_NULLS = "first_value_ignore_nulls";
-  private static String LAST_VALUE_IGNORE_NULLS = "last_value_ignore_nulls";
-  private static String RANK = "rank";
-  private static String DENSERANK = "dense_rank";
-  private static String ROWNUMBER = "row_number";
-  private static String MIN = "min";
-  private static String MAX = "max";
-  private static String PERCENT_RANK = "percent_rank";
-  private static String CUME_DIST = "cume_dist";
-  private static String NTILE = "ntile";
-
-  // Internal function used to implement FIRST_VALUE with a window rewrite and
-  // additional null handling in the backend.
-  public static String FIRST_VALUE_REWRITE = "first_value_rewrite";
-
-  public AnalyticExpr(FunctionCallExpr fnCall, List<Expr> partitionExprs,
-      List<OrderByElement> orderByElements, AnalyticWindow window) {
-    Preconditions.checkNotNull(fnCall);
-    fnCall_ = fnCall;
-    partitionExprs_ = partitionExprs != null ? partitionExprs : new ArrayList<Expr>();
-    if (orderByElements != null) orderByElements_.addAll(orderByElements);
-    window_ = window;
-    setChildren();
-  }
-
-  /**
-   * clone() c'tor
-   */
-  protected AnalyticExpr(AnalyticExpr other) {
-    super(other);
-    fnCall_ = (FunctionCallExpr) other.fnCall_.clone();
-    for (OrderByElement e: other.orderByElements_) {
-      orderByElements_.add(e.clone());
-    }
-    partitionExprs_ = Expr.cloneList(other.partitionExprs_);
-    window_ = (other.window_ != null ? other.window_.clone() : null);
-    resetWindow_ = other.resetWindow_;
-    sqlString_ = other.sqlString_;
-    setChildren();
-  }
-
-  public FunctionCallExpr getFnCall() { return fnCall_; }
-  public List<Expr> getPartitionExprs() { return partitionExprs_; }
-  public List<OrderByElement> getOrderByElements() { return orderByElements_; }
-  public AnalyticWindow getWindow() { return window_; }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (!super.equals(obj)) return false;
-    AnalyticExpr o = (AnalyticExpr)obj;
-    if (!fnCall_.equals(o.getFnCall())) return false;
-    if ((window_ == null) != (o.window_ == null)) return false;
-    if (window_ != null) {
-      if (!window_.equals(o.window_)) return false;
-    }
-    return orderByElements_.equals(o.orderByElements_);
-  }
-
-  /**
-   * Analytic exprs cannot be constant.
-   */
-  @Override
-  public boolean isConstant() { return false; }
-
-  @Override
-  public Expr clone() { return new AnalyticExpr(this); }
-
-  @Override
-  public String toSqlImpl() {
-    if (sqlString_ != null) return sqlString_;
-    StringBuilder sb = new StringBuilder();
-    sb.append(fnCall_.toSql()).append(" OVER (");
-    boolean needsSpace = false;
-    if (!partitionExprs_.isEmpty()) {
-      sb.append("PARTITION BY ").append(Expr.toSql(partitionExprs_));
-      needsSpace = true;
-    }
-    if (!orderByElements_.isEmpty()) {
-      List<String> orderByStrings = Lists.newArrayList();
-      for (OrderByElement e: orderByElements_) {
-        orderByStrings.add(e.toSql());
-      }
-      if (needsSpace) sb.append(" ");
-      sb.append("ORDER BY ").append(Joiner.on(", ").join(orderByStrings));
-      needsSpace = true;
-    }
-    if (window_ != null) {
-      if (needsSpace) sb.append(" ");
-      sb.append(window_.toSql());
-    }
-    sb.append(")");
-    return sb.toString();
-  }
-
-  @Override
-  public String debugString() {
-    return Objects.toStringHelper(this)
-        .add("fn", getFnCall())
-        .add("window", window_)
-        .addValue(super.debugString())
-        .toString();
-  }
-
-  @Override
-  protected void toThrift(TExprNode msg) {
-  }
-
-  private static boolean isAnalyticFn(Function fn) {
-    return fn instanceof AggregateFunction
-        && ((AggregateFunction) fn).isAnalyticFn();
-  }
-
-  private static boolean isAnalyticFn(Function fn, String fnName) {
-    return isAnalyticFn(fn) && fn.functionName().equals(fnName);
-  }
-
-  public static boolean isAggregateFn(Function fn) {
-    return fn instanceof AggregateFunction
-        && ((AggregateFunction) fn).isAggregateFn();
-  }
-
-  public static boolean isPercentRankFn(Function fn) {
-    return isAnalyticFn(fn, PERCENT_RANK);
-  }
-
-  public static boolean isCumeDistFn(Function fn) {
-    return isAnalyticFn(fn, CUME_DIST);
-  }
-
-  public static boolean isNtileFn(Function fn) {
-    return isAnalyticFn(fn, NTILE);
-  }
-
-  static private boolean isOffsetFn(Function fn) {
-    return isAnalyticFn(fn, LEAD) || isAnalyticFn(fn, LAG);
-  }
-
-  static private boolean isMinMax(Function fn) {
-    return isAnalyticFn(fn, MIN) || isAnalyticFn(fn, MAX);
-  }
-
-  static private boolean isRankingFn(Function fn) {
-    return isAnalyticFn(fn, RANK) || isAnalyticFn(fn, DENSERANK) ||
-        isAnalyticFn(fn, ROWNUMBER);
-  }
-
-  /**
-   * Rewrite the following analytic functions:
-   * percent_rank(), cume_dist() and ntile()
-   *
-   * Returns a new Expr if the analytic expr is rewritten, returns null if it's not one
-   * that we want to rewrite.
-   */
-  public static Expr rewrite(AnalyticExpr analyticExpr) {
-    Function fn = analyticExpr.getFnCall().getFn();
-    if (AnalyticExpr.isPercentRankFn(fn)) {
-      return createPercentRank(analyticExpr);
-    } else if (AnalyticExpr.isCumeDistFn(fn)) {
-      return createCumeDist(analyticExpr);
-    } else if (AnalyticExpr.isNtileFn(fn)) {
-      return createNtile(analyticExpr);
-    }
-    return null;
-  }
-
-  /**
-   * Rewrite percent_rank() to the following:
-   *
-   * percent_rank() over([partition by clause] order by clause)
-   *    = (Count == 1) ? 0:(Rank - 1)/(Count - 1)
-   * where,
-   *  Rank = rank() over([partition by clause] order by clause)
-   *  Count = count() over([partition by clause])
-   */
-  private static Expr createPercentRank(AnalyticExpr analyticExpr) {
-    Preconditions.checkState(
-        AnalyticExpr.isPercentRankFn(analyticExpr.getFnCall().getFn()));
-
-    NumericLiteral zero = new NumericLiteral(BigInteger.valueOf(0), ScalarType.BIGINT);
-    NumericLiteral one = new NumericLiteral(BigInteger.valueOf(1), ScalarType.BIGINT);
-    AnalyticExpr countExpr = create("count", analyticExpr, false, false);
-    AnalyticExpr rankExpr = create("rank", analyticExpr, true, false);
-
-    ArithmeticExpr arithmeticRewrite =
-      new ArithmeticExpr(ArithmeticExpr.Operator.DIVIDE,
-        new ArithmeticExpr(ArithmeticExpr.Operator.SUBTRACT, rankExpr, one),
-        new ArithmeticExpr(ArithmeticExpr.Operator.SUBTRACT, countExpr, one));
-
-    List<Expr> ifParams = Lists.newArrayList();
-    ifParams.add(
-      new BinaryPredicate(BinaryPredicate.Operator.EQ, one, countExpr));
-    ifParams.add(zero);
-    ifParams.add(arithmeticRewrite);
-    FunctionCallExpr resultantRewrite = new FunctionCallExpr("if", ifParams);
-
-    return resultantRewrite;
-  }
-
-  /**
-   * Rewrite cume_dist() to the following:
-   *
-   * cume_dist() over([partition by clause] order by clause)
-   *    = ((Count - Rank) + 1)/Count
-   * where,
-   *  Rank = rank() over([partition by clause] order by clause DESC)
-   *  Count = count() over([partition by clause])
-   */
-  private static Expr createCumeDist(AnalyticExpr analyticExpr) {
-    Preconditions.checkState(
-        AnalyticExpr.isCumeDistFn(analyticExpr.getFnCall().getFn()));
-    AnalyticExpr rankExpr = create("rank", analyticExpr, true, true);
-    AnalyticExpr countExpr = create("count", analyticExpr, false, false);
-    NumericLiteral one = new NumericLiteral(BigInteger.valueOf(1), ScalarType.BIGINT);
-    ArithmeticExpr arithmeticRewrite =
-        new ArithmeticExpr(ArithmeticExpr.Operator.DIVIDE,
-          new ArithmeticExpr(ArithmeticExpr.Operator.ADD,
-            new ArithmeticExpr(ArithmeticExpr.Operator.SUBTRACT, countExpr, rankExpr),
-          one),
-        countExpr);
-    return arithmeticRewrite;
-  }
-
-  /**
-   * Rewrite ntile() to the following:
-   *
-   * ntile(B) over([partition by clause] order by clause)
-   *    = floor(min(Count, B) * (RowNumber - 1)/Count) + 1
-   * where,
-   *  RowNumber = row_number() over([partition by clause] order by clause)
-   *  Count = count() over([partition by clause])
-   */
-  private static Expr createNtile(AnalyticExpr analyticExpr) {
-    Preconditions.checkState(
-        AnalyticExpr.isNtileFn(analyticExpr.getFnCall().getFn()));
-    Expr bucketExpr = analyticExpr.getChild(0);
-    AnalyticExpr rowNumExpr = create("row_number", analyticExpr, true, false);
-    AnalyticExpr countExpr = create("count", analyticExpr, false, false);
-
-    List<Expr> ifParams = Lists.newArrayList();
-    ifParams.add(
-        new BinaryPredicate(BinaryPredicate.Operator.LT, bucketExpr, countExpr));
-    ifParams.add(bucketExpr);
-    ifParams.add(countExpr);
-
-    NumericLiteral one = new NumericLiteral(BigInteger.valueOf(1), ScalarType.BIGINT);
-    ArithmeticExpr minMultiplyRowMinusOne =
-        new ArithmeticExpr(ArithmeticExpr.Operator.MULTIPLY,
-          new ArithmeticExpr(ArithmeticExpr.Operator.SUBTRACT, rowNumExpr, one),
-          new FunctionCallExpr("if", ifParams));
-    ArithmeticExpr divideAddOne =
-        new ArithmeticExpr(ArithmeticExpr.Operator.ADD,
-          new ArithmeticExpr(ArithmeticExpr.Operator.INT_DIVIDE,
-            minMultiplyRowMinusOne, countExpr),
-        one);
-    return divideAddOne;
-  }
-
-  /**
-   * Create a new Analytic Expr and associate it with a new function.
-   * Takes a reference analytic expression and clones the partition expressions and the
-   * order by expressions if 'copyOrderBy' is set and optionally reverses it if
-   * 'reverseOrderBy' is set. The new function that it will be associated with is
-   * specified by fnName.
-   */
-  private static AnalyticExpr create(String fnName,
-      AnalyticExpr referenceExpr, boolean copyOrderBy, boolean reverseOrderBy) {
-    FunctionCallExpr fnExpr = new FunctionCallExpr(fnName, new ArrayList<Expr>());
-    fnExpr.setIsAnalyticFnCall(true);
-    List<OrderByElement> orderByElements = null;
-    if (copyOrderBy) {
-      if (reverseOrderBy) {
-        orderByElements = OrderByElement.reverse(referenceExpr.getOrderByElements());
-      } else {
-        orderByElements = Lists.newArrayList();
-        for (OrderByElement elem: referenceExpr.getOrderByElements()) {
-          orderByElements.add(elem.clone());
-        }
-      }
-    }
-    AnalyticExpr analyticExpr = new AnalyticExpr(fnExpr,
-        Expr.cloneList(referenceExpr.getPartitionExprs()), orderByElements, null);
-    return analyticExpr;
-  }
-
-  /**
-   * Checks that the value expr of an offset boundary of a RANGE window is compatible
-   * with orderingExprs (and that there's only a single ordering expr).
-   */
-  private void checkRangeOffsetBoundaryExpr(AnalyticWindow.Boundary boundary)
-      throws AnalysisException {
-    Preconditions.checkState(boundary.getType().isOffset());
-    if (orderByElements_.size() > 1) {
-      throw new AnalysisException("Only one ORDER BY expression allowed if used with "
-          + "a RANGE window with PRECEDING/FOLLOWING: " + toSql());
-    }
-    Expr rangeExpr = boundary.getExpr();
-    if (!Type.isImplicitlyCastable(
-        rangeExpr.getType(), orderByElements_.get(0).getExpr().getType(), false)) {
-      throw new AnalysisException(
-          "The value expression of a PRECEDING/FOLLOWING clause of a RANGE window must "
-            + "be implicitly convertible to the ORDER BY expression's type: "
-            + rangeExpr.toSql() + " cannot be implicitly converted to "
-            + orderByElements_.get(0).getExpr().getType().toSql());
-    }
-  }
-
-  /**
-   * Checks offset of lag()/lead().
-   */
-  void checkOffset(Analyzer analyzer) throws AnalysisException {
-    Preconditions.checkState(isOffsetFn(getFnCall().getFn()));
-    Preconditions.checkState(getFnCall().getChildren().size() > 1);
-    Expr offset = getFnCall().getChild(1);
-    Preconditions.checkState(offset.getType().isIntegerType());
-    boolean isPosConstant = true;
-    if (!offset.isConstant()) {
-      isPosConstant = false;
-    } else {
-      try {
-        TColumnValue val = FeSupport.EvalConstExpr(offset, analyzer.getQueryCtx());
-        if (TColumnValueUtil.getNumericVal(val) <= 0) isPosConstant = false;
-      } catch (InternalException exc) {
-        throw new AnalysisException(
-            "Couldn't evaluate LEAD/LAG offset: " + exc.getMessage());
-      }
-    }
-    if (!isPosConstant) {
-      throw new AnalysisException(
-          "The offset parameter of LEAD/LAG must be a constant positive integer: "
-            + getFnCall().toSql());
-    }
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    if (isAnalyzed_) return;
-    fnCall_.analyze(analyzer);
-    super.analyze(analyzer);
-    type_ = getFnCall().getType();
-
-    for (Expr e: partitionExprs_) {
-      if (e.isConstant()) {
-        throw new AnalysisException(
-            "Expressions in the PARTITION BY clause must not be constant: "
-              + e.toSql() + " (in " + toSql() + ")");
-      } else if (e.getType().isComplexType()) {
-        throw new AnalysisException(String.format("PARTITION BY expression '%s' with " +
-            "complex type '%s' is not supported.", e.toSql(),
-            e.getType().toSql()));
-      }
-    }
-    for (OrderByElement e: orderByElements_) {
-      if (e.getExpr().isConstant()) {
-        throw new AnalysisException(
-            "Expressions in the ORDER BY clause must not be constant: "
-              + e.getExpr().toSql() + " (in " + toSql() + ")");
-      } else if (e.getExpr().getType().isComplexType()) {
-        throw new AnalysisException(String.format("ORDER BY expression '%s' with " +
-            "complex type '%s' is not supported.", e.getExpr().toSql(),
-            e.getExpr().getType().toSql()));
-      }
-    }
-
-    if (getFnCall().getParams().isDistinct()) {
-      throw new AnalysisException(
-          "DISTINCT not allowed in analytic function: " + getFnCall().toSql());
-    }
-
-    if (getFnCall().getParams().isIgnoreNulls()) {
-      String fnName = getFnCall().getFnName().getFunction();
-      if (!fnName.equals(LAST_VALUE) && !fnName.equals(FIRST_VALUE)) {
-        throw new AnalysisException("Function " + fnName.toUpperCase()
-            + " does not accept the keyword IGNORE NULLS.");
-      }
-    }
-
-    // check for correct composition of analytic expr
-    Function fn = getFnCall().getFn();
-    if (!(fn instanceof AggregateFunction)) {
-        throw new AnalysisException(
-            "OVER clause requires aggregate or analytic function: "
-              + getFnCall().toSql());
-    }
-
-    // check for non-analytic aggregate functions
-    if (!isAnalyticFn(fn)) {
-      throw new AnalysisException(
-          String.format("Aggregate function '%s' not supported with OVER clause.",
-              getFnCall().toSql()));
-    }
-
-    if (isAnalyticFn(fn) && !isAggregateFn(fn)) {
-      if (orderByElements_.isEmpty()) {
-        throw new AnalysisException(
-            "'" + getFnCall().toSql() + "' requires an ORDER BY clause");
-      }
-      if ((isRankingFn(fn) || isOffsetFn(fn)) && window_ != null) {
-        throw new AnalysisException(
-            "Windowing clause not allowed with '" + getFnCall().toSql() + "'");
-      }
-      if (isOffsetFn(fn) && getFnCall().getChildren().size() > 1) {
-        checkOffset(analyzer);
-        // check the default, which needs to be a constant at the moment
-        // TODO: remove this check when the backend can handle non-constants
-        if (getFnCall().getChildren().size() > 2) {
-          if (!getFnCall().getChild(2).isConstant()) {
-            throw new AnalysisException(
-                "The default parameter (parameter 3) of LEAD/LAG must be a constant: "
-                  + getFnCall().toSql());
-          }
-        }
-      }
-      if (isNtileFn(fn)) {
-        // TODO: IMPALA-2171:Remove this when ntile() can handle a non-constant argument.
-        if (!getFnCall().getChild(0).isConstant()) {
-          throw new AnalysisException("NTILE() requires a constant argument");
-        }
-        // Check if argument value is zero or negative and throw an exception if found.
-        try {
-          TColumnValue bucketValue =
-              FeSupport.EvalConstExpr(getFnCall().getChild(0), analyzer.getQueryCtx());
-          Long arg = bucketValue.getLong_val();
-          if (arg <= 0) {
-            throw new AnalysisException("NTILE() requires a positive argument: " + arg);
-          }
-        } catch (InternalException e) {
-          throw new AnalysisException(e.toString());
-        }
-      }
-    }
-
-    if (window_ != null) {
-      if (orderByElements_.isEmpty()) {
-        throw new AnalysisException("Windowing clause requires ORDER BY clause: "
-            + toSql());
-      }
-      window_.analyze(analyzer);
-
-      if (!orderByElements_.isEmpty()
-          && window_.getType() == AnalyticWindow.Type.RANGE) {
-        // check that preceding/following ranges match ordering
-        if (window_.getLeftBoundary().getType().isOffset()) {
-          checkRangeOffsetBoundaryExpr(window_.getLeftBoundary());
-        }
-        if (window_.getRightBoundary() != null
-            && window_.getRightBoundary().getType().isOffset()) {
-          checkRangeOffsetBoundaryExpr(window_.getRightBoundary());
-        }
-      }
-    }
-
-    // check nesting
-    if (TreeNode.contains(getChildren(), AnalyticExpr.class)) {
-      throw new AnalysisException(
-          "Nesting of analytic expressions is not allowed: " + toSql());
-    }
-    sqlString_ = toSql();
-
-    standardize(analyzer);
-
-    // min/max is not currently supported on sliding windows (i.e. start bound is not
-    // unbounded).
-    if (window_ != null && isMinMax(fn) &&
-        window_.getLeftBoundary().getType() != BoundaryType.UNBOUNDED_PRECEDING) {
-      throw new AnalysisException(
-          "'" + getFnCall().toSql() + "' is only supported with an "
-            + "UNBOUNDED PRECEDING start bound.");
-    }
-
-    setChildren();
-  }
-
-  /**
-   * If necessary, rewrites the analytic function, window, and/or order-by elements into
-   * a standard format for the purpose of simpler backend execution, as follows:
-   * 1. row_number():
-   *    Set a window from UNBOUNDED PRECEDING to CURRENT_ROW.
-   * 2. lead()/lag():
-   *    Explicitly set the default arguments for BE simplicity.
-   *    Set a window for lead(): UNBOUNDED PRECEDING to OFFSET FOLLOWING.
-   *    Set a window for lag(): UNBOUNDED PRECEDING to OFFSET PRECEDING.
-   * 3. FIRST_VALUE without UNBOUNDED PRECEDING or IGNORE NULLS gets rewritten to use a
-   *    different window and function. There are a few cases:
-   *     a) Start bound is X FOLLOWING or CURRENT ROW (X=0):
-   *        Use 'last_value' with a window where both bounds are X FOLLOWING (or
-   *        CURRENT ROW). Setting the start bound to X following is necessary because the
-   *        X rows at the end of a partition have no rows in their window. Note that X
-   *        FOLLOWING could be rewritten as lead(X) but that would not work for CURRENT
-   *        ROW.
-   *     b) Start bound is X PRECEDING and end bound is CURRENT ROW or FOLLOWING:
-   *        Use 'first_value_rewrite' and a window with an end bound X PRECEDING. An
-   *        extra parameter '-1' is added to indicate to the backend that NULLs should
-   *        not be added for the first X rows.
-   *     c) Start bound is X PRECEDING and end bound is Y PRECEDING:
-   *        Use 'first_value_rewrite' and a window with an end bound X PRECEDING. The
-   *        first Y rows in a partition have empty windows and should be NULL. An extra
-   *        parameter with the integer constant Y is added to indicate to the backend
-   *        that NULLs should be added for the first Y rows.
-   *    The performance optimization here and in 5. below cannot be applied in the case of
-   *    IGNORE NULLS because they change what values appear in the window, which in the
-   *    IGNORE NULLS case could mean the correct value to return isn't even in the window,
-   *    e.g. if all of the values in the rewritten window are NULL but one of the values in
-   *    the original window isn't.
-   * 4. Start bound is not UNBOUNDED PRECEDING and either the end bound is UNBOUNDED
-   *    FOLLOWING or the function is first_value(... ignore nulls):
-   *    Reverse the ordering and window, and flip first_value() and last_value().
-   * 5. first_value() with UNBOUNDED PRECEDING and not IGNORE NULLS:
-   *    Set the end boundary to CURRENT_ROW.
-   * 6. Explicitly set the default window if no window was given but there
-   *    are order-by elements.
-   * 7. first/last_value() with RANGE window:
-   *    Rewrite as a ROWS window.
-   * 8. Rewrite IGNORE NULLS as regular FunctionCallExprs with '_ignore_nulls'
-   *    appended to the function name, because the BE implements them as different
-   *    functions.
-   */
-  private void standardize(Analyzer analyzer) {
-    FunctionName analyticFnName = getFnCall().getFnName();
-
-    // 1. Set a window from UNBOUNDED PRECEDING to CURRENT_ROW for row_number().
-    if (analyticFnName.getFunction().equals(ROWNUMBER)) {
-      Preconditions.checkState(window_ == null, "Unexpected window set for row_number()");
-      window_ = new AnalyticWindow(AnalyticWindow.Type.ROWS,
-          new Boundary(BoundaryType.UNBOUNDED_PRECEDING, null),
-          new Boundary(BoundaryType.CURRENT_ROW, null));
-      resetWindow_ = true;
-      return;
-    }
-
-    // 2. Explicitly set the default arguments to lead()/lag() for BE simplicity.
-    // Set a window for lead(): UNBOUNDED PRECEDING to OFFSET FOLLOWING,
-    // Set a window for lag(): UNBOUNDED PRECEDING to OFFSET PRECEDING.
-    if (isOffsetFn(getFnCall().getFn())) {
-      Preconditions.checkState(window_ == null);
-
-      // If necessary, create a new fn call with the default args explicitly set.
-      List<Expr> newExprParams = null;
-      if (getFnCall().getChildren().size() == 1) {
-        newExprParams = Lists.newArrayListWithExpectedSize(3);
-        newExprParams.addAll(getFnCall().getChildren());
-        // Default offset is 1.
-        newExprParams.add(new NumericLiteral(BigDecimal.valueOf(1)));
-        // Default default value is NULL.
-        newExprParams.add(new NullLiteral());
-      } else if (getFnCall().getChildren().size() == 2) {
-        newExprParams = Lists.newArrayListWithExpectedSize(3);
-        newExprParams.addAll(getFnCall().getChildren());
-        // Default default value is NULL.
-        newExprParams.add(new NullLiteral());
-      } else {
-        Preconditions.checkState(getFnCall().getChildren().size() == 3);
-      }
-      if (newExprParams != null) {
-        fnCall_ = new FunctionCallExpr(getFnCall().getFnName(),
-            new FunctionParams(newExprParams));
-        fnCall_.setIsAnalyticFnCall(true);
-        fnCall_.analyzeNoThrow(analyzer);
-      }
-
-      // Set the window.
-      BoundaryType rightBoundaryType = BoundaryType.FOLLOWING;
-      if (analyticFnName.getFunction().equals(LAG)) {
-        rightBoundaryType = BoundaryType.PRECEDING;
-      }
-      window_ = new AnalyticWindow(AnalyticWindow.Type.ROWS,
-          new Boundary(BoundaryType.UNBOUNDED_PRECEDING, null),
-          new Boundary(rightBoundaryType, getOffsetExpr(getFnCall())));
-      try {
-        window_.analyze(analyzer);
-      } catch (AnalysisException e) {
-        throw new IllegalStateException(e);
-      }
-      resetWindow_ = true;
-      return;
-    }
-
-    // 3.
-    if (analyticFnName.getFunction().equals(FIRST_VALUE)
-        && window_ != null
-        && window_.getLeftBoundary().getType() != BoundaryType.UNBOUNDED_PRECEDING
-        && !getFnCall().getParams().isIgnoreNulls()) {
-      if (window_.getLeftBoundary().getType() != BoundaryType.PRECEDING) {
-        window_ = new AnalyticWindow(window_.getType(), window_.getLeftBoundary(),
-            window_.getLeftBoundary());
-        fnCall_ = new FunctionCallExpr(new FunctionName(LAST_VALUE),
-            getFnCall().getParams());
-      } else {
-        List<Expr> paramExprs = Expr.cloneList(getFnCall().getParams().exprs());
-        if (window_.getRightBoundary().getType() == BoundaryType.PRECEDING) {
-          // The number of rows preceding for the end bound determines the number of
-          // rows at the beginning of each partition that should have a NULL value.
-          paramExprs.add(new NumericLiteral(window_.getRightBoundary().getOffsetValue(),
-              Type.BIGINT));
-        } else {
-          // -1 indicates that no NULL values are inserted even though we set the end
-          // bound to the start bound (which is PRECEDING) below; this is different from
-          // the default behavior of windows with an end bound PRECEDING.
-          paramExprs.add(new NumericLiteral(BigInteger.valueOf(-1), Type.BIGINT));
-        }
-
-        window_ = new AnalyticWindow(window_.getType(),
-            new Boundary(BoundaryType.UNBOUNDED_PRECEDING, null),
-            window_.getLeftBoundary());
-        fnCall_ = new FunctionCallExpr(new FunctionName(FIRST_VALUE_REWRITE),
-            new FunctionParams(paramExprs));
-        fnCall_.setIsInternalFnCall(true);
-      }
-      fnCall_.setIsAnalyticFnCall(true);
-      fnCall_.analyzeNoThrow(analyzer);
-      // Use getType() instead of getReturnType() because wildcard decimals
-      // have only been resolved in the former.
-      type_ = fnCall_.getType();
-      analyticFnName = getFnCall().getFnName();
-    }
-
-    // 4. Reverse the ordering and window for windows not starting with UNBOUNDED
-    // PRECEDING and either: ending with UNBOUNDED FOLLOWING or
-    // first_value(... ignore nulls)
-    if (window_ != null
-        && window_.getLeftBoundary().getType() != BoundaryType.UNBOUNDED_PRECEDING
-        && (window_.getRightBoundary().getType() == BoundaryType.UNBOUNDED_FOLLOWING
-            || (analyticFnName.getFunction().equals(FIRST_VALUE)
-                && getFnCall().getParams().isIgnoreNulls()))) {
-      orderByElements_ = OrderByElement.reverse(orderByElements_);
-      window_ = window_.reverse();
-
-      // Also flip first_value()/last_value(). For other analytic functions there is no
-      // need to also change the function.
-      FunctionName reversedFnName = null;
-      if (analyticFnName.getFunction().equals(FIRST_VALUE)) {
-        reversedFnName = new FunctionName(LAST_VALUE);
-      } else if (analyticFnName.getFunction().equals(LAST_VALUE)) {
-        reversedFnName = new FunctionName(FIRST_VALUE);
-      }
-      if (reversedFnName != null) {
-        fnCall_ = new FunctionCallExpr(reversedFnName, getFnCall().getParams());
-        fnCall_.setIsAnalyticFnCall(true);
-        fnCall_.analyzeNoThrow(analyzer);
-      }
-      analyticFnName = getFnCall().getFnName();
-    }
-
-    // 5. Set the end boundary to CURRENT_ROW for first_value() if the start boundary
-    // is UNBOUNDED_PRECEDING and IGNORE NULLS is not set.
-    if (analyticFnName.getFunction().equals(FIRST_VALUE)
-        && window_ != null
-        && window_.getLeftBoundary().getType() == BoundaryType.UNBOUNDED_PRECEDING
-        && window_.getRightBoundary().getType() != BoundaryType.PRECEDING
-        && !getFnCall().getParams().isIgnoreNulls()) {
-      window_.setRightBoundary(new Boundary(BoundaryType.CURRENT_ROW, null));
-    }
-
-    // 6. Set the default window.
-    if (!orderByElements_.isEmpty() && window_ == null) {
-      window_ = AnalyticWindow.DEFAULT_WINDOW;
-      resetWindow_ = true;
-    }
-
-    // 7. Change first_value/last_value RANGE windows to ROWS.
-    if ((analyticFnName.getFunction().equals(FIRST_VALUE)
-         || analyticFnName.getFunction().equals(LAST_VALUE))
-        && window_ != null
-        && window_.getType() == AnalyticWindow.Type.RANGE) {
-      window_ = new AnalyticWindow(AnalyticWindow.Type.ROWS, window_.getLeftBoundary(),
-          window_.getRightBoundary());
-    }
-
-    // 8. Append IGNORE NULLS to fn name if set.
-    if (getFnCall().getParams().isIgnoreNulls()) {
-      if (analyticFnName.getFunction().equals(LAST_VALUE)) {
-        fnCall_ = new FunctionCallExpr(new FunctionName(LAST_VALUE_IGNORE_NULLS),
-            getFnCall().getParams());
-      } else {
-        Preconditions.checkState(analyticFnName.getFunction().equals(FIRST_VALUE));
-        fnCall_ = new FunctionCallExpr(new FunctionName(FIRST_VALUE_IGNORE_NULLS),
-            getFnCall().getParams());
-      }
-
-      fnCall_.setIsAnalyticFnCall(true);
-      fnCall_.setIsInternalFnCall(true);
-      fnCall_.analyzeNoThrow(analyzer);
-      analyticFnName = getFnCall().getFnName();
-      Preconditions.checkState(type_.equals(fnCall_.getType()));
-    }
-  }
-
-  /**
-   * Returns the explicit or implicit offset of an analytic function call.
-   */
-  private Expr getOffsetExpr(FunctionCallExpr offsetFnCall) {
-    Preconditions.checkState(isOffsetFn(getFnCall().getFn()));
-    if (offsetFnCall.getChild(1) != null) return offsetFnCall.getChild(1);
-    // The default offset is 1.
-    return new NumericLiteral(BigDecimal.valueOf(1));
-  }
-
-  /**
-   * Keep fnCall_, partitionExprs_ and orderByElements_ in sync with children_.
-   */
-  private void syncWithChildren() {
-    int numArgs = fnCall_.getChildren().size();
-    for (int i = 0; i < numArgs; ++i) {
-      fnCall_.setChild(i, getChild(i));
-    }
-    int numPartitionExprs = partitionExprs_.size();
-    for (int i = 0; i < numPartitionExprs; ++i) {
-      partitionExprs_.set(i, getChild(numArgs + i));
-    }
-    for (int i = 0; i < orderByElements_.size(); ++i) {
-      orderByElements_.get(i).setExpr(getChild(numArgs + numPartitionExprs + i));
-    }
-  }
-
-  /**
-   * Populate children_ from fnCall_, partitionExprs_, orderByElements_
-   */
-  private void setChildren() {
-    getChildren().clear();
-    addChildren(fnCall_.getChildren());
-    addChildren(partitionExprs_);
-    for (OrderByElement e: orderByElements_) {
-      addChild(e.getExpr());
-    }
-    if (window_ != null) {
-      if (window_.getLeftBoundary().getExpr() != null) {
-        addChild(window_.getLeftBoundary().getExpr());
-      }
-      if (window_.getRightBoundary() != null
-          && window_.getRightBoundary().getExpr() != null) {
-        addChild(window_.getRightBoundary().getExpr());
-      }
-    }
-  }
-
-  @Override
-  protected void resetAnalysisState() {
-    super.resetAnalysisState();
-    fnCall_.resetAnalysisState();
-    if (resetWindow_) window_ = null;
-    resetWindow_ = false;
-    // sync with children, now that they've been reset
-    syncWithChildren();
-  }
-
-  @Override
-  protected Expr substituteImpl(ExprSubstitutionMap smap, Analyzer analyzer)
-      throws AnalysisException {
-    Expr e = super.substituteImpl(smap, analyzer);
-    if (!(e instanceof AnalyticExpr)) return e;
-    // Re-sync state after possible child substitution.
-    ((AnalyticExpr) e).syncWithChildren();
-    return e;
-  }
-}
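
The arithmetic behind the ntile() rewrite documented above can be sanity-checked
with a small standalone sketch (plain Java, no Impala classes; the bucket count
and partition size are made-up values):

    // Mirrors the documented formula:
    //   ntile(B) = floor(min(Count, B) * (RowNumber - 1) / Count) + 1
    // For a partition of 5 rows and 3 buckets this prints: 1 1 2 2 3
    public class NtileRewriteDemo {
      public static void main(String[] args) {
        long numBuckets = 3;   // B
        long count = 5;        // rows in the partition
        for (long rowNumber = 1; rowNumber <= count; ++rowNumber) {
          // Integer division of non-negative longs is the floor.
          long bucket = Math.min(count, numBuckets) * (rowNumber - 1) / count + 1;
          System.out.print(bucket + " ");
        }
        System.out.println();
      }
    }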

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/AnalyticInfo.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/AnalyticInfo.java b/fe/src/main/java/com/cloudera/impala/analysis/AnalyticInfo.java
deleted file mode 100644
index d0d1a85..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/AnalyticInfo.java
+++ /dev/null
@@ -1,199 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.catalog.Type;
-import com.google.common.base.Objects;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-/**
- * Encapsulates the analytic functions found in a single select block plus
- * the corresponding analytic result tuple and its substitution map.
- */
-public class AnalyticInfo extends AggregateInfoBase {
-  private final static Logger LOG = LoggerFactory.getLogger(AnalyticInfo.class);
-
-  // All unique analytic exprs of a select block. Used to populate
-  // super.aggregateExprs_ based on AnalyticExpr.getFnCall() for each analytic expr
-  // in this list.
-  private final ArrayList<Expr> analyticExprs_;
-
-  // Intersection of the partition exps of all the analytic functions.
-  private final List<Expr> commonPartitionExprs_;
-
-  // map from analyticExprs_ to their corresponding analytic tuple slotrefs
-  private final ExprSubstitutionMap analyticTupleSmap_;
-
-  private AnalyticInfo(ArrayList<Expr> analyticExprs) {
-    super(new ArrayList<Expr>(), new ArrayList<FunctionCallExpr>());
-    analyticExprs_ = Expr.cloneList(analyticExprs);
-    // Extract the analytic function calls for each analytic expr.
-    for (Expr analyticExpr: analyticExprs) {
-      aggregateExprs_.add(((AnalyticExpr) analyticExpr).getFnCall());
-    }
-    analyticTupleSmap_ = new ExprSubstitutionMap();
-    commonPartitionExprs_ = computeCommonPartitionExprs();
-  }
-
-  /**
-   * C'tor for cloning.
-   */
-  private AnalyticInfo(AnalyticInfo other) {
-    super(other);
-    analyticExprs_ =
-        (other.analyticExprs_ != null) ? Expr.cloneList(other.analyticExprs_) : null;
-    analyticTupleSmap_ = other.analyticTupleSmap_.clone();
-    commonPartitionExprs_ = Expr.cloneList(other.commonPartitionExprs_);
-  }
-
-  public ArrayList<Expr> getAnalyticExprs() { return analyticExprs_; }
-  public ExprSubstitutionMap getSmap() { return analyticTupleSmap_; }
-  public List<Expr> getCommonPartitionExprs() { return commonPartitionExprs_; }
-
-  /**
-   * Creates complete AnalyticInfo for analyticExprs, including tuple descriptors and
-   * smaps.
-   */
-  static public AnalyticInfo create(
-      ArrayList<Expr> analyticExprs, Analyzer analyzer) {
-    Preconditions.checkState(analyticExprs != null && !analyticExprs.isEmpty());
-    Expr.removeDuplicates(analyticExprs);
-    AnalyticInfo result = new AnalyticInfo(analyticExprs);
-    result.createTupleDescs(analyzer);
-
-    // The tuple descriptors are logical. Their slots are remapped to physical tuples
-    // during plan generation.
-    result.outputTupleDesc_.setIsMaterialized(false);
-    result.intermediateTupleDesc_.setIsMaterialized(false);
-
-    // Populate analyticTupleSmap_
-    Preconditions.checkState(analyticExprs.size() ==
-        result.outputTupleDesc_.getSlots().size());
-    for (int i = 0; i < analyticExprs.size(); ++i) {
-      result.analyticTupleSmap_.put(result.analyticExprs_.get(i),
-          new SlotRef(result.outputTupleDesc_.getSlots().get(i)));
-      result.outputTupleDesc_.getSlots().get(i).setSourceExpr(
-          result.analyticExprs_.get(i));
-    }
-    LOG.trace("analytictuple=" + result.outputTupleDesc_.debugString());
-    LOG.trace("analytictuplesmap=" + result.analyticTupleSmap_.debugString());
-    LOG.trace("analytic info:\n" + result.debugString());
-    return result;
-  }
-
-  /**
-   * Returns the intersection of the partition exprs of all the
-   * analytic functions.
-   */
-  private List<Expr> computeCommonPartitionExprs() {
-    List<Expr> result = Lists.newArrayList();
-    for (Expr analyticExpr: analyticExprs_) {
-      Preconditions.checkState(analyticExpr.isAnalyzed_);
-      List<Expr> partitionExprs = ((AnalyticExpr) analyticExpr).getPartitionExprs();
-      if (partitionExprs == null) continue;
-      if (result.isEmpty()) {
-        result.addAll(partitionExprs);
-      } else {
-        result.retainAll(partitionExprs);
-        if (result.isEmpty()) break;
-      }
-    }
-    return result;
-  }
-
-  /**
-   * Append ids of all slots that are being referenced in the process
-   * of performing the analytic computation described by this AnalyticInfo.
-   */
-  public void getRefdSlots(List<SlotId> ids) {
-    Preconditions.checkState(intermediateTupleDesc_ != null);
-    Expr.getIds(analyticExprs_, null, ids);
-    // The backend assumes that the entire intermediateTupleDesc is materialized
-    for (SlotDescriptor slotDesc: intermediateTupleDesc_.getSlots()) {
-      ids.add(slotDesc.getId());
-    }
-  }
-
-  @Override
-  public void materializeRequiredSlots(Analyzer analyzer, ExprSubstitutionMap smap) {
-    materializedSlots_.clear();
-    List<Expr> exprs = Lists.newArrayList();
-    for (int i = 0; i < analyticExprs_.size(); ++i) {
-      SlotDescriptor outputSlotDesc = outputTupleDesc_.getSlots().get(i);
-      if (!outputSlotDesc.isMaterialized()) continue;
-      intermediateTupleDesc_.getSlots().get(i).setIsMaterialized(true);
-      exprs.add(analyticExprs_.get(i));
-      materializedSlots_.add(i);
-    }
-    List<Expr> resolvedExprs = Expr.substituteList(exprs, smap, analyzer, false);
-    analyzer.materializeSlots(resolvedExprs);
-  }
-
-  /**
-   * Validates internal state: Checks that the number of materialized slots of the
-   * analytic tuple corresponds to the number of materialized analytic functions. Also
-   * checks that the return types of the analytic exprs correspond to the slots in the
-   * analytic tuple.
-   */
-  public void checkConsistency() {
-    ArrayList<SlotDescriptor> slots = intermediateTupleDesc_.getSlots();
-
-    // Check materialized slots.
-    int numMaterializedSlots = 0;
-    for (SlotDescriptor slotDesc: slots) {
-      if (slotDesc.isMaterialized()) ++numMaterializedSlots;
-    }
-    Preconditions.checkState(numMaterializedSlots ==
-        materializedSlots_.size());
-
-    // Check that analytic expr return types match the slot descriptors.
-    int slotIdx = 0;
-    for (int i = 0; i < analyticExprs_.size(); ++i) {
-      Expr analyticExpr = analyticExprs_.get(i);
-      Type slotType = slots.get(slotIdx).getType();
-      Preconditions.checkState(analyticExpr.getType().equals(slotType),
-          String.format("Analytic expr %s returns type %s but its analytic tuple " +
-              "slot has type %s", analyticExpr.toSql(),
-              analyticExpr.getType().toString(), slotType.toString()));
-      ++slotIdx;
-    }
-  }
-
-  @Override
-  public String debugString() {
-    StringBuilder out = new StringBuilder(super.debugString());
-    out.append(Objects.toStringHelper(this)
-        .add("analytic_exprs", Expr.debugString(analyticExprs_))
-        .add("smap", analyticTupleSmap_.debugString())
-        .toString());
-    return out.toString();
-  }
-
-  @Override
-  protected String tupleDebugName() { return "analytic-tuple"; }
-
-  @Override
-  public AnalyticInfo clone() { return new AnalyticInfo(this); }
-}
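
To illustrate how computeCommonPartitionExprs() above intersects the PARTITION BY
lists of all analytic exprs in a select block, here is a minimal standalone sketch
of the same loop, with hypothetical String keys standing in for the analyzed
partition exprs:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    public class CommonPartitionExprsDemo {
      public static void main(String[] args) {
        // e.g. sum(x) over (partition by a, b ...) and rank() over (partition by a ...)
        List<List<String>> partitionLists =
            Arrays.asList(Arrays.asList("a", "b"), Arrays.asList("a"));
        List<String> common = new ArrayList<>();
        for (List<String> partitionExprs : partitionLists) {
          if (common.isEmpty()) {
            common.addAll(partitionExprs);
          } else {
            common.retainAll(partitionExprs);
            if (common.isEmpty()) break;
          }
        }
        System.out.println(common);  // prints [a]
      }
    }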

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/AnalyticWindow.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/AnalyticWindow.java b/fe/src/main/java/com/cloudera/impala/analysis/AnalyticWindow.java
deleted file mode 100644
index 68558da..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/AnalyticWindow.java
+++ /dev/null
@@ -1,417 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.math.BigDecimal;
-
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.service.FeSupport;
-import com.cloudera.impala.thrift.TAnalyticWindow;
-import com.cloudera.impala.thrift.TAnalyticWindowBoundary;
-import com.cloudera.impala.thrift.TAnalyticWindowBoundaryType;
-import com.cloudera.impala.thrift.TAnalyticWindowType;
-import com.cloudera.impala.thrift.TColumnValue;
-import com.cloudera.impala.util.TColumnValueUtil;
-import com.google.common.base.Preconditions;
-
-
-/**
- * Windowing clause of an analytic expr
- * Both left and right boundaries are always non-null after analyze().
- */
-public class AnalyticWindow {
-  // default window used when an analytic expr was given an order by but no window
-  public static final AnalyticWindow DEFAULT_WINDOW = new AnalyticWindow(Type.RANGE,
-      new Boundary(BoundaryType.UNBOUNDED_PRECEDING, null),
-      new Boundary(BoundaryType.CURRENT_ROW, null));
-
-  enum Type {
-    ROWS("ROWS"),
-    RANGE("RANGE");
-
-    private final String description_;
-
-    private Type(String d) {
-      description_ = d;
-    }
-
-    @Override
-    public String toString() { return description_; }
-    public TAnalyticWindowType toThrift() {
-      return this == ROWS ? TAnalyticWindowType.ROWS : TAnalyticWindowType.RANGE;
-    }
-  }
-
-  enum BoundaryType {
-    UNBOUNDED_PRECEDING("UNBOUNDED PRECEDING"),
-    UNBOUNDED_FOLLOWING("UNBOUNDED FOLLOWING"),
-    CURRENT_ROW("CURRENT ROW"),
-    PRECEDING("PRECEDING"),
-    FOLLOWING("FOLLOWING");
-
-    private final String description_;
-
-    private BoundaryType(String d) {
-      description_ = d;
-    }
-
-    @Override
-    public String toString() { return description_; }
-    public TAnalyticWindowBoundaryType toThrift() {
-      Preconditions.checkState(!isAbsolutePos());
-      if (this == CURRENT_ROW) {
-        return TAnalyticWindowBoundaryType.CURRENT_ROW;
-      } else if (this == PRECEDING) {
-        return TAnalyticWindowBoundaryType.PRECEDING;
-      } else if (this == FOLLOWING) {
-        return TAnalyticWindowBoundaryType.FOLLOWING;
-      }
-      return null;
-    }
-
-    public boolean isAbsolutePos() {
-      return this == UNBOUNDED_PRECEDING || this == UNBOUNDED_FOLLOWING;
-    }
-
-    public boolean isOffset() {
-      return this == PRECEDING || this == FOLLOWING;
-    }
-
-    public boolean isPreceding() {
-      return this == UNBOUNDED_PRECEDING || this == PRECEDING;
-    }
-
-    public boolean isFollowing() {
-      return this == UNBOUNDED_FOLLOWING || this == FOLLOWING;
-    }
-
-    public BoundaryType converse() {
-      switch (this) {
-        case UNBOUNDED_PRECEDING: return UNBOUNDED_FOLLOWING;
-        case UNBOUNDED_FOLLOWING: return UNBOUNDED_PRECEDING;
-        case PRECEDING: return FOLLOWING;
-        case FOLLOWING: return PRECEDING;
-        default: return CURRENT_ROW;
-      }
-    }
-  }
-
-  public static class Boundary {
-    private final BoundaryType type_;
-
-    // Offset expr. Only set for PRECEDING/FOLLOWING. Needed for toSql().
-    private final Expr expr_;
-
-    // The offset value. Set during analysis after evaluating expr_. Integral valued
-    // for ROWS windows.
-    private BigDecimal offsetValue_;
-
-    public BoundaryType getType() { return type_; }
-    public Expr getExpr() { return expr_; }
-    public BigDecimal getOffsetValue() { return offsetValue_; }
-
-    public Boundary(BoundaryType type, Expr e) {
-      this(type, e, null);
-    }
-
-    // c'tor used by clone()
-    private Boundary(BoundaryType type, Expr e, BigDecimal offsetValue) {
-      Preconditions.checkState(
-        (type.isOffset() && e != null)
-        || (!type.isOffset() && e == null));
-      type_ = type;
-      expr_ = e;
-      offsetValue_ = offsetValue;
-    }
-
-    public String toSql() {
-      StringBuilder sb = new StringBuilder();
-      if (expr_ != null) sb.append(expr_.toSql()).append(" ");
-      sb.append(type_.toString());
-      return sb.toString();
-    }
-
-    public TAnalyticWindowBoundary toThrift(Type windowType) {
-      TAnalyticWindowBoundary result = new TAnalyticWindowBoundary(type_.toThrift());
-      if (type_.isOffset() && windowType == Type.ROWS) {
-        result.setRows_offset_value(offsetValue_.longValue());
-      }
-      // TODO: range windows need range_offset_predicate
-      return result;
-    }
-
-    @Override
-    public boolean equals(Object obj) {
-      if (obj == null) return false;
-      if (obj.getClass() != this.getClass()) return false;
-      Boundary o = (Boundary)obj;
-      boolean exprEqual = (expr_ == null) == (o.expr_ == null);
-      if (exprEqual && expr_ != null) exprEqual = expr_.equals(o.expr_);
-      return type_ == o.type_ && exprEqual;
-    }
-
-    public Boundary converse() {
-      Boundary result = new Boundary(type_.converse(),
-          (expr_ != null) ? expr_.clone() : null);
-      result.offsetValue_ = offsetValue_;
-      return result;
-    }
-
-    @Override
-    public Boundary clone() {
-      return new Boundary(type_, expr_ != null ? expr_.clone() : null, offsetValue_);
-    }
-
-    public void analyze(Analyzer analyzer) throws AnalysisException {
-      if (expr_ != null) expr_.analyze(analyzer);
-    }
-  }
-
-  private final Type type_;
-  private final Boundary leftBoundary_;
-  private Boundary rightBoundary_;  // may be null before analyze()
-  private String toSqlString_;  // cached after analysis
-
-  public Type getType() { return type_; }
-  public Boundary getLeftBoundary() { return leftBoundary_; }
-  public Boundary getRightBoundary() { return rightBoundary_; }
-  public Boundary setRightBoundary(Boundary b) { return rightBoundary_ = b; }
-
-  public AnalyticWindow(Type type, Boundary b) {
-    type_ = type;
-    Preconditions.checkNotNull(b);
-    leftBoundary_ = b;
-    rightBoundary_ = null;
-  }
-
-  public AnalyticWindow(Type type, Boundary l, Boundary r) {
-    type_ = type;
-    Preconditions.checkNotNull(l);
-    leftBoundary_ = l;
-    Preconditions.checkNotNull(r);
-    rightBoundary_ = r;
-  }
-
-  /**
-   * Clone c'tor
-   */
-  private AnalyticWindow(AnalyticWindow other) {
-    type_ = other.type_;
-    Preconditions.checkNotNull(other.leftBoundary_);
-    leftBoundary_ = other.leftBoundary_.clone();
-    if (other.rightBoundary_ != null) {
-      rightBoundary_ = other.rightBoundary_.clone();
-    }
-    toSqlString_ = other.toSqlString_;  // safe to share
-  }
-
-  public AnalyticWindow reverse() {
-    Boundary newRightBoundary = leftBoundary_.converse();
-    Boundary newLeftBoundary = null;
-    if (rightBoundary_ == null) {
-      newLeftBoundary = new Boundary(leftBoundary_.getType(), null);
-    } else {
-      newLeftBoundary = rightBoundary_.converse();
-    }
-    return new AnalyticWindow(type_, newLeftBoundary, newRightBoundary);
-  }
-
-  public String toSql() {
-    if (toSqlString_ != null) return toSqlString_;
-    StringBuilder sb = new StringBuilder();
-    sb.append(type_.toString()).append(" ");
-    if (rightBoundary_ == null) {
-      sb.append(leftBoundary_.toSql());
-    } else {
-      sb.append("BETWEEN ").append(leftBoundary_.toSql()).append(" AND ");
-      sb.append(rightBoundary_.toSql());
-    }
-    return sb.toString();
-  }
-
-  public TAnalyticWindow toThrift() {
-    TAnalyticWindow result = new TAnalyticWindow(type_.toThrift());
-    if (leftBoundary_.getType() != BoundaryType.UNBOUNDED_PRECEDING) {
-      result.setWindow_start(leftBoundary_.toThrift(type_));
-    }
-    Preconditions.checkNotNull(rightBoundary_);
-    if (rightBoundary_.getType() != BoundaryType.UNBOUNDED_FOLLOWING) {
-      result.setWindow_end(rightBoundary_.toThrift(type_));
-    }
-    return result;
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (obj == null) return false;
-    if (obj.getClass() != this.getClass()) return false;
-    AnalyticWindow o = (AnalyticWindow)obj;
-    boolean rightBoundaryEqual =
-        (rightBoundary_ == null) == (o.rightBoundary_ == null);
-    if (rightBoundaryEqual && rightBoundary_ != null) {
-      rightBoundaryEqual = rightBoundary_.equals(o.rightBoundary_);
-    }
-    return type_ == o.type_
-        && leftBoundary_.equals(o.leftBoundary_)
-        && rightBoundaryEqual;
-  }
-
-  @Override
-  public AnalyticWindow clone() { return new AnalyticWindow(this); }
-
-  /**
-   * Semantic analysis for expr of a PRECEDING/FOLLOWING clause.
-   */
-  private void checkOffsetExpr(Analyzer analyzer, Boundary boundary)
-      throws AnalysisException {
-    Preconditions.checkState(boundary.getType().isOffset());
-    Expr e = boundary.getExpr();
-    Preconditions.checkNotNull(e);
-    boolean isPos = true;
-    Double val = null;
-    if (e.isConstant() && e.getType().isNumericType()) {
-      try {
-        val = TColumnValueUtil.getNumericVal(
-            FeSupport.EvalConstExpr(e, analyzer.getQueryCtx()));
-        if (val <= 0) isPos = false;
-      } catch (InternalException exc) {
-        throw new AnalysisException(
-            "Couldn't evaluate PRECEDING/FOLLOWING expression: " + exc.getMessage());
-      }
-    }
-
-    if (type_ == Type.ROWS) {
-      if (!e.isConstant() || !e.getType().isIntegerType() || !isPos) {
-        throw new AnalysisException(
-            "For ROWS window, the value of a PRECEDING/FOLLOWING offset must be a "
-              + "constant positive integer: " + boundary.toSql());
-      }
-      Preconditions.checkNotNull(val);
-      boundary.offsetValue_ = new BigDecimal(val.longValue());
-    } else {
-      if (!e.isConstant() || !e.getType().isNumericType() || !isPos) {
-        throw new AnalysisException(
-            "For RANGE window, the value of a PRECEDING/FOLLOWING offset must be a "
-              + "constant positive number: " + boundary.toSql());
-      }
-      boundary.offsetValue_ = new BigDecimal(val);
-    }
-  }
-
-  /**
-   * Check that b1 <= b2.
-   */
-  private void checkOffsetBoundaries(Analyzer analyzer, Boundary b1, Boundary b2)
-      throws AnalysisException {
-    Preconditions.checkState(b1.getType().isOffset());
-    Preconditions.checkState(b2.getType().isOffset());
-    Expr e1 = b1.getExpr();
-    Preconditions.checkState(
-        e1 != null && e1.isConstant() && e1.getType().isNumericType());
-    Expr e2 = b2.getExpr();
-    Preconditions.checkState(
-        e2 != null && e2.isConstant() && e2.getType().isNumericType());
-
-    try {
-      TColumnValue val1 = FeSupport.EvalConstExpr(e1, analyzer.getQueryCtx());
-      TColumnValue val2 = FeSupport.EvalConstExpr(e2, analyzer.getQueryCtx());
-      double left = TColumnValueUtil.getNumericVal(val1);
-      double right = TColumnValueUtil.getNumericVal(val2);
-      if (left > right) {
-        throw new AnalysisException(
-            "Offset boundaries are in the wrong order: " + toSql());
-      }
-    } catch (InternalException exc) {
-      throw new AnalysisException(
-          "Couldn't evaluate PRECEDING/FOLLOWING expression: " + exc.getMessage());
-    }
-
-  }
-
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    leftBoundary_.analyze(analyzer);
-    if (rightBoundary_ != null) rightBoundary_.analyze(analyzer);
-
-    if (leftBoundary_.getType() == BoundaryType.UNBOUNDED_FOLLOWING) {
-      throw new AnalysisException(
-          leftBoundary_.getType().toString() + " is only allowed for upper bound of "
-            + "BETWEEN");
-    }
-    if (rightBoundary_ != null
-        && rightBoundary_.getType() == BoundaryType.UNBOUNDED_PRECEDING) {
-      throw new AnalysisException(
-          rightBoundary_.getType().toString() + " is only allowed for lower bound of "
-            + "BETWEEN");
-    }
-
-    // TODO: Remove when RANGE windows with offset boundaries are supported.
-    if (type_ == Type.RANGE) {
-      if (leftBoundary_.type_.isOffset()
-          || (rightBoundary_ != null && rightBoundary_.type_.isOffset())
-          || (leftBoundary_.type_ == BoundaryType.CURRENT_ROW
-              && (rightBoundary_ == null
-                  || rightBoundary_.type_ == BoundaryType.CURRENT_ROW))) {
-        throw new AnalysisException(
-            "RANGE is only supported with both the lower and upper bounds UNBOUNDED or"
-            + " one UNBOUNDED and the other CURRENT ROW.");
-      }
-    }
-
-    if (rightBoundary_ == null && leftBoundary_.getType() == BoundaryType.FOLLOWING) {
-      throw new AnalysisException(
-          leftBoundary_.getType().toString() + " requires a BETWEEN clause");
-    }
-
-    if (leftBoundary_.getType().isOffset()) checkOffsetExpr(analyzer, leftBoundary_);
-    if (rightBoundary_ == null) {
-      // set right boundary to implied value, but make sure to cache toSql string
-      // beforehand
-      toSqlString_ = toSql();
-      rightBoundary_ = new Boundary(BoundaryType.CURRENT_ROW, null);
-      return;
-    }
-    if (rightBoundary_.getType().isOffset()) checkOffsetExpr(analyzer, rightBoundary_);
-
-    if (leftBoundary_.getType() == BoundaryType.FOLLOWING) {
-      if (rightBoundary_.getType() != BoundaryType.FOLLOWING
-          && rightBoundary_.getType() != BoundaryType.UNBOUNDED_FOLLOWING) {
-        throw new AnalysisException(
-            "A lower window bound of " + BoundaryType.FOLLOWING.toString()
-              + " requires that the upper bound also be "
-              + BoundaryType.FOLLOWING.toString());
-      }
-      if (rightBoundary_.getType() != BoundaryType.UNBOUNDED_FOLLOWING) {
-        checkOffsetBoundaries(analyzer, leftBoundary_, rightBoundary_);
-      }
-    }
-
-    if (rightBoundary_.getType() == BoundaryType.PRECEDING) {
-      if (leftBoundary_.getType() != BoundaryType.PRECEDING
-          && leftBoundary_.getType() != BoundaryType.UNBOUNDED_PRECEDING) {
-        throw new AnalysisException(
-            "An upper window bound of " + BoundaryType.PRECEDING.toString()
-              + " requires that the lower bound also be "
-              + BoundaryType.PRECEDING.toString());
-      }
-      if (leftBoundary_.getType() != BoundaryType.UNBOUNDED_PRECEDING) {
-        checkOffsetBoundaries(analyzer, rightBoundary_, leftBoundary_);
-      }
-    }
-  }
-}
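
The reverse() method above is what AnalyticExpr.standardize() relies on (rule 4 in
that file) when it flips first_value()/last_value(). A minimal sketch, assuming
same-package visibility of the nested Type/BoundaryType enums and an import of
java.math.BigDecimal; illustrative fragment only:

    Expr two = new NumericLiteral(BigDecimal.valueOf(2));
    // ROWS BETWEEN 2 PRECEDING AND UNBOUNDED FOLLOWING
    AnalyticWindow window = new AnalyticWindow(AnalyticWindow.Type.ROWS,
        new AnalyticWindow.Boundary(AnalyticWindow.BoundaryType.PRECEDING, two),
        new AnalyticWindow.Boundary(AnalyticWindow.BoundaryType.UNBOUNDED_FOLLOWING, null));
    // reverse() converses both bounds, yielding
    // ROWS BETWEEN UNBOUNDED PRECEDING AND 2 FOLLOWING.
    AnalyticWindow reversed = window.reverse();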


[14/61] [partial] incubator-impala git commit: IMPALA-3786: Replace "cloudera" with "apache" (part 1)

Posted by kw...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/service/DescribeResultFactory.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/service/DescribeResultFactory.java b/fe/src/main/java/com/cloudera/impala/service/DescribeResultFactory.java
deleted file mode 100644
index c1a9557..0000000
--- a/fe/src/main/java/com/cloudera/impala/service/DescribeResultFactory.java
+++ /dev/null
@@ -1,246 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.service;
-
-import java.util.List;
-import java.util.Map;
-import java.util.Objects;
-
-import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
-import org.apache.hadoop.hive.metastore.api.PrincipalType;
-import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo;
-import org.apache.hadoop.hive.ql.metadata.formatting.MetaDataFormatUtils;
-
-import com.cloudera.impala.catalog.Column;
-import com.cloudera.impala.catalog.Db;
-import com.cloudera.impala.catalog.StructField;
-import com.cloudera.impala.catalog.StructType;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.thrift.TColumnValue;
-import com.cloudera.impala.thrift.TDescribeOutputStyle;
-import com.cloudera.impala.thrift.TDescribeResult;
-import com.cloudera.impala.thrift.TResultRow;
-import com.google.common.collect.Lists;
-
-/*
- * Builds results for DESCRIBE DATABASE and DESCRIBE table/path statements by
- * constructing and populating a TDescribeResult object.
- */
-public class DescribeResultFactory {
-  // Number of columns in each row of the DESCRIBE FORMATTED|EXTENDED result set.
-  private final static int NUM_DESC_FORMATTED_RESULT_COLS = 3;
-  // Empty column used to format description output table.
-  private final static TColumnValue EMPTY = new TColumnValue().setString_val("");
-
-  public static TDescribeResult buildDescribeDbResult(Db db,
-    TDescribeOutputStyle outputFormat) {
-    switch (outputFormat) {
-      case MINIMAL: return describeDbMinimal(db);
-      case FORMATTED:
-      case EXTENDED:
-        return describeDbExtended(db);
-      default: throw new UnsupportedOperationException(
-          "Unknown TDescribeOutputStyle value for describe database: " + outputFormat);
-    }
-  }
-
-  /*
-   * Builds results for a DESCRIBE DATABASE <db> command. This consists of the database
-   * name, location and comment.
-   */
-  private static TDescribeResult describeDbMinimal(Db db) {
-    TDescribeResult descResult = new TDescribeResult();
-
-    org.apache.hadoop.hive.metastore.api.Database msDb = db.getMetaStoreDb();
-    descResult.results = Lists.newArrayList();
-    String location = null;
-    String comment = null;
-    if(msDb != null) {
-      location = msDb.getLocationUri();
-      comment = msDb.getDescription();
-    }
-
-    TColumnValue dbNameCol = new TColumnValue();
-    dbNameCol.setString_val(db.getName());
-    TColumnValue dbLocationCol = new TColumnValue();
-    dbLocationCol.setString_val(Objects.toString(location, ""));
-    TColumnValue commentCol = new TColumnValue();
-    commentCol.setString_val(Objects.toString(comment, ""));
-    descResult.results.add(
-        new TResultRow(Lists.newArrayList(dbNameCol, dbLocationCol, commentCol)));
-    return descResult;
-  }
-
-  /*
-   * Helper function used to build privilege results.
-   */
-  private static void buildPrivilegeResult(
-      TDescribeResult descResult, Map<String, List<PrivilegeGrantInfo>> privilegeMap) {
-    if (privilegeMap == null) return;
-
-    for (Map.Entry<String, List<PrivilegeGrantInfo>> privilegeEntry:
-        privilegeMap.entrySet()) {
-      TColumnValue title = new TColumnValue();
-      title.setString_val("Privileges for " + privilegeEntry.getKey() + ": ");
-      descResult.results.add(
-          new TResultRow(Lists.newArrayList(title, EMPTY, EMPTY)));
-      for (PrivilegeGrantInfo privilegeInfo: privilegeEntry.getValue()) {
-        TColumnValue privilege = new TColumnValue();
-        privilege.setString_val(
-            privilegeInfo.getPrivilege() + " " + privilegeInfo.isGrantOption());
-        TColumnValue grantor = new TColumnValue();
-        grantor.setString_val(
-            privilegeInfo.getGrantor() + " " + privilegeInfo.getGrantorType());
-        TColumnValue grantTime = new TColumnValue();
-        grantTime.setString_val(privilegeInfo.getCreateTime() + "");
-        descResult.results.add(
-            new TResultRow(Lists.newArrayList(privilege, grantor, grantTime)));
-      }
-    }
-  }
-
-  /*
-   * Builds a TDescribeResult that contains the result of a DESCRIBE FORMATTED|EXTENDED
-   * DATABASE <db> command. Outputs all of the database's properties.
-   */
-  private static TDescribeResult describeDbExtended(Db db) {
-    TDescribeResult descResult = describeDbMinimal(db);
-    org.apache.hadoop.hive.metastore.api.Database msDb = db.getMetaStoreDb();
-    String ownerName = null;
-    PrincipalType ownerType = null;
-    Map<String, String> params = null;
-    PrincipalPrivilegeSet privileges = null;
-    if(msDb != null) {
-      ownerName = msDb.getOwnerName();
-      ownerType = msDb.getOwnerType();
-      params = msDb.getParameters();
-      privileges = msDb.getPrivileges();
-    }
-
-    if (ownerName != null && ownerType != null) {
-      TColumnValue owner = new TColumnValue();
-      owner.setString_val("Owner: ");
-      TResultRow ownerRow =
-          new TResultRow(Lists.newArrayList(owner, EMPTY, EMPTY));
-      descResult.results.add(ownerRow);
-
-      TColumnValue ownerNameCol = new TColumnValue();
-      ownerNameCol.setString_val(Objects.toString(ownerName, ""));
-      TColumnValue ownerTypeCol = new TColumnValue();
-      ownerTypeCol.setString_val(Objects.toString(ownerType, ""));
-      descResult.results.add(
-          new TResultRow(Lists.newArrayList(EMPTY, ownerNameCol, ownerTypeCol)));
-    }
-
-    if (params != null && params.size() > 0) {
-      TColumnValue parameter = new TColumnValue();
-      parameter.setString_val("Parameter: ");
-      TResultRow parameterRow =
-          new TResultRow(Lists.newArrayList(parameter, EMPTY, EMPTY));
-      descResult.results.add(parameterRow);
-      for (Map.Entry<String, String> param: params.entrySet()) {
-        TColumnValue key = new TColumnValue();
-        key.setString_val(Objects.toString(param.getKey(), ""));
-        TColumnValue val = new TColumnValue();
-        val.setString_val(Objects.toString(param.getValue(), ""));
-        descResult.results.add(
-            new TResultRow(Lists.newArrayList(EMPTY, key, val)));
-      }
-    }
-
-    // Currently we only retrieve privileges stored in hive metastore.
-    // TODO: Retrieve privileges from Catalog
-    if (privileges != null) {
-      buildPrivilegeResult(descResult, privileges.getUserPrivileges());
-      buildPrivilegeResult(descResult, privileges.getGroupPrivileges());
-      buildPrivilegeResult(descResult, privileges.getRolePrivileges());
-    }
-    return descResult;
-  }
-
-  /*
-   * Builds a TDescribeResult that contains the result of a DESCRIBE FORMATTED|EXTENDED
-   * <table> command. For the formatted describe output the goal is to be exactly the
-   * same as what Hive (via HiveServer2) outputs, for compatibility reasons. To do this,
-   * Hive's MetadataFormatUtils class is used to build the results.
-   */
-  public static TDescribeResult buildDescribeFormattedResult(Table table) {
-    TDescribeResult descResult = new TDescribeResult();
-    descResult.results = Lists.newArrayList();
-
-    org.apache.hadoop.hive.metastore.api.Table msTable =
-        table.getMetaStoreTable().deepCopy();
-    // For some table formats (e.g. Avro) the column list in the table can differ from the
-    // one returned by the Hive metastore. To handle this we use the column list from the
-    // table which has already reconciled those differences.
-    msTable.getSd().setCols(Column.toFieldSchemas(table.getNonClusteringColumns()));
-    msTable.setPartitionKeys(Column.toFieldSchemas(table.getClusteringColumns()));
-
-    // To avoid initializing any of the SerDe classes in the metastore table Thrift
-    // struct, create the ql.metadata.Table object by calling the empty c'tor and
-    // then calling setTTable().
-    org.apache.hadoop.hive.ql.metadata.Table hiveTable =
-        new org.apache.hadoop.hive.ql.metadata.Table();
-    hiveTable.setTTable(msTable);
-    StringBuilder sb = new StringBuilder();
-    // First add all the columns (includes partition columns).
-    sb.append(MetaDataFormatUtils.getAllColumnsInformation(msTable.getSd().getCols(),
-        msTable.getPartitionKeys(), true, false, true));
-    // Add the extended table metadata information.
-    sb.append(MetaDataFormatUtils.getTableInformation(hiveTable));
-
-    for (String line: sb.toString().split("\n")) {
-      // To match Hive's HiveServer2 output, split each line into multiple column
-      // values based on the field delimiter.
-      String[] columns = line.split(MetaDataFormatUtils.FIELD_DELIM);
-      TResultRow resultRow = new TResultRow();
-      for (int i = 0; i < NUM_DESC_FORMATTED_RESULT_COLS; ++i) {
-        TColumnValue colVal = new TColumnValue();
-        colVal.setString_val(null);
-        if (columns.length > i) {
-          // Add the column value.
-          colVal.setString_val(columns[i]);
-        }
-        resultRow.addToColVals(colVal);
-      }
-      descResult.results.add(resultRow);
-    }
-    return descResult;
-  }
-
-  /*
-   * Builds a TDescribeResult that contains the result of a DESCRIBE <path> command:
-   * the names and types of fields of the table or complex type referred to by the path.
-   */
-  public static TDescribeResult buildDescribeMinimalResult(StructType type) {
-    TDescribeResult descResult = new TDescribeResult();
-    descResult.results = Lists.newArrayList();
-
-    for (StructField field: type.getFields()) {
-      TColumnValue colNameCol = new TColumnValue();
-      colNameCol.setString_val(field.getName());
-      TColumnValue dataTypeCol = new TColumnValue();
-      dataTypeCol.setString_val(field.getType().prettyPrint().toLowerCase());
-      TColumnValue commentCol = new TColumnValue();
-      commentCol.setString_val(field.getComment() != null ? field.getComment() : "");
-      descResult.results.add(
-          new TResultRow(Lists.newArrayList(colNameCol, dataTypeCol, commentCol)));
-    }
-    return descResult;
-  }
-}
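
For orientation, a minimal sketch of the three-column row format that
describeDbMinimal() above emits (the database name, location and comment values
here are made up; it assumes the generated Impala thrift classes and Guava's
Lists are on the classpath):

    TColumnValue nameCol = new TColumnValue().setString_val("sales_db");
    TColumnValue locationCol = new TColumnValue()
        .setString_val("hdfs://nameservice1/user/hive/warehouse/sales_db.db");
    TColumnValue commentCol = new TColumnValue().setString_val("example database");
    // One row per database: name, location, comment.
    TResultRow row = new TResultRow(Lists.newArrayList(nameCol, locationCol, commentCol));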

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/service/FeSupport.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/service/FeSupport.java b/fe/src/main/java/com/cloudera/impala/service/FeSupport.java
deleted file mode 100644
index 4014129..0000000
--- a/fe/src/main/java/com/cloudera/impala/service/FeSupport.java
+++ /dev/null
@@ -1,292 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.service;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Set;
-
-import org.apache.thrift.TDeserializer;
-import org.apache.thrift.TException;
-import org.apache.thrift.TSerializer;
-import org.apache.thrift.protocol.TBinaryProtocol;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.analysis.BoolLiteral;
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.analysis.NullLiteral;
-import com.cloudera.impala.analysis.TableName;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.thrift.TCacheJarParams;
-import com.cloudera.impala.thrift.TCacheJarResult;
-import com.cloudera.impala.thrift.TCatalogObject;
-import com.cloudera.impala.thrift.TCatalogObjectType;
-import com.cloudera.impala.thrift.TCatalogServiceRequestHeader;
-import com.cloudera.impala.thrift.TColumnValue;
-import com.cloudera.impala.thrift.TExprBatch;
-import com.cloudera.impala.thrift.TPrioritizeLoadRequest;
-import com.cloudera.impala.thrift.TPrioritizeLoadResponse;
-import com.cloudera.impala.thrift.TQueryCtx;
-import com.cloudera.impala.thrift.TResultRow;
-import com.cloudera.impala.thrift.TStatus;
-import com.cloudera.impala.thrift.TStartupOptions;
-import com.cloudera.impala.thrift.TSymbolLookupParams;
-import com.cloudera.impala.thrift.TSymbolLookupResult;
-import com.cloudera.impala.thrift.TTable;
-import com.cloudera.impala.util.NativeLibUtil;
-import com.google.common.base.Preconditions;
-
-/**
- * This class provides the Impala executor functionality to the FE.
- * fe-support.cc implements all the native calls.
- * If the planner is executed inside the impalad, the impalad has already registered
- * all the JNI native functions, so there is no need to load the shared library.
- * For unit tests (mvn test), the shared library must be loaded because the native
- * functions have not been registered yet.
- */
-public class FeSupport {
-  private final static Logger LOG = LoggerFactory.getLogger(FeSupport.class);
-  private static boolean loaded_ = false;
-
-  // Only called if this library is explicitly loaded. This only happens
-  // when running FE tests.
-  public native static void NativeFeTestInit();
-
-  // Returns a serialized TResultRow
-  public native static byte[] NativeEvalConstExprs(byte[] thriftExprBatch,
-      byte[] thriftQueryGlobals);
-
-  // Returns a serialized TSymbolLookupResult
-  public native static byte[] NativeLookupSymbol(byte[] thriftSymbolLookup);
-
-  // Returns a serialized TCacheJarResult
-  public native static byte[] NativeCacheJar(byte[] thriftCacheJar);
-
-  // Does an RPC to the Catalog Server to prioritize the metadata loading of
-  // one or more catalog objects. To keep our kerberos configuration consolidated,
-  // we make all RPCs in the BE layer instead of calling the Catalog Server
-  // using Java Thrift bindings.
-  public native static byte[] NativePrioritizeLoad(byte[] thriftReq);
-
-  // Returns selected BE startup options as a serialized TStartupOptions
-  public native static byte[] NativeGetStartupOptions();
-
-  /**
-   * Locally caches the jar at the specified HDFS location.
-   *
-   * @param hdfsLocation The path to the jar in HDFS
-   * @return The result of the call to cache the jar, which includes a status and the
-   *         local path of the cached jar if the operation was successful.
-   */
-  public static TCacheJarResult CacheJar(String hdfsLocation) throws InternalException {
-    Preconditions.checkNotNull(hdfsLocation);
-    TCacheJarParams params = new TCacheJarParams(hdfsLocation);
-    TSerializer serializer = new TSerializer(new TBinaryProtocol.Factory());
-    byte[] result;
-    try {
-      result = CacheJar(serializer.serialize(params));
-      Preconditions.checkNotNull(result);
-      TDeserializer deserializer = new TDeserializer(new TBinaryProtocol.Factory());
-      TCacheJarResult thriftResult = new TCacheJarResult();
-      deserializer.deserialize(thriftResult, result);
-      return thriftResult;
-    } catch (TException e) {
-      // this should never happen
-      throw new InternalException(
-          "Couldn't cache jar at HDFS location " + hdfsLocation, e);
-    }
-  }
-
-  private static byte[] CacheJar(byte[] thriftParams) {
-    try {
-      return NativeCacheJar(thriftParams);
-    } catch (UnsatisfiedLinkError e) {
-      loadLibrary();
-    }
-    return NativeCacheJar(thriftParams);
-  }
-
-  public static TColumnValue EvalConstExpr(Expr expr, TQueryCtx queryCtx)
-      throws InternalException {
-    Preconditions.checkState(expr.isConstant());
-    TExprBatch exprBatch = new TExprBatch();
-    exprBatch.addToExprs(expr.treeToThrift());
-    TSerializer serializer = new TSerializer(new TBinaryProtocol.Factory());
-    byte[] result;
-    try {
-      result = EvalConstExprs(serializer.serialize(exprBatch),
-          serializer.serialize(queryCtx));
-      Preconditions.checkNotNull(result);
-      TDeserializer deserializer = new TDeserializer(new TBinaryProtocol.Factory());
-      TResultRow val = new TResultRow();
-      deserializer.deserialize(val, result);
-      Preconditions.checkState(val.getColValsSize() == 1);
-      return val.getColVals().get(0);
-    } catch (TException e) {
-      // this should never happen
-      throw new InternalException("couldn't execute expr " + expr.toSql(), e);
-    }
-  }
-
-  private static byte[] LookupSymbol(byte[] thriftParams) {
-    try {
-      return NativeLookupSymbol(thriftParams);
-    } catch (UnsatisfiedLinkError e) {
-      loadLibrary();
-    }
-    return NativeLookupSymbol(thriftParams);
-  }
-
-  public static TSymbolLookupResult LookupSymbol(TSymbolLookupParams params)
-      throws InternalException {
-    TSerializer serializer = new TSerializer(new TBinaryProtocol.Factory());
-    try {
-      byte[] resultBytes = LookupSymbol(serializer.serialize(params));
-      Preconditions.checkNotNull(resultBytes);
-      TDeserializer deserializer = new TDeserializer(new TBinaryProtocol.Factory());
-      TSymbolLookupResult result = new TSymbolLookupResult();
-      deserializer.deserialize(result, resultBytes);
-      return result;
-    } catch (TException e) {
-      // this should never happen
-      throw new InternalException("couldn't perform symbol lookup.", e);
-    }
-  }
-
-  private static byte[] EvalConstExprs(byte[] thriftExprBatch,
-      byte[] thriftQueryContext) {
-    try {
-      return NativeEvalConstExprs(thriftExprBatch, thriftQueryContext);
-    } catch (UnsatisfiedLinkError e) {
-      loadLibrary();
-    }
-    return NativeEvalConstExprs(thriftExprBatch, thriftQueryContext);
-  }
-
-  public static boolean EvalPredicate(Expr pred, TQueryCtx queryCtx)
-      throws InternalException {
-    // Shortcuts to avoid expensive BE evaluation.
-    if (pred instanceof BoolLiteral) return ((BoolLiteral) pred).getValue();
-    if (pred instanceof NullLiteral) return false;
-    Preconditions.checkState(pred.getType().isBoolean());
-    TColumnValue val = EvalConstExpr(pred, queryCtx);
-    // Return false if pred evaluated to false or NULL. True otherwise.
-    return val.isBool_val() && val.bool_val;
-  }
-
-  /**
-   * Evaluate a batch of predicates in the BE. The results are stored in a
-   * TResultRow object, where each TColumnValue in it stores the result of
-   * a predicate evaluation.
-   *
-   * TODO: This function is currently used for improving the performance of
-   * partition pruning (see IMPALA-887), hence it only supports boolean
-   * exprs. In the future, we can extend it to support arbitrary constant exprs.
-   */
-  public static TResultRow EvalPredicateBatch(ArrayList<Expr> exprs,
-      TQueryCtx queryCtx) throws InternalException {
-    TSerializer serializer = new TSerializer(new TBinaryProtocol.Factory());
-    TExprBatch exprBatch = new TExprBatch();
-    for (Expr expr: exprs) {
-      // Make sure we only process boolean exprs.
-      Preconditions.checkState(expr.getType().isBoolean());
-      Preconditions.checkState(expr.isConstant());
-      exprBatch.addToExprs(expr.treeToThrift());
-    }
-    byte[] result;
-    try {
-      result = EvalConstExprs(serializer.serialize(exprBatch),
-          serializer.serialize(queryCtx));
-      Preconditions.checkNotNull(result);
-      TDeserializer deserializer = new TDeserializer(new TBinaryProtocol.Factory());
-      TResultRow val = new TResultRow();
-      deserializer.deserialize(val, result);
-      return val;
-    } catch (TException e) {
-      // this should never happen
-      throw new InternalException("couldn't execute a batch of exprs.", e);
-    }
-  }
-
-  private static byte[] PrioritizeLoad(byte[] thriftReq) {
-    try {
-      return NativePrioritizeLoad(thriftReq);
-    } catch (UnsatisfiedLinkError e) {
-      loadLibrary();
-    }
-    return NativePrioritizeLoad(thriftReq);
-  }
-
-  public static TStatus PrioritizeLoad(Set<TableName> tableNames)
-      throws InternalException {
-    Preconditions.checkNotNull(tableNames);
-
-    List<TCatalogObject> objectDescs = new ArrayList<TCatalogObject>(tableNames.size());
-    for (TableName tableName: tableNames) {
-      TCatalogObject catalogObject = new TCatalogObject();
-      catalogObject.setType(TCatalogObjectType.TABLE);
-      catalogObject.setTable(new TTable(tableName.getDb(), tableName.getTbl()));
-      objectDescs.add(catalogObject);
-    }
-
-    TPrioritizeLoadRequest request = new TPrioritizeLoadRequest ();
-    request.setHeader(new TCatalogServiceRequestHeader());
-    request.setObject_descs(objectDescs);
-
-    TSerializer serializer = new TSerializer(new TBinaryProtocol.Factory());
-    try {
-      byte[] result = PrioritizeLoad(serializer.serialize(request));
-      Preconditions.checkNotNull(result);
-      TDeserializer deserializer = new TDeserializer(new TBinaryProtocol.Factory());
-      TPrioritizeLoadResponse response = new TPrioritizeLoadResponse();
-      deserializer.deserialize(response, result);
-      return response.getStatus();
-    } catch (TException e) {
-      // this should never happen
-      throw new InternalException("Error processing request: " + e.getMessage(), e);
-    }
-  }
-
-  public static TStartupOptions GetStartupOptions() throws InternalException {
-    try {
-      byte[] result = NativeGetStartupOptions();
-      Preconditions.checkNotNull(result);
-      TDeserializer deserializer = new TDeserializer(new TBinaryProtocol.Factory());
-      TStartupOptions options = new TStartupOptions();
-      deserializer.deserialize(options, result);
-      return options;
-    } catch (TException e) {
-      throw new InternalException("Error retrieving startup options: " + e.getMessage(),
-          e);
-    }
-  }
-
-  /**
-   * This function should only be called explicitly by FeSupport to ensure that
-   * native functions are loaded.
-   */
-  private static synchronized void loadLibrary() {
-    if (loaded_) return;
-    LOG.info("Loading libfesupport.so");
-    NativeLibUtil.loadLibrary("libfesupport.so");
-    LOG.info("Loaded libfesupport.so");
-    loaded_ = true;
-    NativeFeTestInit();
-  }
-}
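Taken together, the wrappers above (CacheJar, LookupSymbol, EvalConstExprs, PrioritizeLoad) all follow the same round trip: serialize the Thrift request with the binary protocol, hand the bytes to a native entry point, and deserialize the reply into a caller-supplied Thrift struct, with each public method adding its own Preconditions checks and wrapping TException in InternalException. A minimal sketch of that shared shape; NativeRoundTripSketch, NativeCall, and roundTrip are illustrative placeholder names, not part of the Impala sources:

import org.apache.thrift.TBase;
import org.apache.thrift.TDeserializer;
import org.apache.thrift.TException;
import org.apache.thrift.TSerializer;
import org.apache.thrift.protocol.TBinaryProtocol;

final class NativeRoundTripSketch {
  // Stand-in for any of the Native* entry points that take and return serialized bytes.
  interface NativeCall { byte[] invoke(byte[] serializedRequest); }

  // Serialize the request, invoke the native call, and fill in the response struct.
  static <T extends TBase<?, ?>> T roundTrip(TBase<?, ?> request, T response,
      NativeCall nativeCall) throws TException {
    TSerializer serializer = new TSerializer(new TBinaryProtocol.Factory());
    byte[] resultBytes = nativeCall.invoke(serializer.serialize(request));
    TDeserializer deserializer = new TDeserializer(new TBinaryProtocol.Factory());
    deserializer.deserialize(response, resultBytes);
    return response;
  }
}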

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/service/Frontend.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/service/Frontend.java b/fe/src/main/java/com/cloudera/impala/service/Frontend.java
deleted file mode 100644
index 2d38396..0000000
--- a/fe/src/main/java/com/cloudera/impala/service/Frontend.java
+++ /dev/null
@@ -1,1231 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.service;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Random;
-import java.util.Set;
-import java.util.UUID;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicReference;
-
-import com.cloudera.impala.catalog.KuduTable;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hive.service.cli.thrift.TGetColumnsReq;
-import org.apache.hive.service.cli.thrift.TGetFunctionsReq;
-import org.apache.hive.service.cli.thrift.TGetSchemasReq;
-import org.apache.hive.service.cli.thrift.TGetTablesReq;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.analysis.AnalysisContext;
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.CreateDataSrcStmt;
-import com.cloudera.impala.analysis.CreateDropRoleStmt;
-import com.cloudera.impala.analysis.CreateUdaStmt;
-import com.cloudera.impala.analysis.CreateUdfStmt;
-import com.cloudera.impala.analysis.DropDataSrcStmt;
-import com.cloudera.impala.analysis.DropFunctionStmt;
-import com.cloudera.impala.analysis.DropStatsStmt;
-import com.cloudera.impala.analysis.DropTableOrViewStmt;
-import com.cloudera.impala.analysis.GrantRevokePrivStmt;
-import com.cloudera.impala.analysis.GrantRevokeRoleStmt;
-import com.cloudera.impala.analysis.InsertStmt;
-import com.cloudera.impala.analysis.QueryStmt;
-import com.cloudera.impala.analysis.ResetMetadataStmt;
-import com.cloudera.impala.analysis.ShowFunctionsStmt;
-import com.cloudera.impala.analysis.ShowGrantRoleStmt;
-import com.cloudera.impala.analysis.ShowRolesStmt;
-import com.cloudera.impala.analysis.TableName;
-import com.cloudera.impala.analysis.TruncateStmt;
-import com.cloudera.impala.analysis.TupleDescriptor;
-import com.cloudera.impala.authorization.AuthorizationChecker;
-import com.cloudera.impala.authorization.AuthorizationConfig;
-import com.cloudera.impala.authorization.ImpalaInternalAdminUser;
-import com.cloudera.impala.authorization.PrivilegeRequest;
-import com.cloudera.impala.authorization.PrivilegeRequestBuilder;
-import com.cloudera.impala.authorization.User;
-import com.cloudera.impala.catalog.AuthorizationException;
-import com.cloudera.impala.catalog.Catalog;
-import com.cloudera.impala.catalog.CatalogException;
-import com.cloudera.impala.catalog.Column;
-import com.cloudera.impala.catalog.DataSource;
-import com.cloudera.impala.catalog.DataSourceTable;
-import com.cloudera.impala.catalog.DatabaseNotFoundException;
-import com.cloudera.impala.catalog.Db;
-import com.cloudera.impala.catalog.Function;
-import com.cloudera.impala.catalog.HBaseTable;
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.catalog.ImpaladCatalog;
-import com.cloudera.impala.catalog.StructType;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.catalog.TableId;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.FileSystemUtil;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.common.NotImplementedException;
-import com.cloudera.impala.common.RuntimeEnv;
-import com.cloudera.impala.planner.PlanFragment;
-import com.cloudera.impala.planner.Planner;
-import com.cloudera.impala.planner.ScanNode;
-import com.cloudera.impala.thrift.TCatalogOpRequest;
-import com.cloudera.impala.thrift.TCatalogOpType;
-import com.cloudera.impala.thrift.TCatalogServiceRequestHeader;
-import com.cloudera.impala.thrift.TColumn;
-import com.cloudera.impala.thrift.TColumnType;
-import com.cloudera.impala.thrift.TColumnValue;
-import com.cloudera.impala.thrift.TCreateDropRoleParams;
-import com.cloudera.impala.thrift.TDdlExecRequest;
-import com.cloudera.impala.thrift.TDdlType;
-import com.cloudera.impala.thrift.TDescribeOutputStyle;
-import com.cloudera.impala.thrift.TDescribeResult;
-import com.cloudera.impala.thrift.TErrorCode;
-import com.cloudera.impala.thrift.TExecRequest;
-import com.cloudera.impala.thrift.TExplainLevel;
-import com.cloudera.impala.thrift.TExplainResult;
-import com.cloudera.impala.thrift.TFinalizeParams;
-import com.cloudera.impala.thrift.TFunctionCategory;
-import com.cloudera.impala.thrift.TGrantRevokePrivParams;
-import com.cloudera.impala.thrift.TGrantRevokeRoleParams;
-import com.cloudera.impala.thrift.TLineageGraph;
-import com.cloudera.impala.thrift.TLoadDataReq;
-import com.cloudera.impala.thrift.TLoadDataResp;
-import com.cloudera.impala.thrift.TMetadataOpRequest;
-import com.cloudera.impala.thrift.TPlanFragment;
-import com.cloudera.impala.thrift.TPlanFragmentTree;
-import com.cloudera.impala.thrift.TQueryCtx;
-import com.cloudera.impala.thrift.TQueryExecRequest;
-import com.cloudera.impala.thrift.TResetMetadataRequest;
-import com.cloudera.impala.thrift.TResultRow;
-import com.cloudera.impala.thrift.TResultSet;
-import com.cloudera.impala.thrift.TResultSetMetadata;
-import com.cloudera.impala.thrift.TShowFilesParams;
-import com.cloudera.impala.thrift.TStatus;
-import com.cloudera.impala.thrift.TStmtType;
-import com.cloudera.impala.thrift.TTableName;
-import com.cloudera.impala.thrift.TUpdateCatalogCacheRequest;
-import com.cloudera.impala.thrift.TUpdateCatalogCacheResponse;
-import com.cloudera.impala.thrift.TUpdateMembershipRequest;
-import com.cloudera.impala.util.EventSequence;
-import com.cloudera.impala.util.MembershipSnapshot;
-import com.cloudera.impala.util.PatternMatcher;
-import com.cloudera.impala.util.TResultRowBuilder;
-import com.cloudera.impala.util.TSessionStateUtil;
-import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Predicates;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import com.google.common.collect.Sets;
-
-/**
- * Frontend API for the impalad process.
- * This class allows the impala daemon to create TQueryExecRequests
- * in response to TClientRequests. It also handles management of the
- * authorization policy.
- */
-public class Frontend {
-  private final static Logger LOG = LoggerFactory.getLogger(Frontend.class);
-  // Time to wait for missing tables to be loaded before timing out.
-  private final long MISSING_TBL_LOAD_WAIT_TIMEOUT_MS = 2 * 60 * 1000;
-
-  // Max time to wait for a catalog update notification.
-  private final long MAX_CATALOG_UPDATE_WAIT_TIME_MS = 2 * 1000;
-
-  //TODO: Make the reload interval configurable.
-  private static final int AUTHORIZATION_POLICY_RELOAD_INTERVAL_SECS = 5 * 60;
-
-  private ImpaladCatalog impaladCatalog_;
-  private final AuthorizationConfig authzConfig_;
-  private final AtomicReference<AuthorizationChecker> authzChecker_;
-  private final ScheduledExecutorService policyReader_ =
-      Executors.newScheduledThreadPool(1);
-
-  public Frontend(AuthorizationConfig authorizationConfig) {
-    this(authorizationConfig, new ImpaladCatalog());
-  }
-
-  /**
-   * C'tor used by tests to pass in a custom ImpaladCatalog.
-   */
-  public Frontend(AuthorizationConfig authorizationConfig, ImpaladCatalog catalog) {
-    authzConfig_ = authorizationConfig;
-    impaladCatalog_ = catalog;
-    authzChecker_ = new AtomicReference<AuthorizationChecker>(
-        new AuthorizationChecker(authzConfig_, impaladCatalog_.getAuthPolicy()));
-    // If authorization is enabled, reload the policy on a regular basis.
-    if (authzConfig_.isEnabled() && authzConfig_.isFileBasedPolicy()) {
-      // Stagger the reads across nodes
-      Random randomGen = new Random(UUID.randomUUID().hashCode());
-      int delay = AUTHORIZATION_POLICY_RELOAD_INTERVAL_SECS + randomGen.nextInt(60);
-
-      policyReader_.scheduleAtFixedRate(
-          new AuthorizationPolicyReader(authzConfig_),
-          delay, AUTHORIZATION_POLICY_RELOAD_INTERVAL_SECS, TimeUnit.SECONDS);
-    }
-  }
-
-  /**
-   * Reads (and caches) an authorization policy from HDFS.
-   */
-  private class AuthorizationPolicyReader implements Runnable {
-    private final AuthorizationConfig config_;
-
-    public AuthorizationPolicyReader(AuthorizationConfig config) {
-      config_ = config;
-    }
-
-    @Override
-    public void run() {
-      try {
-        LOG.info("Reloading authorization policy file from: " + config_.getPolicyFile());
-        authzChecker_.set(new AuthorizationChecker(config_,
-            getCatalog().getAuthPolicy()));
-      } catch (Exception e) {
-        LOG.error("Error reloading policy file: ", e);
-      }
-    }
-  }
-
-  public ImpaladCatalog getCatalog() { return impaladCatalog_; }
-  public AuthorizationChecker getAuthzChecker() { return authzChecker_.get(); }
-
-  public TUpdateCatalogCacheResponse updateCatalogCache(
-      TUpdateCatalogCacheRequest req) throws CatalogException {
-    ImpaladCatalog catalog = impaladCatalog_;
-
-    // If this is not a delta, this update should replace the current
-    // Catalog contents so create a new catalog and populate it.
-    if (!req.is_delta) catalog = new ImpaladCatalog();
-
-    TUpdateCatalogCacheResponse response = catalog.updateCatalog(req);
-
-    if (!req.is_delta) {
-      // This was not a delta update. Now that the catalog has been updated,
-      // replace the references to impaladCatalog_/authzChecker_ to ensure
-      // clients don't see the catalog disappear.
-      impaladCatalog_ = catalog;
-      authzChecker_.set(new AuthorizationChecker(authzConfig_,
-          impaladCatalog_.getAuthPolicy()));
-    }
-    return response;
-  }
-
-  /**
-   * Update the cluster membership snapshot with the latest snapshot from the backend.
-   */
-  public void updateMembership(TUpdateMembershipRequest req) {
-    MembershipSnapshot.update(req);
-  }
-
-  /**
-   * Constructs a TCatalogOpRequest and attaches it, plus any metadata, to the
-   * result argument.
-   */
-  private void createCatalogOpRequest(AnalysisContext.AnalysisResult analysis,
-      TExecRequest result) throws InternalException {
-    TCatalogOpRequest ddl = new TCatalogOpRequest();
-    TResultSetMetadata metadata = new TResultSetMetadata();
-    if (analysis.isUseStmt()) {
-      ddl.op_type = TCatalogOpType.USE;
-      ddl.setUse_db_params(analysis.getUseStmt().toThrift());
-      metadata.setColumns(Collections.<TColumn>emptyList());
-    } else if (analysis.isShowTablesStmt()) {
-      ddl.op_type = TCatalogOpType.SHOW_TABLES;
-      ddl.setShow_tables_params(analysis.getShowTablesStmt().toThrift());
-      metadata.setColumns(Arrays.asList(
-          new TColumn("name", Type.STRING.toThrift())));
-    } else if (analysis.isShowDbsStmt()) {
-      ddl.op_type = TCatalogOpType.SHOW_DBS;
-      ddl.setShow_dbs_params(analysis.getShowDbsStmt().toThrift());
-      metadata.setColumns(Arrays.asList(
-          new TColumn("name", Type.STRING.toThrift()),
-          new TColumn("comment", Type.STRING.toThrift())));
-    } else if (analysis.isShowDataSrcsStmt()) {
-      ddl.op_type = TCatalogOpType.SHOW_DATA_SRCS;
-      ddl.setShow_data_srcs_params(analysis.getShowDataSrcsStmt().toThrift());
-      metadata.setColumns(Arrays.asList(
-          new TColumn("name", Type.STRING.toThrift()),
-          new TColumn("location", Type.STRING.toThrift()),
-          new TColumn("class name", Type.STRING.toThrift()),
-          new TColumn("api version", Type.STRING.toThrift())));
-    } else if (analysis.isShowStatsStmt()) {
-      ddl.op_type = TCatalogOpType.SHOW_STATS;
-      ddl.setShow_stats_params(analysis.getShowStatsStmt().toThrift());
-      metadata.setColumns(Arrays.asList(
-          new TColumn("name", Type.STRING.toThrift())));
-    } else if (analysis.isShowFunctionsStmt()) {
-      ddl.op_type = TCatalogOpType.SHOW_FUNCTIONS;
-      ShowFunctionsStmt stmt = (ShowFunctionsStmt)analysis.getStmt();
-      ddl.setShow_fns_params(stmt.toThrift());
-      metadata.setColumns(Arrays.asList(
-          new TColumn("return type", Type.STRING.toThrift()),
-          new TColumn("signature", Type.STRING.toThrift()),
-          new TColumn("binary type", Type.STRING.toThrift()),
-          new TColumn("is persistent", Type.STRING.toThrift())));
-    } else if (analysis.isShowCreateTableStmt()) {
-      ddl.op_type = TCatalogOpType.SHOW_CREATE_TABLE;
-      ddl.setShow_create_table_params(analysis.getShowCreateTableStmt().toThrift());
-      metadata.setColumns(Arrays.asList(
-          new TColumn("result", Type.STRING.toThrift())));
-    } else if (analysis.isShowCreateFunctionStmt()) {
-      ddl.op_type = TCatalogOpType.SHOW_CREATE_FUNCTION;
-      ddl.setShow_create_function_params(analysis.getShowCreateFunctionStmt().toThrift());
-      metadata.setColumns(Arrays.asList(
-          new TColumn("result", Type.STRING.toThrift())));
-    } else if (analysis.isShowFilesStmt()) {
-      ddl.op_type = TCatalogOpType.SHOW_FILES;
-      ddl.setShow_files_params(analysis.getShowFilesStmt().toThrift());
-      metadata.setColumns(Collections.<TColumn>emptyList());
-    } else if (analysis.isDescribeDbStmt()) {
-      ddl.op_type = TCatalogOpType.DESCRIBE_DB;
-      ddl.setDescribe_db_params(analysis.getDescribeDbStmt().toThrift());
-      metadata.setColumns(Arrays.asList(
-          new TColumn("name", Type.STRING.toThrift()),
-          new TColumn("location", Type.STRING.toThrift()),
-          new TColumn("comment", Type.STRING.toThrift())));
-    } else if (analysis.isDescribeTableStmt()) {
-      ddl.op_type = TCatalogOpType.DESCRIBE_TABLE;
-      ddl.setDescribe_table_params(analysis.getDescribeTableStmt().toThrift());
-      metadata.setColumns(Arrays.asList(
-          new TColumn("name", Type.STRING.toThrift()),
-          new TColumn("type", Type.STRING.toThrift()),
-          new TColumn("comment", Type.STRING.toThrift())));
-    } else if (analysis.isAlterTableStmt()) {
-      ddl.op_type = TCatalogOpType.DDL;
-      TDdlExecRequest req = new TDdlExecRequest();
-      req.setDdl_type(TDdlType.ALTER_TABLE);
-      req.setAlter_table_params(analysis.getAlterTableStmt().toThrift());
-      ddl.setDdl_params(req);
-      metadata.setColumns(Collections.<TColumn>emptyList());
-    } else if (analysis.isAlterViewStmt()) {
-      ddl.op_type = TCatalogOpType.DDL;
-      TDdlExecRequest req = new TDdlExecRequest();
-      req.setDdl_type(TDdlType.ALTER_VIEW);
-      req.setAlter_view_params(analysis.getAlterViewStmt().toThrift());
-      ddl.setDdl_params(req);
-      metadata.setColumns(Collections.<TColumn>emptyList());
-    } else if (analysis.isCreateTableStmt()) {
-      ddl.op_type = TCatalogOpType.DDL;
-      TDdlExecRequest req = new TDdlExecRequest();
-      req.setDdl_type(TDdlType.CREATE_TABLE);
-      req.setCreate_table_params(analysis.getCreateTableStmt().toThrift());
-      ddl.setDdl_params(req);
-      metadata.setColumns(Collections.<TColumn>emptyList());
-    } else if (analysis.isCreateTableAsSelectStmt()) {
-      ddl.op_type = TCatalogOpType.DDL;
-      TDdlExecRequest req = new TDdlExecRequest();
-      req.setDdl_type(TDdlType.CREATE_TABLE_AS_SELECT);
-      req.setCreate_table_params(
-          analysis.getCreateTableAsSelectStmt().getCreateStmt().toThrift());
-      ddl.setDdl_params(req);
-      metadata.setColumns(Arrays.asList(
-          new TColumn("summary", Type.STRING.toThrift())));
-    } else if (analysis.isCreateTableLikeStmt()) {
-      ddl.op_type = TCatalogOpType.DDL;
-      TDdlExecRequest req = new TDdlExecRequest();
-      req.setDdl_type(TDdlType.CREATE_TABLE_LIKE);
-      req.setCreate_table_like_params(analysis.getCreateTableLikeStmt().toThrift());
-      ddl.setDdl_params(req);
-      metadata.setColumns(Collections.<TColumn>emptyList());
-    } else if (analysis.isCreateViewStmt()) {
-      ddl.op_type = TCatalogOpType.DDL;
-      TDdlExecRequest req = new TDdlExecRequest();
-      req.setDdl_type(TDdlType.CREATE_VIEW);
-      req.setCreate_view_params(analysis.getCreateViewStmt().toThrift());
-      ddl.setDdl_params(req);
-      metadata.setColumns(Collections.<TColumn>emptyList());
-    } else if (analysis.isCreateDbStmt()) {
-      ddl.op_type = TCatalogOpType.DDL;
-      TDdlExecRequest req = new TDdlExecRequest();
-      req.setDdl_type(TDdlType.CREATE_DATABASE);
-      req.setCreate_db_params(analysis.getCreateDbStmt().toThrift());
-      ddl.setDdl_params(req);
-      metadata.setColumns(Collections.<TColumn>emptyList());
-    } else if (analysis.isCreateUdfStmt()) {
-      ddl.op_type = TCatalogOpType.DDL;
-      CreateUdfStmt stmt = (CreateUdfStmt) analysis.getStmt();
-      TDdlExecRequest req = new TDdlExecRequest();
-      req.setDdl_type(TDdlType.CREATE_FUNCTION);
-      req.setCreate_fn_params(stmt.toThrift());
-      ddl.setDdl_params(req);
-      metadata.setColumns(Collections.<TColumn>emptyList());
-    } else if (analysis.isCreateUdaStmt()) {
-      ddl.op_type = TCatalogOpType.DDL;
-      TDdlExecRequest req = new TDdlExecRequest();
-      req.setDdl_type(TDdlType.CREATE_FUNCTION);
-      CreateUdaStmt stmt = (CreateUdaStmt)analysis.getStmt();
-      req.setCreate_fn_params(stmt.toThrift());
-      ddl.setDdl_params(req);
-      metadata.setColumns(Collections.<TColumn>emptyList());
-    } else if (analysis.isCreateDataSrcStmt()) {
-      ddl.op_type = TCatalogOpType.DDL;
-      TDdlExecRequest req = new TDdlExecRequest();
-      req.setDdl_type(TDdlType.CREATE_DATA_SOURCE);
-      CreateDataSrcStmt stmt = (CreateDataSrcStmt)analysis.getStmt();
-      req.setCreate_data_source_params(stmt.toThrift());
-      ddl.setDdl_params(req);
-      metadata.setColumns(Collections.<TColumn>emptyList());
-    } else if (analysis.isComputeStatsStmt()) {
-      ddl.op_type = TCatalogOpType.DDL;
-      TDdlExecRequest req = new TDdlExecRequest();
-      req.setDdl_type(TDdlType.COMPUTE_STATS);
-      req.setCompute_stats_params(analysis.getComputeStatsStmt().toThrift());
-      ddl.setDdl_params(req);
-      metadata.setColumns(Collections.<TColumn>emptyList());
-    } else if (analysis.isDropDbStmt()) {
-      ddl.op_type = TCatalogOpType.DDL;
-      TDdlExecRequest req = new TDdlExecRequest();
-      req.setDdl_type(TDdlType.DROP_DATABASE);
-      req.setDrop_db_params(analysis.getDropDbStmt().toThrift());
-      ddl.setDdl_params(req);
-      metadata.setColumns(Collections.<TColumn>emptyList());
-    } else if (analysis.isDropTableOrViewStmt()) {
-      ddl.op_type = TCatalogOpType.DDL;
-      TDdlExecRequest req = new TDdlExecRequest();
-      DropTableOrViewStmt stmt = analysis.getDropTableOrViewStmt();
-      req.setDdl_type(stmt.isDropTable() ? TDdlType.DROP_TABLE : TDdlType.DROP_VIEW);
-      req.setDrop_table_or_view_params(stmt.toThrift());
-      ddl.setDdl_params(req);
-      metadata.setColumns(Collections.<TColumn>emptyList());
-    } else if (analysis.isTruncateStmt()) {
-      ddl.op_type = TCatalogOpType.DDL;
-      TDdlExecRequest req = new TDdlExecRequest();
-      TruncateStmt stmt = analysis.getTruncateStmt();
-      req.setDdl_type(TDdlType.TRUNCATE_TABLE);
-      req.setTruncate_params(stmt.toThrift());
-      ddl.setDdl_params(req);
-      metadata.setColumns(Collections.<TColumn>emptyList());
-    } else if (analysis.isDropFunctionStmt()) {
-      ddl.op_type = TCatalogOpType.DDL;
-      TDdlExecRequest req = new TDdlExecRequest();
-      req.setDdl_type(TDdlType.DROP_FUNCTION);
-      DropFunctionStmt stmt = (DropFunctionStmt)analysis.getStmt();
-      req.setDrop_fn_params(stmt.toThrift());
-      ddl.setDdl_params(req);
-      metadata.setColumns(Collections.<TColumn>emptyList());
-    } else if (analysis.isDropDataSrcStmt()) {
-      ddl.op_type = TCatalogOpType.DDL;
-      TDdlExecRequest req = new TDdlExecRequest();
-      req.setDdl_type(TDdlType.DROP_DATA_SOURCE);
-      DropDataSrcStmt stmt = (DropDataSrcStmt)analysis.getStmt();
-      req.setDrop_data_source_params(stmt.toThrift());
-      ddl.setDdl_params(req);
-      metadata.setColumns(Collections.<TColumn>emptyList());
-    } else if (analysis.isDropStatsStmt()) {
-      ddl.op_type = TCatalogOpType.DDL;
-      TDdlExecRequest req = new TDdlExecRequest();
-      req.setDdl_type(TDdlType.DROP_STATS);
-      DropStatsStmt stmt = (DropStatsStmt) analysis.getStmt();
-      req.setDrop_stats_params(stmt.toThrift());
-      ddl.setDdl_params(req);
-      metadata.setColumns(Collections.<TColumn>emptyList());
-    } else if (analysis.isResetMetadataStmt()) {
-      ddl.op_type = TCatalogOpType.RESET_METADATA;
-      ResetMetadataStmt resetMetadataStmt = (ResetMetadataStmt) analysis.getStmt();
-      TResetMetadataRequest req = resetMetadataStmt.toThrift();
-      ddl.setReset_metadata_params(req);
-      metadata.setColumns(Collections.<TColumn>emptyList());
-    } else if (analysis.isShowRolesStmt()) {
-      ddl.op_type = TCatalogOpType.SHOW_ROLES;
-      ShowRolesStmt showRolesStmt = (ShowRolesStmt) analysis.getStmt();
-      ddl.setShow_roles_params(showRolesStmt.toThrift());
-      Set<String> groupNames =
-          getAuthzChecker().getUserGroups(analysis.getAnalyzer().getUser());
-      // Check if the user is part of the group (case-sensitive) this SHOW ROLE
-      // statement is targeting. If they are already a member of the group,
-      // the admin requirement can be removed.
-      Preconditions.checkState(ddl.getShow_roles_params().isSetIs_admin_op());
-      if (ddl.getShow_roles_params().isSetGrant_group() &&
-          groupNames.contains(ddl.getShow_roles_params().getGrant_group())) {
-        ddl.getShow_roles_params().setIs_admin_op(false);
-      }
-      metadata.setColumns(Arrays.asList(
-          new TColumn("role_name", Type.STRING.toThrift())));
-    } else if (analysis.isShowGrantRoleStmt()) {
-      ddl.op_type = TCatalogOpType.SHOW_GRANT_ROLE;
-      ShowGrantRoleStmt showGrantRoleStmt = (ShowGrantRoleStmt) analysis.getStmt();
-      ddl.setShow_grant_role_params(showGrantRoleStmt.toThrift());
-      Set<String> groupNames =
-          getAuthzChecker().getUserGroups(analysis.getAnalyzer().getUser());
-      // User must be an admin to execute this operation if they have not been granted
-      // this role.
-      ddl.getShow_grant_role_params().setIs_admin_op(Sets.intersection(groupNames,
-          showGrantRoleStmt.getRole().getGrantGroups()).isEmpty());
-      metadata.setColumns(Arrays.asList(
-          new TColumn("name", Type.STRING.toThrift())));
-    } else if (analysis.isCreateDropRoleStmt()) {
-      CreateDropRoleStmt createDropRoleStmt = (CreateDropRoleStmt) analysis.getStmt();
-      TCreateDropRoleParams params = createDropRoleStmt.toThrift();
-      TDdlExecRequest req = new TDdlExecRequest();
-      req.setDdl_type(params.isIs_drop() ? TDdlType.DROP_ROLE : TDdlType.CREATE_ROLE);
-      req.setCreate_drop_role_params(params);
-      ddl.op_type = TCatalogOpType.DDL;
-      ddl.setDdl_params(req);
-      metadata.setColumns(Collections.<TColumn>emptyList());
-    } else if (analysis.isGrantRevokeRoleStmt()) {
-      GrantRevokeRoleStmt grantRoleStmt = (GrantRevokeRoleStmt) analysis.getStmt();
-      TGrantRevokeRoleParams params = grantRoleStmt.toThrift();
-      TDdlExecRequest req = new TDdlExecRequest();
-      req.setDdl_type(params.isIs_grant() ? TDdlType.GRANT_ROLE : TDdlType.REVOKE_ROLE);
-      req.setGrant_revoke_role_params(params);
-      ddl.op_type = TCatalogOpType.DDL;
-      ddl.setDdl_params(req);
-      metadata.setColumns(Collections.<TColumn>emptyList());
-    } else if (analysis.isGrantRevokePrivStmt()) {
-      GrantRevokePrivStmt grantRevokePrivStmt = (GrantRevokePrivStmt) analysis.getStmt();
-      TGrantRevokePrivParams params = grantRevokePrivStmt.toThrift();
-      TDdlExecRequest req = new TDdlExecRequest();
-      req.setDdl_type(params.isIs_grant() ?
-          TDdlType.GRANT_PRIVILEGE : TDdlType.REVOKE_PRIVILEGE);
-      req.setGrant_revoke_priv_params(params);
-      ddl.op_type = TCatalogOpType.DDL;
-      ddl.setDdl_params(req);
-      metadata.setColumns(Collections.<TColumn>emptyList());
-    } else {
-      throw new IllegalStateException("Unexpected CatalogOp statement type.");
-    }
-
-    result.setResult_set_metadata(metadata);
-    result.setCatalog_op_request(ddl);
-    if (ddl.getOp_type() == TCatalogOpType.DDL) {
-      TCatalogServiceRequestHeader header = new TCatalogServiceRequestHeader();
-      header.setRequesting_user(analysis.getAnalyzer().getUser().getName());
-      ddl.getDdl_params().setHeader(header);
-    }
-  }
-
-  /**
-   * Loads a table or partition with one or more data files. If the "overwrite" flag
-   * in the request is true, all existing data in the table/partition will be replaced.
-   * If the "overwrite" flag is false, the files will be added alongside any existing
-   * data files.
-   */
-  public TLoadDataResp loadTableData(TLoadDataReq request) throws ImpalaException,
-      IOException {
-    TableName tableName = TableName.fromThrift(request.getTable_name());
-
-    // Get the destination for the load. If the load is targeting a partition,
-    // this is the partition location. Otherwise this is the table location.
-    String destPathString = null;
-    if (request.isSetPartition_spec()) {
-      destPathString = impaladCatalog_.getHdfsPartition(tableName.getDb(),
-          tableName.getTbl(), request.getPartition_spec()).getLocation();
-    } else {
-      destPathString = impaladCatalog_.getTable(tableName.getDb(), tableName.getTbl())
-          .getMetaStoreTable().getSd().getLocation();
-    }
-
-    Path destPath = new Path(destPathString);
-    Path sourcePath = new Path(request.source_path);
-    FileSystem destFs = destPath.getFileSystem(FileSystemUtil.getConfiguration());
-    FileSystem sourceFs = sourcePath.getFileSystem(FileSystemUtil.getConfiguration());
-
-    // Create a temporary directory within the final destination directory to stage the
-    // file move.
-    Path tmpDestPath = FileSystemUtil.makeTmpSubdirectory(destPath);
-
-    int filesLoaded = 0;
-    if (sourceFs.isDirectory(sourcePath)) {
-      filesLoaded = FileSystemUtil.relocateAllVisibleFiles(sourcePath, tmpDestPath);
-    } else {
-      FileSystemUtil.relocateFile(sourcePath, tmpDestPath, true);
-      filesLoaded = 1;
-    }
-
-    // If this is an OVERWRITE, delete all files in the destination.
-    if (request.isOverwrite()) {
-      FileSystemUtil.deleteAllVisibleFiles(destPath);
-    }
-
-    // Move the files from the temporary location to the final destination.
-    FileSystemUtil.relocateAllVisibleFiles(tmpDestPath, destPath);
-    // Cleanup the tmp directory.
-    destFs.delete(tmpDestPath, true);
-    TLoadDataResp response = new TLoadDataResp();
-    TColumnValue col = new TColumnValue();
-    String loadMsg = String.format(
-        "Loaded %d file(s). Total files in destination location: %d",
-        filesLoaded, FileSystemUtil.getTotalNumVisibleFiles(destPath));
-    col.setString_val(loadMsg);
-    response.setLoad_summary(new TResultRow(Lists.newArrayList(col)));
-    return response;
-  }
-
-  /**
-   * Parses and plans a query in order to generate its explain string. This method does
-   * not increase the query id counter.
-   */
-  public String getExplainString(TQueryCtx queryCtx) throws ImpalaException {
-    StringBuilder stringBuilder = new StringBuilder();
-    createExecRequest(queryCtx, stringBuilder);
-    return stringBuilder.toString();
-  }
-
-  /**
-   * Returns all tables in database 'dbName' that match the pattern of 'matcher' and are
-   * accessible to 'user'.
-   */
-  public List<String> getTableNames(String dbName, PatternMatcher matcher,
-      User user) throws ImpalaException {
-    List<String> tblNames = impaladCatalog_.getTableNames(dbName, matcher);
-    if (authzConfig_.isEnabled()) {
-      Iterator<String> iter = tblNames.iterator();
-      while (iter.hasNext()) {
-        String tblName = iter.next();
-        PrivilegeRequest privilegeRequest = new PrivilegeRequestBuilder()
-            .any().onAnyColumn(dbName, tblName).toRequest();
-        if (!authzChecker_.get().hasAccess(user, privilegeRequest)) {
-          iter.remove();
-        }
-      }
-    }
-    return tblNames;
-  }
-
-  /**
-   * Returns the columns of 'table' whose names match 'matcher' and that are
-   * accessible to the given user.
-   */
-  public List<Column> getColumns(Table table, PatternMatcher matcher,
-      User user) throws InternalException {
-    Preconditions.checkNotNull(table);
-    Preconditions.checkNotNull(matcher);
-    List<Column> columns = Lists.newArrayList();
-    for (Column column: table.getColumnsInHiveOrder()) {
-      String colName = column.getName();
-      if (!matcher.matches(colName)) continue;
-      if (authzConfig_.isEnabled()) {
-        PrivilegeRequest privilegeRequest = new PrivilegeRequestBuilder()
-            .any().onColumn(table.getTableName().getDb(), table.getTableName().getTbl(),
-            colName).toRequest();
-        if (!authzChecker_.get().hasAccess(user, privilegeRequest)) continue;
-      }
-      columns.add(column);
-    }
-    return columns;
-  }
-
-  /**
-   * Returns all databases in catalog cache that match the pattern of 'matcher' and are
-   * accessible to 'user'.
-   */
-  public List<Db> getDbs(PatternMatcher matcher, User user)
-      throws InternalException {
-    List<Db> dbs = impaladCatalog_.getDbs(matcher);
-    // If authorization is enabled, filter out the databases the user does not
-    // have permissions on.
-    if (authzConfig_.isEnabled()) {
-      Iterator<Db> iter = dbs.iterator();
-      while (iter.hasNext()) {
-        Db db = iter.next();
-        if (!isAccessibleToUser(db, user)) iter.remove();
-      }
-    }
-    return dbs;
-  }
-
-  /**
-   * Check whether database is accessible to given user.
-   */
-  private boolean isAccessibleToUser(Db db, User user)
-      throws InternalException {
-    if (db.getName().toLowerCase().equals(Catalog.DEFAULT_DB.toLowerCase())) {
-      // Default DB should always be shown.
-      return true;
-    }
-    PrivilegeRequest request = new PrivilegeRequestBuilder()
-        .any().onAnyTable(db.getName()).toRequest();
-    return authzChecker_.get().hasAccess(user, request);
-  }
-
-  /**
-   * Returns all data sources that match the pattern. If pattern is null,
-   * matches all data sources.
-   */
-  public List<DataSource> getDataSrcs(String pattern) {
-    return impaladCatalog_.getDataSources(
-        PatternMatcher.createHivePatternMatcher(pattern));
-  }
-
-  /**
-   * Generate result set and schema for a SHOW COLUMN STATS command.
-   */
-  public TResultSet getColumnStats(String dbName, String tableName)
-      throws ImpalaException {
-    Table table = impaladCatalog_.getTable(dbName, tableName);
-    TResultSet result = new TResultSet();
-    TResultSetMetadata resultSchema = new TResultSetMetadata();
-    result.setSchema(resultSchema);
-    resultSchema.addToColumns(new TColumn("Column", Type.STRING.toThrift()));
-    resultSchema.addToColumns(new TColumn("Type", Type.STRING.toThrift()));
-    resultSchema.addToColumns(
-        new TColumn("#Distinct Values", Type.BIGINT.toThrift()));
-    resultSchema.addToColumns(new TColumn("#Nulls", Type.BIGINT.toThrift()));
-    resultSchema.addToColumns(new TColumn("Max Size", Type.INT.toThrift()));
-    resultSchema.addToColumns(new TColumn("Avg Size", Type.DOUBLE.toThrift()));
-
-    for (Column c: table.getColumnsInHiveOrder()) {
-      TResultRowBuilder rowBuilder = new TResultRowBuilder();
-      // Add name, type, NDVs, numNulls, max size and avg size.
-      rowBuilder.add(c.getName()).add(c.getType().toSql())
-          .add(c.getStats().getNumDistinctValues()).add(c.getStats().getNumNulls())
-          .add(c.getStats().getMaxSize()).add(c.getStats().getAvgSize());
-      result.addToRows(rowBuilder.get());
-    }
-    return result;
-  }
-
-  /**
-   * Generate result set and schema for a SHOW TABLE STATS command.
-   */
-  public TResultSet getTableStats(String dbName, String tableName)
-      throws ImpalaException {
-    Table table = impaladCatalog_.getTable(dbName, tableName);
-    if (table instanceof HdfsTable) {
-      return ((HdfsTable) table).getTableStats();
-    } else if (table instanceof HBaseTable) {
-      return ((HBaseTable) table).getTableStats();
-    } else if (table instanceof DataSourceTable) {
-      return ((DataSourceTable) table).getTableStats();
-    } else if (table instanceof KuduTable) {
-      return ((KuduTable) table).getTableStats();
-    } else {
-      throw new InternalException("Invalid table class: " + table.getClass());
-    }
-  }
-
-  /**
-   * Returns all function signatures that match the pattern. If pattern is null,
-   * matches all functions. If exactMatch is true, treats fnPattern as a function
-   * name instead of a pattern and returns exact matches only.
-   */
-  public List<Function> getFunctions(TFunctionCategory category,
-      String dbName, String fnPattern, boolean exactMatch)
-      throws DatabaseNotFoundException {
-    Db db = impaladCatalog_.getDb(dbName);
-    if (db == null) {
-      throw new DatabaseNotFoundException("Database '" + dbName + "' not found");
-    }
-    List<Function> fns;
-    if (exactMatch) {
-      Preconditions.checkNotNull(fnPattern, "Invalid function name");
-      fns = db.getFunctions(category, fnPattern);
-    } else {
-      fns = db.getFunctions(
-        category, PatternMatcher.createHivePatternMatcher(fnPattern));
-    }
-    Collections.sort(fns,
-        new Comparator<Function>() {
-          @Override
-          public int compare(Function f1, Function f2) {
-            return f1.signatureString().compareTo(f2.signatureString());
-          }
-        });
-    return fns;
-  }
-
-  /**
-   * Returns metadata for the specified database. Throws an exception if the db is
-   * not found or if there is an error loading the db metadata.
-   */
-  public TDescribeResult describeDb(String dbName, TDescribeOutputStyle outputStyle)
-      throws ImpalaException {
-    Db db = impaladCatalog_.getDb(dbName);
-    return DescribeResultFactory.buildDescribeDbResult(db, outputStyle);
-  }
-
-  /**
-   * Returns metadata for the specified table, such as its column descriptors.
-   * Throws an exception if the table or db is not found or if there is an error loading
-   * the table metadata.
-   */
-  public TDescribeResult describeTable(String dbName, String tableName,
-      TDescribeOutputStyle outputStyle, TColumnType tResultStruct)
-          throws ImpalaException {
-    if (outputStyle == TDescribeOutputStyle.MINIMAL) {
-      StructType resultStruct = (StructType)Type.fromThrift(tResultStruct);
-      return DescribeResultFactory.buildDescribeMinimalResult(resultStruct);
-    } else {
-      Preconditions.checkArgument(outputStyle == TDescribeOutputStyle.FORMATTED ||
-          outputStyle == TDescribeOutputStyle.EXTENDED);
-      Table table = impaladCatalog_.getTable(dbName, tableName);
-      return DescribeResultFactory.buildDescribeFormattedResult(table);
-    }
-  }
-
-  /**
-   * Given a set of table names, returns the set of table names that are missing
-   * metadata (are not yet loaded).
-   */
-  private Set<TableName> getMissingTbls(Set<TableName> tableNames) {
-    Set<TableName> missingTbls = new HashSet<TableName>();
-    for (TableName tblName: tableNames) {
-      Db db = getCatalog().getDb(tblName.getDb());
-      if (db == null) continue;
-      Table tbl = db.getTable(tblName.getTbl());
-      if (tbl == null) continue;
-      if (!tbl.isLoaded()) missingTbls.add(tblName);
-    }
-    return missingTbls;
-  }
-
-  /**
-   * Requests the catalog server load the given set of tables and waits until
-   * these tables show up in the local catalog, or the given timeout has been reached.
-   * The timeout is specified in milliseconds, with a value <= 0 indicating no timeout.
-   * The exact steps taken are:
-   * 1) Collect the tables that are missing (not yet loaded locally).
-   * 2) Make an RPC to the CatalogServer to prioritize the loading of these tables.
-   * 3) Wait until the local catalog contains all missing tables by (re)checking the
-   *    catalog each time a new catalog update is received.
-   *
-   * Returns true if all missing tables were received before timing out and false if
-   * the timeout was reached before all tables were received.
-   */
-  private boolean requestTblLoadAndWait(Set<TableName> requestedTbls, long timeoutMs)
-      throws InternalException {
-    Set<TableName> missingTbls = getMissingTbls(requestedTbls);
-    // If there are no missing tables, return early and avoid an RPC to the CatalogServer.
-    if (missingTbls.isEmpty()) return true;
-
-    // Call into the CatalogServer and request the required tables be loaded.
-    LOG.info(String.format("Requesting prioritized load of table(s): %s",
-        Joiner.on(", ").join(missingTbls)));
-    TStatus status = FeSupport.PrioritizeLoad(missingTbls);
-    if (status.getStatus_code() != TErrorCode.OK) {
-      throw new InternalException("Error requesting prioritized load: " +
-          Joiner.on("\n").join(status.getError_msgs()));
-    }
-
-    long startTimeMs = System.currentTimeMillis();
-    // Wait until all the required tables are loaded in the Impalad's catalog cache.
-    while (!missingTbls.isEmpty()) {
-      // Check if the timeout has been reached.
-      if (timeoutMs > 0 && System.currentTimeMillis() - startTimeMs > timeoutMs) {
-        return false;
-      }
-
-      LOG.trace(String.format("Waiting for table(s) to complete loading: %s",
-          Joiner.on(", ").join(missingTbls)));
-      getCatalog().waitForCatalogUpdate(MAX_CATALOG_UPDATE_WAIT_TIME_MS);
-      missingTbls = getMissingTbls(missingTbls);
-      // TODO: Check for query cancellation here.
-    }
-    return true;
-  }
-
-  /**
-   * Overload of requestTblLoadAndWait that uses the default timeout.
-   */
-  public boolean requestTblLoadAndWait(Set<TableName> requestedTbls)
-      throws InternalException {
-    return requestTblLoadAndWait(requestedTbls, MISSING_TBL_LOAD_WAIT_TIMEOUT_MS);
-  }
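The private requestTblLoadAndWait above is essentially a bounded polling loop: re-check the missing set each time a catalog update arrives, and give up once the timeout elapses (a value <= 0 means wait indefinitely). A minimal standalone sketch of that shape, assuming a generic loaded-check in place of getMissingTbls()/waitForCatalogUpdate(); WaitSketch, waitUntil, and the BooleanSupplier parameter are illustrative placeholders:

import java.util.function.BooleanSupplier;

final class WaitSketch {
  // Poll until 'loaded' holds or 'timeoutMs' elapses; timeoutMs <= 0 means no timeout.
  static boolean waitUntil(BooleanSupplier loaded, long timeoutMs, long pollMs)
      throws InterruptedException {
    long startTimeMs = System.currentTimeMillis();
    while (!loaded.getAsBoolean()) {
      if (timeoutMs > 0 && System.currentTimeMillis() - startTimeMs > timeoutMs) {
        return false;  // timed out before everything was loaded
      }
      Thread.sleep(pollMs);  // stands in for the blocking waitForCatalogUpdate(...)
    }
    return true;
  }
}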
-
-  /**
-   * Analyzes the SQL statement included in queryCtx and returns the AnalysisResult.
-   * Authorizes all catalog object accesses and throws an AuthorizationException
-   * if the user does not have privileges to access one or more objects.
-   * If a statement fails analysis because table/view metadata was not loaded, an
-   * RPC to the CatalogServer will be executed to request loading the missing metadata
-   * and analysis will be restarted once the required tables have been loaded
-   * in the local Impalad Catalog or the MISSING_TBL_LOAD_WAIT_TIMEOUT_MS timeout
-   * is reached.
-   * The goal of this timeout is not to fail analysis, but to restart the
-   * analysis/missing-table collection process. This helps ensure a statement never
-   * waits indefinitely for a table to be loaded in the event its metadata was
-   * invalidated.
-   * TODO: Also consider adding an overall timeout that fails analysis.
-   */
-  private AnalysisContext.AnalysisResult analyzeStmt(TQueryCtx queryCtx)
-      throws AnalysisException, InternalException, AuthorizationException {
-    if (!impaladCatalog_.isReady()) {
-      throw new AnalysisException("This Impala daemon is not ready to accept user " +
-          "requests. Status: Waiting for catalog update from the StateStore.");
-    }
-
-    AnalysisContext analysisCtx = new AnalysisContext(impaladCatalog_, queryCtx,
-        authzConfig_);
-    LOG.debug("analyze query " + queryCtx.request.stmt);
-
-    // Run analysis in a loop until any of the following events occurs:
-    // 1) Analysis completes successfully.
-    // 2) Analysis fails with an AnalysisException AND there are no missing tables.
-    // 3) Analysis fails with an AuthorizationException.
-    try {
-      while (true) {
-        try {
-          analysisCtx.analyze(queryCtx.request.stmt);
-          Preconditions.checkState(analysisCtx.getAnalyzer().getMissingTbls().isEmpty());
-          return analysisCtx.getAnalysisResult();
-        } catch (AnalysisException e) {
-          Set<TableName> missingTbls = analysisCtx.getAnalyzer().getMissingTbls();
-          // Only re-throw the AnalysisException if there were no missing tables.
-          if (missingTbls.isEmpty()) throw e;
-
-          // Some tables/views were missing, request and wait for them to load.
-          if (!requestTblLoadAndWait(missingTbls, MISSING_TBL_LOAD_WAIT_TIMEOUT_MS)) {
-            LOG.info(String.format("Missing tables were not received in %dms. Load " +
-                "request will be retried.", MISSING_TBL_LOAD_WAIT_TIMEOUT_MS));
-          }
-        }
-      }
-    } finally {
-      // Authorize all accesses.
-      // AuthorizationExceptions must take precedence over any AnalysisException
-      // that has been thrown, so perform the authorization first.
-      analysisCtx.authorize(getAuthzChecker());
-    }
-  }
-
-  /**
-   * Create a populated TExecRequest corresponding to the supplied TQueryCtx.
-   */
-  public TExecRequest createExecRequest(TQueryCtx queryCtx, StringBuilder explainString)
-      throws ImpalaException {
-    // Analyze the statement
-    AnalysisContext.AnalysisResult analysisResult = analyzeStmt(queryCtx);
-    EventSequence timeline = analysisResult.getAnalyzer().getTimeline();
-    timeline.markEvent("Analysis finished");
-    Preconditions.checkNotNull(analysisResult.getStmt());
-    TExecRequest result = new TExecRequest();
-    result.setQuery_options(queryCtx.request.getQuery_options());
-    result.setAccess_events(analysisResult.getAccessEvents());
-    result.analysis_warnings = analysisResult.getAnalyzer().getWarnings();
-
-    if (analysisResult.isCatalogOp()) {
-      result.stmt_type = TStmtType.DDL;
-      createCatalogOpRequest(analysisResult, result);
-      TLineageGraph thriftLineageGraph = analysisResult.getThriftLineageGraph();
-      if (thriftLineageGraph != null && thriftLineageGraph.isSetQuery_text()) {
-        result.catalog_op_request.setLineage_graph(thriftLineageGraph);
-      }
-      // All DDL operations except for CTAS are done with analysis at this point.
-      if (!analysisResult.isCreateTableAsSelectStmt()) return result;
-    } else if (analysisResult.isLoadDataStmt()) {
-      result.stmt_type = TStmtType.LOAD;
-      result.setResult_set_metadata(new TResultSetMetadata(Arrays.asList(
-          new TColumn("summary", Type.STRING.toThrift()))));
-      result.setLoad_data_request(analysisResult.getLoadDataStmt().toThrift());
-      return result;
-    } else if (analysisResult.isSetStmt()) {
-      result.stmt_type = TStmtType.SET;
-      result.setResult_set_metadata(new TResultSetMetadata(Arrays.asList(
-          new TColumn("option", Type.STRING.toThrift()),
-          new TColumn("value", Type.STRING.toThrift()))));
-      result.setSet_query_option_request(analysisResult.getSetStmt().toThrift());
-      return result;
-    }
-
-    // create TQueryExecRequest
-    Preconditions.checkState(analysisResult.isQueryStmt() || analysisResult.isDmlStmt()
-        || analysisResult.isCreateTableAsSelectStmt() || analysisResult.isUpdateStmt()
-        || analysisResult.isDeleteStmt());
-
-    TQueryExecRequest queryExecRequest = new TQueryExecRequest();
-    // create plan
-    LOG.debug("create plan");
-    Planner planner = new Planner(analysisResult, queryCtx);
-    if (RuntimeEnv.INSTANCE.isTestEnv()
-        && queryCtx.request.query_options.mt_num_cores > 0) {
-      // TODO: this is just to be able to run tests; implement this
-      List<PlanFragment> planRoots = planner.createParallelPlans();
-      for (PlanFragment planRoot: planRoots) {
-        TPlanFragmentTree thriftPlan = planRoot.treeToThrift();
-        queryExecRequest.addToMt_plans(thriftPlan);
-      }
-      queryExecRequest.setDesc_tbl(analysisResult.getAnalyzer().getDescTbl().toThrift());
-      queryExecRequest.setQuery_ctx(queryCtx);
-      explainString.append(planner.getExplainString(
-          Lists.newArrayList(planRoots.get(0)), queryExecRequest,
-          TExplainLevel.STANDARD));
-      queryExecRequest.setQuery_plan(explainString.toString());
-      result.setQuery_exec_request(queryExecRequest);
-      return result;
-    }
-    ArrayList<PlanFragment> fragments = planner.createPlan();
-
-    List<ScanNode> scanNodes = Lists.newArrayList();
-    // map from fragment to its index in queryExecRequest.fragments; needed for
-    // queryExecRequest.dest_fragment_idx
-    Map<PlanFragment, Integer> fragmentIdx = Maps.newHashMap();
-
-    for (int idx = 0; idx < fragments.size(); ++idx) {
-      PlanFragment fragment = fragments.get(idx);
-      Preconditions.checkNotNull(fragment.getPlanRoot());
-      fragment.getPlanRoot().collect(Predicates.instanceOf(ScanNode.class), scanNodes);
-      fragmentIdx.put(fragment, idx);
-    }
-
-    // set fragment destinations
-    for (int i = 1; i < fragments.size(); ++i) {
-      PlanFragment dest = fragments.get(i).getDestFragment();
-      Integer idx = fragmentIdx.get(dest);
-      Preconditions.checkState(idx != null);
-      queryExecRequest.addToDest_fragment_idx(idx.intValue());
-    }
-
-    // Set scan ranges/locations for scan nodes.
-    // Also assemble a list of table names missing stats, for use in a warning message.
-    LOG.debug("get scan range locations");
-    Set<TTableName> tablesMissingStats = Sets.newTreeSet();
-    // Assemble a similar list for corrupt stats
-    Set<TTableName> tablesWithCorruptStats = Sets.newTreeSet();
-    for (ScanNode scanNode: scanNodes) {
-      queryExecRequest.putToPer_node_scan_ranges(
-          scanNode.getId().asInt(),
-          scanNode.getScanRangeLocations());
-      if (scanNode.isTableMissingStats()) {
-        tablesMissingStats.add(scanNode.getTupleDesc().getTableName().toThrift());
-      }
-      if (scanNode.hasCorruptTableStats()) {
-        tablesWithCorruptStats.add(scanNode.getTupleDesc().getTableName().toThrift());
-      }
-    }
-
-    queryExecRequest.setHost_list(analysisResult.getAnalyzer().getHostIndex().getList());
-    for (TTableName tableName: tablesMissingStats) {
-      queryCtx.addToTables_missing_stats(tableName);
-    }
-    for (TTableName tableName: tablesWithCorruptStats) {
-      queryCtx.addToTables_with_corrupt_stats(tableName);
-    }
-
-    // Optionally disable spilling in the backend. Allow spilling if there are plan hints
-    // or if all tables have stats.
-    if (queryCtx.request.query_options.isDisable_unsafe_spills()
-        && !tablesMissingStats.isEmpty()
-        && !analysisResult.getAnalyzer().hasPlanHints()) {
-      queryCtx.setDisable_spilling(true);
-    }
-
-    // Compute resource requirements after assigning scan range locations because the
-    // cost estimates of scan nodes rely on them.
-    try {
-      planner.computeResourceReqs(fragments, true, queryExecRequest);
-    } catch (Exception e) {
-      // Turn exceptions into a warning to allow the query to execute.
-      LOG.error("Failed to compute resource requirements for query\n" +
-          queryCtx.request.getStmt(), e);
-    }
-
-    // The fragments at this point have all their state set; serialize them to thrift.
-    for (PlanFragment fragment: fragments) {
-      TPlanFragment thriftFragment = fragment.toThrift();
-      queryExecRequest.addToFragments(thriftFragment);
-    }
-
-    // Use EXTENDED by default for all non-explain statements.
-    TExplainLevel explainLevel = TExplainLevel.EXTENDED;
-    // Use the query option for explain stmts and tests (e.g., planner tests).
-    if (analysisResult.isExplainStmt() || RuntimeEnv.INSTANCE.isTestEnv()) {
-      explainLevel = queryCtx.request.query_options.getExplain_level();
-    }
-
-    // Global query parameters to be set in each TPlanExecRequest.
-    queryExecRequest.setQuery_ctx(queryCtx);
-
-    explainString.append(
-        planner.getExplainString(fragments, queryExecRequest, explainLevel));
-    queryExecRequest.setQuery_plan(explainString.toString());
-    queryExecRequest.setDesc_tbl(analysisResult.getAnalyzer().getDescTbl().toThrift());
-
-    TLineageGraph thriftLineageGraph = analysisResult.getThriftLineageGraph();
-    if (thriftLineageGraph != null && thriftLineageGraph.isSetQuery_text()) {
-      queryExecRequest.setLineage_graph(thriftLineageGraph);
-    }
-
-    if (analysisResult.isExplainStmt()) {
-      // Return the EXPLAIN request
-      createExplainRequest(explainString.toString(), result);
-      return result;
-    }
-
-    result.setQuery_exec_request(queryExecRequest);
-
-    if (analysisResult.isQueryStmt()) {
-      // fill in the metadata
-      LOG.debug("create result set metadata");
-      result.stmt_type = TStmtType.QUERY;
-      result.query_exec_request.stmt_type = result.stmt_type;
-      TResultSetMetadata metadata = new TResultSetMetadata();
-      QueryStmt queryStmt = analysisResult.getQueryStmt();
-      int colCnt = queryStmt.getColLabels().size();
-      for (int i = 0; i < colCnt; ++i) {
-        TColumn colDesc = new TColumn();
-        colDesc.columnName = queryStmt.getColLabels().get(i);
-        colDesc.columnType = queryStmt.getResultExprs().get(i).getType().toThrift();
-        metadata.addToColumns(colDesc);
-      }
-      result.setResult_set_metadata(metadata);
-    } else if (analysisResult.isInsertStmt() ||
-        analysisResult.isCreateTableAsSelectStmt()) {
-      // For CTAS the overall TExecRequest statement type is DDL, but the
-      // query_exec_request should be DML
-      result.stmt_type =
-          analysisResult.isCreateTableAsSelectStmt() ? TStmtType.DDL : TStmtType.DML;
-      result.query_exec_request.stmt_type = TStmtType.DML;
-
-      // create finalization params of insert stmt
-      InsertStmt insertStmt = analysisResult.getInsertStmt();
-      if (insertStmt.getTargetTable() instanceof HdfsTable) {
-        TFinalizeParams finalizeParams = new TFinalizeParams();
-        finalizeParams.setIs_overwrite(insertStmt.isOverwrite());
-        finalizeParams.setTable_name(insertStmt.getTargetTableName().getTbl());
-        finalizeParams.setTable_id(insertStmt.getTargetTable().getId().asInt());
-        String db = insertStmt.getTargetTableName().getDb();
-        finalizeParams.setTable_db(db == null ? queryCtx.session.database : db);
-        HdfsTable hdfsTable = (HdfsTable) insertStmt.getTargetTable();
-        finalizeParams.setHdfs_base_dir(hdfsTable.getHdfsBaseDir());
-        finalizeParams.setStaging_dir(
-            hdfsTable.getHdfsBaseDir() + "/_impala_insert_staging");
-        queryExecRequest.setFinalize_params(finalizeParams);
-      }
-    } else {
-      Preconditions.checkState(analysisResult.isUpdateStmt() || analysisResult.isDeleteStmt());
-      result.stmt_type = TStmtType.DML;
-      result.query_exec_request.stmt_type = TStmtType.DML;
-    }
-
-    validateTableIds(analysisResult.getAnalyzer(), result);
-
-    timeline.markEvent("Planning finished");
-    result.setTimeline(analysisResult.getAnalyzer().getTimeline().toThrift());
-    return result;
-  }
-
-  /**
-   * Check that we don't have any duplicate table IDs (see IMPALA-1702).
-   * To be removed when IMPALA-1702 is resolved.
-   */
-  private void validateTableIds(Analyzer analyzer, TExecRequest result)
-      throws InternalException {
-    Map<TableId, Table> tableIds = Maps.newHashMap();
-    Collection<TupleDescriptor> tupleDescs = analyzer.getDescTbl().getTupleDescs();
-    for (TupleDescriptor desc: tupleDescs) {
-      // Skip if the tuple descriptor did not come from a materializing scan.
-      if (!desc.isMaterialized()) continue;
-      Table table = desc.getTable();
-      if (table == null) continue;
-      Table otherTable = tableIds.get(table.getId());
-      if (otherTable == table) continue; // Same table referenced twice
-      if (otherTable == null) {
-        tableIds.put(table.getId(), table);
-        continue;
-      }
-      LOG.error("Found duplicate table ID! id=" + table.getId() + "\ntable1=\n"
-          + table.toTCatalogObject() + "\ntable2=\n" + otherTable.toTCatalogObject()
-          + "\nexec_request=\n" + result);
-      throw new InternalException("Query encountered invalid metadata, likely due to " +
-          "IMPALA-1702. Please try rerunning the query.");
-    }
-  }
-
-  /**
-   * Attaches the explain result to the TExecRequest.
-   */
-  private void createExplainRequest(String explainString, TExecRequest result) {
-    // update the metadata - one string column
-    TColumn colDesc = new TColumn("Explain String", Type.STRING.toThrift());
-    TResultSetMetadata metadata = new TResultSetMetadata(Lists.newArrayList(colDesc));
-    result.setResult_set_metadata(metadata);
-
-    // create the explain result set - split the explain string into one line per row
-    String[] explainStringArray = explainString.toString().split("\n");
-    TExplainResult explainResult = new TExplainResult();
-    explainResult.results = Lists.newArrayList();
-    for (int i = 0; i < explainStringArray.length; ++i) {
-      TColumnValue col = new TColumnValue();
-      col.setString_val(explainStringArray[i]);
-      TResultRow row = new TResultRow(Lists.newArrayList(col));
-      explainResult.results.add(row);
-    }
-    result.setExplain_result(explainResult);
-    result.stmt_type = TStmtType.EXPLAIN;
-  }
-
-  /**
-   * Executes a HiveServer2 metadata operation and returns a TResultSet
-   */
-  public TResultSet execHiveServer2MetadataOp(TMetadataOpRequest request)
-      throws ImpalaException {
-    User user = request.isSetSession() ?
-        new User(TSessionStateUtil.getEffectiveUser(request.session)) :
-        ImpalaInternalAdminUser.getInstance();
-    switch (request.opcode) {
-      case GET_TYPE_INFO: return MetadataOp.getTypeInfo();
-      case GET_SCHEMAS:
-      {
-        TGetSchemasReq req = request.getGet_schemas_req();
-        return MetadataOp.getSchemas(this, req.getCatalogName(),
-            req.getSchemaName(), user);
-      }
-      case GET_TABLES:
-      {
-        TGetTablesReq req = request.getGet_tables_req();
-        return MetadataOp.getTables(this, req.getCatalogName(),
-            req.getSchemaName(), req.getTableName(), req.getTableTypes(), user);
-      }
-      case GET_COLUMNS:
-      {
-        TGetColumnsReq req = request.getGet_columns_req();
-        return MetadataOp.getColumns(this, req.getCatalogName(),
-            req.getSchemaName(), req.getTableName(), req.getColumnName(), user);
-      }
-      case GET_CATALOGS: return MetadataOp.getCatalogs();
-      case GET_TABLE_TYPES: return MetadataOp.getTableTypes();
-      case GET_FUNCTIONS:
-      {
-        TGetFunctionsReq req = request.getGet_functions_req();
-        return MetadataOp.getFunctions(this, req.getCatalogName(),
-            req.getSchemaName(), req.getFunctionName(), user);
-      }
-      default:
-        throw new NotImplementedException(request.opcode + " has not been implemented.");
-    }
-  }
-
-  /**
-   * Returns info about all files of a table or partition.
-   */
-  public TResultSet getTableFiles(TShowFilesParams request)
-      throws ImpalaException {
-    Table table = impaladCatalog_.getTable(request.getTable_name().getDb_name(),
-        request.getTable_name().getTable_name());
-    if (table instanceof HdfsTable) {
-      return ((HdfsTable) table).getFiles(request.getPartition_spec());
-    } else {
-      throw new InternalException("SHOW FILES only supports Hdfs table. " +
-          "Unsupported table class: " + table.getClass());
-    }
-  }
-}


[32/61] [partial] incubator-impala git commit: IMPALA-3786: Replace "cloudera" with "apache" (part 1)

Posted by kw...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/catalog/Catalog.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/catalog/Catalog.java b/fe/src/main/java/com/cloudera/impala/catalog/Catalog.java
deleted file mode 100644
index 4cd1c42..0000000
--- a/fe/src/main/java/com/cloudera/impala/catalog/Catalog.java
+++ /dev/null
@@ -1,522 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.catalog;
-
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.List;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.atomic.AtomicReference;
-
-import org.apache.log4j.Logger;
-
-import com.cloudera.impala.analysis.FunctionName;
-import com.cloudera.impala.catalog.MetaStoreClientPool.MetaStoreClient;
-import com.cloudera.impala.thrift.TCatalogObject;
-import com.cloudera.impala.thrift.TFunction;
-import com.cloudera.impala.thrift.TPartitionKeyValue;
-import com.cloudera.impala.thrift.TTableName;
-import com.cloudera.impala.util.PatternMatcher;
-import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-/**
- * Thread safe interface for reading and updating metadata stored in the Hive MetaStore.
- * This class provides a storage API for caching CatalogObjects: databases, tables,
- * and functions and the relevant metadata to go along with them. Although this class is
- * thread safe, it does not guarantee consistency with the MetaStore. It is important
- * to keep in mind that there may be external (potentially conflicting) concurrent
- * metastore updates occurring at any time.
- * The CatalogObject storage hierarchy is:
- * Catalog -> Db -> Table
- *               -> Function
- * Each level has its own synchronization, so the cache of Dbs is synchronized and each
- * Db has a cache of tables which is synchronized independently.
- *
- * The catalog is populated with the impala builtins on startup. Builtins and user
- * functions are treated identically by the catalog. The builtins go in a specific
- * database that the user cannot modify.
- * Builtins are populated on startup in initBuiltins().
- */
-public abstract class Catalog {
-  private static final Logger LOG = Logger.getLogger(Catalog.class);
-
-  // Initial catalog version.
-  public final static long INITIAL_CATALOG_VERSION = 0L;
-  public static final String DEFAULT_DB = "default";
-  private static final int META_STORE_CLIENT_POOL_SIZE = 10;
-
-  public static final String BUILTINS_DB = "_impala_builtins";
-
-  protected final MetaStoreClientPool metaStoreClientPool_ = new MetaStoreClientPool(0);
-
-  // Cache of authorization policy metadata. Populated from data retrieved from the
-  // Sentry Service, if configured.
-  protected AuthorizationPolicy authPolicy_ = new AuthorizationPolicy();
-
-  // Thread safe cache of database metadata. Uses an AtomicReference so reset()
-  // operations can atomically swap dbCache_ references.
-  // TODO: Update this to use a CatalogObjectCache?
-  protected AtomicReference<ConcurrentHashMap<String, Db>> dbCache_ =
-      new AtomicReference<ConcurrentHashMap<String, Db>>(
-          new ConcurrentHashMap<String, Db>());
-
-  // DB that contains all builtins
-  private static Db builtinsDb_;
-
-  // Cache of data sources.
-  protected final CatalogObjectCache<DataSource> dataSources_;
-
-  // Cache of known HDFS cache pools. Allows for checking the existence
-  // of pools without hitting HDFS.
-  protected final CatalogObjectCache<HdfsCachePool> hdfsCachePools_ =
-      new CatalogObjectCache<HdfsCachePool>(false);
-
-  /**
-   * Creates a new instance of a Catalog. If initMetastoreClientPool is true, will
-   * also add META_STORE_CLIENT_POOL_SIZE clients to metaStoreClientPool_.
-   */
-  public Catalog(boolean initMetastoreClientPool) {
-    if (initMetastoreClientPool) {
-      metaStoreClientPool_.addClients(META_STORE_CLIENT_POOL_SIZE);
-    }
-    dataSources_ = new CatalogObjectCache<DataSource>();
-    builtinsDb_ = new BuiltinsDb(BUILTINS_DB, this);
-    addDb(builtinsDb_);
-  }
-
-  public Db getBuiltinsDb() { return builtinsDb_; }
-
-  /**
-   * Adds a new database to the catalog, replacing any existing database with the same
-   * name. Returns the previous database with this name, or null if there was no
-   * previous database.
-   */
-  public Db addDb(Db db) {
-    return dbCache_.get().put(db.getName().toLowerCase(), db);
-  }
-
-  /**
-   * Gets the Db object from the Catalog using a case-insensitive lookup on the name.
-   * Returns null if no matching database is found.
-   */
-  public Db getDb(String dbName) {
-    Preconditions.checkState(dbName != null && !dbName.isEmpty(),
-        "Null or empty database name given as argument to Catalog.getDb");
-    return dbCache_.get().get(dbName.toLowerCase());
-  }
-
-  /**
-   * Removes a database from the metadata cache. Returns the value removed or null
-   * if no database was removed as part of this operation. Used by DROP DATABASE
-   * statements.
-   */
-  public Db removeDb(String dbName) {
-    return dbCache_.get().remove(dbName.toLowerCase());
-  }
-
-  /**
-   * Returns all databases that match 'matcher'.
-   */
-  public List<Db> getDbs(PatternMatcher matcher) {
-    return filterCatalogObjectsByPattern(dbCache_.get().values(), matcher);
-  }
-
-  /**
-   * Returns the Table object for the given dbName/tableName. This will trigger a
-   * metadata load if the table metadata is not yet cached.
-   */
-  public Table getTable(String dbName, String tableName) throws
-      CatalogException {
-    Db db = getDb(dbName);
-    if (db == null) {
-      throw new DatabaseNotFoundException("Database '" + dbName + "' not found");
-    }
-    return db.getTable(tableName);
-  }
-
-  /**
-   * Removes a table from the catalog and returns the table that was removed, or null
-   * if the table/database does not exist.
-   */
-  public Table removeTable(TTableName tableName) {
-    // Look up the parent db and, if it exists, remove the table from its cache.
-    Db db = getDb(tableName.getDb_name());
-    if (db == null) return null;
-    return db.removeTable(tableName.getTable_name());
-  }
-
-  /**
-   * Returns all tables in 'dbName' that match 'matcher'.
-   *
-   * dbName must not be null.
-   *
-   * Table names are returned unqualified.
-   */
-  public List<String> getTableNames(String dbName, PatternMatcher matcher)
-      throws DatabaseNotFoundException {
-    Preconditions.checkNotNull(dbName);
-    Db db = getDb(dbName);
-    if (db == null) {
-      throw new DatabaseNotFoundException("Database '" + dbName + "' not found");
-    }
-    return filterStringsByPattern(db.getAllTableNames(), matcher);
-  }
-
-  /**
-   * Returns true if the table and the database exist in the Impala Catalog. Returns
-   * false if either the table or the database do not exist.
-   */
-  public boolean containsTable(String dbName, String tableName) {
-    Db db = getDb(dbName);
-    return (db == null) ? false : db.containsTable(tableName);
-  }
-
-  /**
-   * Adds a data source to the in-memory map of data sources. It is not
-   * persisted to the metastore.
-   * @return true if this item was added or false if the existing value was preserved.
-   */
-  public boolean addDataSource(DataSource dataSource) {
-    return dataSources_.add(dataSource);
-  }
-
-  /**
-   * Removes a data source from the in-memory map of data sources.
-   * @return the item that was removed if it existed in the cache, null otherwise.
-   */
-  public DataSource removeDataSource(String dataSourceName) {
-    Preconditions.checkNotNull(dataSourceName);
-    return dataSources_.remove(dataSourceName.toLowerCase());
-  }
-
-  /**
-   * Gets the specified data source.
-   */
-  public DataSource getDataSource(String dataSourceName) {
-    Preconditions.checkNotNull(dataSourceName);
-    return dataSources_.get(dataSourceName.toLowerCase());
-  }
-
-  /**
-   * Gets a list of all data sources.
-   */
-  public List<DataSource> getDataSources() {
-    return dataSources_.getValues();
-  }
-
-  /**
-   * Returns a list of data source names that match pattern.
-   *
-   * @see PatternMatcher#matches(String) for details of the pattern match semantics.
-   *
-   * pattern may be null (and thus matches everything).
-   */
-  public List<String> getDataSourceNames(String pattern) {
-    return filterStringsByPattern(dataSources_.keySet(),
-        PatternMatcher.createHivePatternMatcher(pattern));
-  }
-
-  /**
-   * Returns all DataSources that match 'matcher'.
-   */
-  public List<DataSource> getDataSources(PatternMatcher matcher) {
-    return filterCatalogObjectsByPattern(dataSources_.getValues(), matcher);
-  }
-
-  /**
-   * Adds a function to the catalog.
-   * Returns true if the function was successfully added.
-   * Returns false if the function already exists.
-   * TODO: allow adding a function to a global scope. We probably want this to resolve
-   * after the local scope.
-   * e.g. if we had fn() and db.fn(). If the current database is 'db', fn() would
-   * resolve first to db.fn().
-   */
-  public boolean addFunction(Function fn) {
-    Db db = getDb(fn.dbName());
-    if (db == null) return false;
-    return db.addFunction(fn);
-  }
-
-  /**
-   * Returns the function that best matches 'desc' that is registered with the
-   * catalog using 'mode' to check for matching. If desc matches multiple functions
-   * in the catalog, it will return the function with the strictest matching mode.
-   * If multiple functions match at the same matching mode, ties are broken by comparing
-   * argument types in lexical order. Argument types are ordered by argument precision
-   * (e.g. double is preferred over float) and then by alphabetical order of argument
-   * type name, to guarantee deterministic results.
-   */
-  public Function getFunction(Function desc, Function.CompareMode mode) {
-    Db db = getDb(desc.dbName());
-    if (db == null) return null;
-    return db.getFunction(desc, mode);
-  }
-
-  public static Function getBuiltin(Function desc, Function.CompareMode mode) {
-    return builtinsDb_.getFunction(desc, mode);
-  }
-
-  /**
-   * Removes a function from the catalog. Increments the catalog version and returns
-   * the Function object that was removed if the function existed, otherwise returns
-   * null.
-   */
-  public Function removeFunction(Function desc) {
-    Db db = getDb(desc.dbName());
-    if (db == null) return null;
-    return db.removeFunction(desc);
-  }
-
-  /**
-   * Returns true if there is a function with this function name. Parameters
-   * are ignored.
-   */
-  public boolean containsFunction(FunctionName name) {
-    Db db = getDb(name.getDb());
-    if (db == null) return false;
-    return db.containsFunction(name.getFunction());
-  }
-
-  /**
-   * Adds a new HdfsCachePool to the catalog.
-   */
-  public boolean addHdfsCachePool(HdfsCachePool cachePool) {
-    return hdfsCachePools_.add(cachePool);
-  }
-
-  /**
-   * Gets a HdfsCachePool given a cache pool name. Returns null if the cache
-   * pool does not exist.
-   */
-  public HdfsCachePool getHdfsCachePool(String poolName) {
-    return hdfsCachePools_.get(poolName);
-  }
-
-  /**
-   * Release the Hive Meta Store Client resources. Can be called multiple times
-   * (additional calls will be no-ops).
-   */
-  public void close() { metaStoreClientPool_.close(); }
-
-
-  /**
-   * Returns a managed meta store client from the client connection pool.
-   */
-  public MetaStoreClient getMetaStoreClient() { return metaStoreClientPool_.getClient(); }
-
-  /**
-   * Return all members of 'candidates' that match 'matcher'.
-   * The results are sorted in String.CASE_INSENSITIVE_ORDER.
-   * matcher must not be null.
-   */
-  private List<String> filterStringsByPattern(Iterable<String> candidates,
-      PatternMatcher matcher) {
-    Preconditions.checkNotNull(matcher);
-    List<String> filtered = Lists.newArrayList();
-    for (String candidate: candidates) {
-      if (matcher.matches(candidate)) filtered.add(candidate);
-    }
-    Collections.sort(filtered, String.CASE_INSENSITIVE_ORDER);
-    return filtered;
-  }
-
-  private static class CatalogObjectOrder implements Comparator<CatalogObject> {
-    @Override
-    public int compare(CatalogObject o1, CatalogObject o2) {
-      return String.CASE_INSENSITIVE_ORDER.compare(o1.getName(), o2.getName());
-    }
-  }
-
-  private static final CatalogObjectOrder CATALOG_OBJECT_ORDER = new CatalogObjectOrder();
-
-  /**
-   * Return all members of 'candidates' that match 'matcher'.
-   * The results are sorted in CATALOG_OBJECT_ORDER.
-   * matcher must not be null.
-   */
-  private <T extends CatalogObject> List<T> filterCatalogObjectsByPattern(
-      Iterable<? extends T> candidates, PatternMatcher matcher) {
-    Preconditions.checkNotNull(matcher);
-    List<T> filtered = Lists.newArrayList();
-    for (T candidate: candidates) {
-      if (matcher.matches(candidate.getName())) filtered.add(candidate);
-    }
-    Collections.sort(filtered, CATALOG_OBJECT_ORDER);
-    return filtered;
-  }
-
-  public HdfsPartition getHdfsPartition(String dbName, String tableName,
-      org.apache.hadoop.hive.metastore.api.Partition msPart) throws CatalogException {
-    List<TPartitionKeyValue> partitionSpec = Lists.newArrayList();
-    Table table = getTable(dbName, tableName);
-    if (!(table instanceof HdfsTable)) {
-      throw new PartitionNotFoundException(
-          "Not an HdfsTable: " + dbName + "." + tableName);
-    }
-    for (int i = 0; i < msPart.getValues().size(); ++i) {
-      partitionSpec.add(new TPartitionKeyValue(
-          ((HdfsTable)table).getColumns().get(i).getName(), msPart.getValues().get(i)));
-    }
-    return getHdfsPartition(table.getDb().getName(), table.getName(), partitionSpec);
-  }
-
-  /**
-   * Returns the HdfsPartition object for the given dbName/tableName and partition spec.
-   * This will trigger a metadata load if the table metadata is not yet cached.
-   * @throws DatabaseNotFoundException - If the database does not exist.
-   * @throws TableNotFoundException - If the table does not exist.
-   * @throws PartitionNotFoundException - If the partition does not exist.
-   * @throws TableLoadingException - If there is an error loading the table metadata.
-   */
-  public HdfsPartition getHdfsPartition(String dbName, String tableName,
-      List<TPartitionKeyValue> partitionSpec) throws CatalogException {
-    String partitionNotFoundMsg =
-        "Partition not found: " + Joiner.on(", ").join(partitionSpec);
-    Table table = getTable(dbName, tableName);
-    // This is not an Hdfs table, throw an error.
-    if (!(table instanceof HdfsTable)) {
-      throw new PartitionNotFoundException(partitionNotFoundMsg);
-    }
-    // Get the HdfsPartition object for the given partition spec.
-    HdfsPartition partition =
-        ((HdfsTable) table).getPartitionFromThriftPartitionSpec(partitionSpec);
-    if (partition == null) throw new PartitionNotFoundException(partitionNotFoundMsg);
-    return partition;
-  }
-
-  /**
-   * Returns true if the table contains the given partition spec, otherwise false.
-   * This may trigger a metadata load if the table metadata is not yet cached.
-   * @throws DatabaseNotFoundException - If the database does not exist.
-   * @throws TableNotFoundException - If the table does not exist.
-   * @throws TableLoadingException - If there is an error loading the table metadata.
-   */
-  public boolean containsHdfsPartition(String dbName, String tableName,
-      List<TPartitionKeyValue> partitionSpec) throws CatalogException {
-    try {
-      return getHdfsPartition(dbName, tableName, partitionSpec) != null;
-    } catch (PartitionNotFoundException e) {
-      return false;
-    }
-  }
-
-  /**
-   * Gets the thrift representation of a catalog object, given the "object
-   * description". The object description is just a TCatalogObject with only the
-   * catalog object type and object name set.
-   * If the object is not found, a CatalogException is thrown.
-   */
-  public TCatalogObject getTCatalogObject(TCatalogObject objectDesc)
-      throws CatalogException {
-    TCatalogObject result = new TCatalogObject();
-    switch (objectDesc.getType()) {
-      case DATABASE: {
-        Db db = getDb(objectDesc.getDb().getDb_name());
-        if (db == null) {
-          throw new CatalogException(
-              "Database not found: " + objectDesc.getDb().getDb_name());
-        }
-        result.setType(db.getCatalogObjectType());
-        result.setCatalog_version(db.getCatalogVersion());
-        result.setDb(db.toThrift());
-        break;
-      }
-      case TABLE:
-      case VIEW: {
-        Table table = getTable(objectDesc.getTable().getDb_name(),
-            objectDesc.getTable().getTbl_name());
-        if (table == null) {
-          throw new CatalogException("Table not found: " +
-              objectDesc.getTable().getTbl_name());
-        }
-        result.setType(table.getCatalogObjectType());
-        result.setCatalog_version(table.getCatalogVersion());
-        result.setTable(table.toThrift());
-        break;
-      }
-      case FUNCTION: {
-        TFunction tfn = objectDesc.getFn();
-        Function desc = Function.fromThrift(tfn);
-        Function fn = getFunction(desc, Function.CompareMode.IS_INDISTINGUISHABLE);
-        if (fn == null) {
-          throw new CatalogException("Function not found: " + tfn);
-        }
-        result.setType(fn.getCatalogObjectType());
-        result.setCatalog_version(fn.getCatalogVersion());
-        result.setFn(fn.toThrift());
-        break;
-      }
-      case DATA_SOURCE: {
-        String dataSrcName = objectDesc.getData_source().getName();
-        DataSource dataSrc = getDataSource(dataSrcName);
-        if (dataSrc == null) {
-          throw new CatalogException("Data source not found: " + dataSrcName);
-        }
-        result.setType(dataSrc.getCatalogObjectType());
-        result.setCatalog_version(dataSrc.getCatalogVersion());
-        result.setData_source(dataSrc.toThrift());
-        break;
-      }
-      case HDFS_CACHE_POOL: {
-        HdfsCachePool pool = getHdfsCachePool(objectDesc.getCache_pool().getPool_name());
-        if (pool == null) {
-          throw new CatalogException(
-              "Hdfs cache pool not found: " + objectDesc.getCache_pool().getPool_name());
-        }
-        result.setType(pool.getCatalogObjectType());
-        result.setCatalog_version(pool.getCatalogVersion());
-        result.setCache_pool(pool.toThrift());
-        break;
-      }
-      case ROLE:
-        Role role = authPolicy_.getRole(objectDesc.getRole().getRole_name());
-        if (role == null) {
-          throw new CatalogException("Role not found: " +
-              objectDesc.getRole().getRole_name());
-        }
-        result.setType(role.getCatalogObjectType());
-        result.setCatalog_version(role.getCatalogVersion());
-        result.setRole(role.toThrift());
-        break;
-      case PRIVILEGE:
-        Role tmpRole = authPolicy_.getRole(objectDesc.getPrivilege().getRole_id());
-        if (tmpRole == null) {
-          throw new CatalogException("No role associated with ID: " +
-              objectDesc.getPrivilege().getRole_id());
-        }
-        for (RolePrivilege p: tmpRole.getPrivileges()) {
-          if (p.getName().equalsIgnoreCase(
-              objectDesc.getPrivilege().getPrivilege_name())) {
-            result.setType(p.getCatalogObjectType());
-            result.setCatalog_version(p.getCatalogVersion());
-            result.setPrivilege(p.toThrift());
-            return result;
-          }
-        }
-        throw new CatalogException(String.format("Role '%s' does not contain " +
-            "privilege: '%s'", tmpRole.getName(),
-            objectDesc.getPrivilege().getPrivilege_name()));
-      default: throw new IllegalStateException(
-          "Unexpected TCatalogObject type: " + objectDesc.getType());
-    }
-    return result;
-  }
-}
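
For illustration only (not part of the diff above): a minimal sketch of how the Catalog
API shown in this file is used, assuming a concrete Catalog instance and a hypothetical
functional.alltypes table. Lookups are case-insensitive, and getTable() may trigger a
metadata load on first access.

    // Hypothetical helper; 'catalog' is any concrete Catalog subclass.
    void lookupExample(Catalog catalog) throws CatalogException {
      Db db = catalog.getDb("Functional");  // case-insensitive lookup
      if (db != null && catalog.containsTable("functional", "alltypes")) {
        // May trigger a metadata load if the table is not yet cached.
        Table tbl = catalog.getTable("functional", "alltypes");
      }
    }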

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/catalog/CatalogDeltaLog.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/catalog/CatalogDeltaLog.java b/fe/src/main/java/com/cloudera/impala/catalog/CatalogDeltaLog.java
deleted file mode 100644
index 72e4990..0000000
--- a/fe/src/main/java/com/cloudera/impala/catalog/CatalogDeltaLog.java
+++ /dev/null
@@ -1,120 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.catalog;
-
-import java.util.Map;
-import java.util.SortedMap;
-import java.util.TreeMap;
-
-import com.cloudera.impala.thrift.TCatalogObject;
-import com.cloudera.impala.thrift.TTable;
-import com.google.common.base.Preconditions;
-
-/**
- * The impalad catalog cache can be modified by either a state store update or by a
- * direct ("fast") update that applies the result of a catalog operation to the cache
- * out-of-band of a state store update. This thread safe log tracks the divergence
- * (due to direct updates to the cache) of this impalad's cache from the last state
- * store update. This log is needed to ensure work is never undone. For example,
- * consider the following sequence of events:
- * t1: [Direct Update] - Add item A - (Catalog Version 9)
- * t2: [Direct Update] - Drop item A - (Catalog Version 10)
- * t3: [StateStore Update] - (From Catalog Version 9)
- * This log is used to ensure the state store update in t3 does not undo the drop in t2.
- *
- * Currently this only tracks objects that were dropped, since the catalog cache can be
- * queried to check if an object was added. TODO: Also track object additions from async
- * operations. This could be used to "replay" the log in the case of a catalog reset
- * ("invalidate metadata"). Currently, the catalog may briefly go back in time if
- * "invalidate metadata" is run concurrently with async catalog operations.
- */
-public class CatalogDeltaLog {
-  // Map of the catalog version an object was removed from the catalog
-  // to the catalog object, ordered by catalog version.
-  private SortedMap<Long, TCatalogObject> removedCatalogObjects_ =
-      new TreeMap<Long, TCatalogObject>();
-
-  /**
-   * Adds a new item to the map of removed catalog objects.
-   */
-  public synchronized void addRemovedObject(TCatalogObject catalogObject) {
-    Preconditions.checkNotNull(catalogObject);
-    removedCatalogObjects_.put(catalogObject.getCatalog_version(), catalogObject);
-  }
-
-  /**
-   * Given the current catalog version, removes all items with catalogVersion <
-   * currentCatalogVersion. Such objects do not need to be tracked in the delta
-   * log anymore because they are consistent with the state store's view of the
-   * catalog.
-   */
-  public synchronized void garbageCollect(long currentCatalogVersion) {
-    // If nothing will be garbage collected, avoid creating a new map.
-    if (!removedCatalogObjects_.isEmpty() &&
-        removedCatalogObjects_.firstKey() < currentCatalogVersion) {
-      removedCatalogObjects_ = new TreeMap<Long, TCatalogObject>(
-          removedCatalogObjects_.tailMap(currentCatalogVersion));
-    }
-  }
-
-  /**
-   * Checks if a matching catalog object was removed in a catalog version after this
-   * object's catalog version. Returns true if there was a matching object that was
-   * removed after this object, false otherwise.
-   */
-  public synchronized boolean wasObjectRemovedAfter(TCatalogObject catalogObject) {
-    Preconditions.checkNotNull(catalogObject);
-    if (removedCatalogObjects_.isEmpty()) return false;
-
-    // Get all the items that were removed after the catalog version of this object.
-    SortedMap<Long, TCatalogObject> candidateObjects =
-        removedCatalogObjects_.tailMap(catalogObject.getCatalog_version());
-    for (Map.Entry<Long, TCatalogObject> entry: candidateObjects.entrySet()) {
-      if (objectNamesMatch(catalogObject, entry.getValue())) return true;
-    }
-    return false;
-  }
-
-  /**
-   * Returns true if the two objects have the same object type and name.
-   * TODO: Use global object IDs everywhere instead of tracking catalog objects by name.
-   */
-  private boolean objectNamesMatch(TCatalogObject first, TCatalogObject second) {
-    if (first.getType() != second.getType()) return false;
-    switch (first.getType()) {
-      case DATABASE:
-        return first.getDb().getDb_name().equalsIgnoreCase(second.getDb().getDb_name());
-      case TABLE:
-      case VIEW:
-        TTable firstTbl = first.getTable();
-        return firstTbl.getDb_name().equalsIgnoreCase(second.getTable().getDb_name()) &&
-            firstTbl.getTbl_name().equalsIgnoreCase(second.getTable().getTbl_name());
-      case FUNCTION:
-        return first.getFn().getSignature().equals(second.getFn().getSignature()) &&
-            first.getFn().getName().equals(second.getFn().getName());
-      case ROLE:
-        return first.getRole().getRole_name().equalsIgnoreCase(
-            second.getRole().getRole_name());
-      case PRIVILEGE:
-        return first.getPrivilege().getPrivilege_name().equalsIgnoreCase(
-            second.getPrivilege().getPrivilege_name()) &&
-            first.getPrivilege().getRole_id() == second.getPrivilege().getRole_id();
-      default: return false;
-    }
-  }
-}
\ No newline at end of file
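
For illustration only (not part of the diff above): a minimal sketch of the t1-t3
scenario from the class comment, assuming 'dropped' and 'fromStatestore' are fully
populated TCatalogObject instances describing the same object at catalog versions 10
and 9 respectively.

    // Hypothetical helper mirroring the sequence in the class comment.
    boolean shouldApplyStatestoreUpdate(CatalogDeltaLog deltaLog,
        TCatalogObject dropped, TCatalogObject fromStatestore) {
      // t2: a direct ("fast") update dropped the object at version 10.
      deltaLog.addRemovedObject(dropped);
      // t3: the statestore update still carries the object at version 9; it must not
      // undo the drop recorded above, so wasObjectRemovedAfter() returns true here.
      boolean apply = !deltaLog.wasObjectRemovedAfter(fromStatestore);
      // Entries older than the statestore's version no longer need to be tracked.
      deltaLog.garbageCollect(fromStatestore.getCatalog_version());
      return apply;
    }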

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/catalog/CatalogException.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/catalog/CatalogException.java b/fe/src/main/java/com/cloudera/impala/catalog/CatalogException.java
deleted file mode 100644
index 92fc3a0..0000000
--- a/fe/src/main/java/com/cloudera/impala/catalog/CatalogException.java
+++ /dev/null
@@ -1,36 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.catalog;
-
-import com.cloudera.impala.common.ImpalaException;
-
-/**
- * Base class for exceptions related to accessing objects in the Catalog.
- */
-public class CatalogException extends ImpalaException {
-  // Dummy serial UID to avoid Eclipse warnings
-  private static final long serialVersionUID = -1273205863485997544L;
-
-  public CatalogException(String msg, Throwable cause) {
-    super(msg, cause);
-  }
-
-  public CatalogException(String msg) {
-    super(msg);
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/catalog/CatalogObject.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/catalog/CatalogObject.java b/fe/src/main/java/com/cloudera/impala/catalog/CatalogObject.java
deleted file mode 100644
index e98bcde..0000000
--- a/fe/src/main/java/com/cloudera/impala/catalog/CatalogObject.java
+++ /dev/null
@@ -1,40 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.catalog;
-
-import com.cloudera.impala.thrift.TCatalogObjectType;
-
-/**
- * Interface that all catalog objects implement.
- */
-public interface CatalogObject {
-  // Returns the TCatalogObject type of this Catalog object.
-  public TCatalogObjectType getCatalogObjectType();
-
-  // Returns the unqualified object name.
-  public String getName();
-
-  // Returns the version of this catalog object.
-  public long getCatalogVersion();
-
-  // Sets the version of this catalog object.
-  public void setCatalogVersion(long newVersion);
-
-  // Returns true if this CatalogObject has had its metadata loaded, false otherwise.
-  public boolean isLoaded();
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/catalog/CatalogObjectCache.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/catalog/CatalogObjectCache.java b/fe/src/main/java/com/cloudera/impala/catalog/CatalogObjectCache.java
deleted file mode 100644
index bd9e3f0..0000000
--- a/fe/src/main/java/com/cloudera/impala/catalog/CatalogObjectCache.java
+++ /dev/null
@@ -1,148 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.catalog;
-
-import java.util.Iterator;
-import java.util.List;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-
-import org.apache.log4j.Logger;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-/**
- * Thread safe cache for storing CatalogObjects. Enforces that updates to existing
- * entries only get applied if the new/updated object has a larger catalog version.
- */
-public class CatalogObjectCache<T extends CatalogObject> implements Iterable<T> {
-  private static final Logger LOG = Logger.getLogger(CatalogObjectCache.class);
-  private final boolean caseInsensitiveKeys_;
-
-  /**
-   * Creates a new instance of the CatalogObjectCache that compares keys as case
-   * insensitive.
-   */
-  public CatalogObjectCache() {
-    this(true);
-  }
-
-  /**
-   * Creates a new instance of the CatalogObjectCache that compares keys as case
-   * insensitive/sensitive based on whether 'caseInsensitiveKeys' is true/false.
-   */
-  public CatalogObjectCache(boolean caseInsensitiveKeys) {
-    caseInsensitiveKeys_ = caseInsensitiveKeys;
-  }
-
-  // Map of lower-case object name to CatalogObject. New entries are added
-  // by calling add(). Updates of the cache must be synchronized because adding
-  // new entries may require two cache accesses that must be performed atomically.
-  // TODO: For simplicity, consider using a (non-concurrent) HashMap and marking
-  // all methods as synchronized.
-  private final ConcurrentHashMap<String, T> metadataCache_ =
-      new ConcurrentHashMap<String, T>();
-
-  /**
-   * Adds a new catalogObject to the cache. If a catalogObject with the same name already
-   * exists in the cache, the new item will only be added if it has a larger catalog
-   * version.
-   * Synchronized because add() may require two cache accesses that must be performed
-   * atomically.
-   * Returns true if this item was added or false if the existing value was preserved.
-   */
-  public synchronized boolean add(T catalogObject) {
-    Preconditions.checkNotNull(catalogObject);
-    String key = catalogObject.getName();
-    if (caseInsensitiveKeys_) key = key.toLowerCase();
-    T existingItem = metadataCache_.putIfAbsent(key, catalogObject);
-    if (existingItem == null) return true;
-
-    if (existingItem.getCatalogVersion() < catalogObject.getCatalogVersion()) {
-      // When existingItem != null it indicates there was already an existing entry
-      // associated with the key. Add the updated object iff it has a catalog
-      // version greater than the existing entry.
-      metadataCache_.put(key, catalogObject);
-      return true;
-    }
-    return false;
-  }
-
-  /**
-   * Removes an item from the metadata cache and returns the removed item, or null
-   * if no item was removed.
-   */
-  public synchronized T remove(String name) {
-    if (caseInsensitiveKeys_) name = name.toLowerCase();
-    return metadataCache_.remove(name);
-  }
-
-  /**
-   * Clears all items in the cache.
-   */
-  public synchronized void clear() {
-    metadataCache_.clear();
-  }
-
-  /**
-   * Returns the set of all known object names. The returned set is backed by
-   * the cache, so updates to the cache will be visible in the returned set
-   * and vice-versa. However, updates to the cache should not be done via the
-   * returned set, use add()/remove() instead.
-   */
-  public Set<String> keySet() {
-    return metadataCache_.keySet();
-  }
-
-  /**
-   * Returns all the known object values.
-   */
-  public List<T> getValues() {
-    return Lists.newArrayList(metadataCache_.values());
-  }
-
-  /**
-   * Returns true if the metadataCache_ contains a key with the given name.
-   */
-  public boolean contains(String name) {
-    if (caseInsensitiveKeys_) name = name.toLowerCase();
-    return metadataCache_.containsKey(name);
-  }
-
-  /**
-   * Returns the catalog object corresponding to the supplied name if it exists in the
-   * cache, or null if there is no entry in metadataCache_ associated with this
-   * key.
-   */
-  public T get(String name) {
-    if (caseInsensitiveKeys_) name = name.toLowerCase();
-    return metadataCache_.get(name);
-  }
-
-  /**
-   * Returns an iterator for the values in the cache. There are no guarantees
-   * about the order in which elements are returned. All items at the time of
-   * iterator creation will be visible and new items may or may not be visible.
-   * Thread safe (will never throw a ConcurrentModificationException).
-   */
-  @Override
-  public Iterator<T> iterator() {
-    return metadataCache_.values().iterator();
-  }
-}
\ No newline at end of file
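
For illustration only (not part of the diff above): a minimal sketch of the versioning
rule the cache enforces, assuming 'v9' and 'v10' are two CatalogObject instances with
the same name and catalog versions 9 and 10.

    // Hypothetical helper: only a strictly newer catalog version replaces a cached entry.
    <T extends CatalogObject> void versioningExample(CatalogObjectCache<T> cache, T v9, T v10) {
      cache.add(v10);                        // added (assuming no prior entry for this name)
      boolean replaced = cache.add(v9);      // false: version 9 < 10, existing entry preserved
      T current = cache.get(v10.getName());  // still the version-10 object
    }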


[04/61] [partial] incubator-impala git commit: IMPALA-3786: Replace "cloudera" with "apache" (part 1)

Posted by kw...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/CreateTableAsSelectStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/CreateTableAsSelectStmt.java b/fe/src/main/java/org/apache/impala/analysis/CreateTableAsSelectStmt.java
new file mode 100644
index 0000000..7b59625
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/CreateTableAsSelectStmt.java
@@ -0,0 +1,212 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import java.util.List;
+import java.util.EnumSet;
+
+import com.cloudera.impala.authorization.Privilege;
+import com.cloudera.impala.catalog.Db;
+import com.cloudera.impala.catalog.HdfsTable;
+import com.cloudera.impala.catalog.KuduTable;
+import com.cloudera.impala.catalog.MetaStoreClientPool.MetaStoreClient;
+import com.cloudera.impala.catalog.Table;
+import com.cloudera.impala.catalog.TableId;
+import com.cloudera.impala.catalog.TableLoadingException;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.service.CatalogOpExecutor;
+import com.cloudera.impala.thrift.THdfsFileFormat;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+
+/**
+ * Represents a CREATE TABLE AS SELECT (CTAS) statement
+ *
+ * The statement supports an optional PARTITIONED BY clause. Its syntax and semantics
+ * follow the PARTITION feature of INSERT FROM SELECT statements: inside the PARTITIONED
+ * BY (...) column list the user must specify names of the columns to partition by. These
+ * column names must appear in the specified order at the end of the select statement. A
+ * remapping between columns of the source and destination tables is not possible, because
+ * the destination table does not yet exist. Specifying static values for the partition
+ * columns is also not possible, as their type needs to be deduced from columns in the
+ * select statement.
+ */
+public class CreateTableAsSelectStmt extends StatementBase {
+  private final CreateTableStmt createStmt_;
+
+  // List of partition columns from the PARTITIONED BY (...) clause. Set to null if no
+  // partition was given.
+  private final List<String> partitionKeys_;
+
+  /////////////////////////////////////////
+  // BEGIN: Members that need to be reset()
+
+  private final InsertStmt insertStmt_;
+
+  // END: Members that need to be reset()
+  /////////////////////////////////////////
+
+  private final static EnumSet<THdfsFileFormat> SUPPORTED_INSERT_FORMATS =
+      EnumSet.of(THdfsFileFormat.PARQUET, THdfsFileFormat.TEXT);
+
+  /**
+   * Builds a CREATE TABLE AS SELECT statement
+   */
+  public CreateTableAsSelectStmt(CreateTableStmt createStmt, QueryStmt queryStmt,
+      List<String> partitionKeys) {
+    Preconditions.checkNotNull(queryStmt);
+    Preconditions.checkNotNull(createStmt);
+    createStmt_ = createStmt;
+    partitionKeys_ = partitionKeys;
+    List<PartitionKeyValue> pkvs = null;
+    if (partitionKeys != null) {
+      pkvs = Lists.newArrayList();
+      for (String key: partitionKeys) {
+        pkvs.add(new PartitionKeyValue(key, null));
+      }
+    }
+    insertStmt_ = new InsertStmt(null, createStmt.getTblName(), false, pkvs,
+        null, queryStmt, null, false);
+  }
+
+  public QueryStmt getQueryStmt() { return insertStmt_.getQueryStmt(); }
+  public InsertStmt getInsertStmt() { return insertStmt_; }
+  public CreateTableStmt getCreateStmt() { return createStmt_; }
+  @Override
+  public String toSql() { return ToSqlUtils.getCreateTableSql(this); }
+
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    if (isAnalyzed()) return;
+    super.analyze(analyzer);
+
+    // The analysis for CTAS happens in two phases - the first phase happens before
+    // the target table exists and we want to validate the CREATE statement and the
+    // query portion of the insert statement. If this passes, analysis will be run
+    // over the full INSERT statement. To avoid duplicate registrations of table/colRefs,
+    // create a new root analyzer and clone the query statement for this initial pass.
+    Analyzer dummyRootAnalyzer = new Analyzer(analyzer.getCatalog(),
+        analyzer.getQueryCtx(), analyzer.getAuthzConfig());
+    QueryStmt tmpQueryStmt = insertStmt_.getQueryStmt().clone();
+    try {
+      Analyzer tmpAnalyzer = new Analyzer(dummyRootAnalyzer);
+      tmpAnalyzer.setUseHiveColLabels(true);
+      tmpQueryStmt.analyze(tmpAnalyzer);
+      // Subqueries need to be rewritten by the StmtRewriter first.
+      if (analyzer.containsSubquery()) return;
+    } finally {
+      // Record missing tables in the original analyzer.
+      analyzer.getMissingTbls().addAll(dummyRootAnalyzer.getMissingTbls());
+    }
+
+    // Add the columns from the partition clause to the create statement.
+    if (partitionKeys_ != null) {
+      int colCnt = tmpQueryStmt.getColLabels().size();
+      int partColCnt = partitionKeys_.size();
+      if (partColCnt >= colCnt) {
+        throw new AnalysisException(String.format("Number of partition columns (%s) " +
+            "must be smaller than the number of columns in the select statement (%s).",
+            partColCnt, colCnt));
+      }
+      int firstCol = colCnt - partColCnt;
+      for (int i = firstCol, j = 0; i < colCnt; ++i, ++j) {
+        String partitionLabel = partitionKeys_.get(j);
+        String colLabel = tmpQueryStmt.getColLabels().get(i);
+
+        // Ensure that partition columns are named and positioned at end of
+        // input column list.
+        if (!partitionLabel.equals(colLabel)) {
+          throw new AnalysisException(String.format("Partition column name " +
+              "mismatch: %s != %s", partitionLabel, colLabel));
+        }
+
+        ColumnDef colDef = new ColumnDef(colLabel, null, null);
+        colDef.setType(tmpQueryStmt.getBaseTblResultExprs().get(i).getType());
+        createStmt_.getPartitionColumnDefs().add(colDef);
+      }
+      // Remove partition columns from table column list.
+      tmpQueryStmt.getColLabels().subList(firstCol, colCnt).clear();
+    }
+
+    // Add the columns from the select statement to the create statement.
+    int colCnt = tmpQueryStmt.getColLabels().size();
+    createStmt_.getColumnDefs().clear();
+    for (int i = 0; i < colCnt; ++i) {
+      ColumnDef colDef = new ColumnDef(
+          tmpQueryStmt.getColLabels().get(i), null, null);
+      colDef.setType(tmpQueryStmt.getBaseTblResultExprs().get(i).getType());
+      createStmt_.getColumnDefs().add(colDef);
+    }
+    createStmt_.analyze(analyzer);
+
+    if (!SUPPORTED_INSERT_FORMATS.contains(createStmt_.getFileFormat())) {
+      throw new AnalysisException(String.format("CREATE TABLE AS SELECT " +
+          "does not support (%s) file format. Supported formats are: (%s)",
+          createStmt_.getFileFormat().toString().replace("_", ""),
+          "PARQUET, TEXTFILE"));
+    }
+
+    // The full privilege check for the database will be done as part of the INSERT
+    // analysis.
+    Db db = analyzer.getDb(createStmt_.getDb(), Privilege.ANY);
+    if (db == null) {
+      throw new AnalysisException(
+          Analyzer.DB_DOES_NOT_EXIST_ERROR_MSG + createStmt_.getDb());
+    }
+
+    // Running analysis on the INSERT portion of the CTAS requires the target INSERT
+    // table to "exist". For CTAS the table does not exist yet, so create a "temp"
+    // table to run analysis against. The schema of this temp table should exactly
+    // match the schema of the table that will be created by running the CREATE
+    // statement.
+    org.apache.hadoop.hive.metastore.api.Table msTbl =
+        CatalogOpExecutor.createMetaStoreTable(createStmt_.toThrift());
+
+    try (MetaStoreClient client = analyzer.getCatalog().getMetaStoreClient()) {
+      // Set a valid location for this table using the same rules as the metastore. If the
+      // user specified a location for the table this will be a no-op.
+      msTbl.getSd().setLocation(analyzer.getCatalog().getTablePath(msTbl).toString());
+
+      // Create a "temp" table based off the given metastore.api.Table object. Normally,
+      // the CatalogService assigns all table IDs, but in this case we need to assign the
+      // "temp" table an ID locally. This table ID cannot conflict with any table in the
+      // SelectStmt (or the BE will be very confused). To ensure the ID is unique within
+      // this query, just assign it the invalid table ID. The CatalogServer will assign
+      // this table a proper ID once it is created there as part of the CTAS execution.
+      Table table = Table.fromMetastoreTable(TableId.createInvalidId(), db, msTbl);
+      Preconditions.checkState(table != null &&
+          (table instanceof HdfsTable || table instanceof KuduTable));
+
+      table.load(true, client.getHiveClient(), msTbl);
+      insertStmt_.setTargetTable(table);
+    } catch (TableLoadingException e) {
+      throw new AnalysisException(e.getMessage(), e);
+    } catch (Exception e) {
+      throw new AnalysisException(e.getMessage(), e);
+    }
+
+    // Finally, run analysis on the insert statement.
+    insertStmt_.analyze(analyzer);
+  }
+
+  @Override
+  public void reset() {
+    super.reset();
+    insertStmt_.reset();
+  }
+}
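
The loop above only accepts a query whose trailing select-list columns match the
PARTITIONED BY columns by name and position. A minimal standalone sketch of that
check and split (the class name and sample column labels below are hypothetical,
not part of the patch):

  import java.util.Arrays;
  import java.util.List;

  public class CtasPartitionSplitExample {
    public static void main(String[] args) {
      // Hypothetical select-list labels and PARTITIONED BY columns.
      List<String> colLabels = Arrays.asList("id", "name", "year", "month");
      List<String> partitionKeys = Arrays.asList("year", "month");

      int colCnt = colLabels.size();
      int partColCnt = partitionKeys.size();
      if (partColCnt >= colCnt) {
        throw new IllegalArgumentException(
            "Number of partition columns must be smaller than the number of select columns.");
      }
      int firstCol = colCnt - partColCnt;
      for (int i = firstCol, j = 0; i < colCnt; ++i, ++j) {
        // Partition columns must be named and positioned at the end of the select list.
        if (!partitionKeys.get(j).equals(colLabels.get(i))) {
          throw new IllegalArgumentException("Partition column name mismatch: "
              + partitionKeys.get(j) + " != " + colLabels.get(i));
        }
      }
      System.out.println("Data columns: " + colLabels.subList(0, firstCol));
      System.out.println("Partition columns: " + colLabels.subList(firstCol, colCnt));
    }
  }

With the sample labels this prints [id, name] as data columns and [year, month] as
partition columns; listing the partition columns in a different order would trigger
the same kind of name-mismatch error as the analysis above.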

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/CreateTableDataSrcStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/CreateTableDataSrcStmt.java b/fe/src/main/java/org/apache/impala/analysis/CreateTableDataSrcStmt.java
new file mode 100644
index 0000000..0faf881
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/CreateTableDataSrcStmt.java
@@ -0,0 +1,95 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import static com.cloudera.impala.catalog.DataSourceTable.TBL_PROP_API_VER;
+import static com.cloudera.impala.catalog.DataSourceTable.TBL_PROP_CLASS;
+import static com.cloudera.impala.catalog.DataSourceTable.TBL_PROP_DATA_SRC_NAME;
+import static com.cloudera.impala.catalog.DataSourceTable.TBL_PROP_INIT_STRING;
+import static com.cloudera.impala.catalog.DataSourceTable.TBL_PROP_LOCATION;
+
+import java.util.List;
+import java.util.Map;
+
+import com.cloudera.impala.authorization.Privilege;
+import com.cloudera.impala.catalog.DataSource;
+import com.cloudera.impala.catalog.DataSourceTable;
+import com.cloudera.impala.catalog.RowFormat;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.thrift.THdfsFileFormat;
+import com.google.common.base.Preconditions;
+import com.google.common.base.Strings;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import org.apache.hadoop.fs.permission.FsAction;
+
+/**
+ * Represents a CREATE TABLE statement for external data sources. Such tables
+ * reference an external data source (created with a CREATE DATA SOURCE statement)
+ * and the properties of that source are stored in the table properties because
+ * the metastore does not store the data sources themselves.
+ */
+public class CreateTableDataSrcStmt extends CreateTableStmt {
+
+  public CreateTableDataSrcStmt(TableName tableName, List<ColumnDef> columnDefs,
+      String dataSourceName, String initString, String comment, boolean ifNotExists) {
+    super(tableName, columnDefs, Lists.<ColumnDef>newArrayList(), false, comment,
+        RowFormat.DEFAULT_ROW_FORMAT, THdfsFileFormat.TEXT, null, null, ifNotExists,
+        createInitialTableProperties(dataSourceName, initString),
+        Maps.<String, String>newHashMap(), null);
+  }
+
+  /**
+   * Creates the initial map of table properties containing the name of the data
+   * source and the table init string.
+   */
+  private static Map<String, String> createInitialTableProperties(
+      String dataSourceName, String initString) {
+    Preconditions.checkNotNull(dataSourceName);
+    Map<String, String> tableProperties = Maps.newHashMap();
+    tableProperties.put(TBL_PROP_DATA_SRC_NAME, dataSourceName.toLowerCase());
+    tableProperties.put(TBL_PROP_INIT_STRING, Strings.nullToEmpty(initString));
+    return tableProperties;
+  }
+
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    super.analyze(analyzer);
+    String dataSourceName = getTblProperties().get(TBL_PROP_DATA_SRC_NAME);
+    DataSource dataSource = analyzer.getCatalog().getDataSource(dataSourceName);
+    if (dataSource == null) {
+      throw new AnalysisException("Data source does not exist: " + dataSourceName);
+    }
+
+    for (ColumnDef col: getColumnDefs()) {
+      if (!DataSourceTable.isSupportedColumnType(col.getType())) {
+        throw new AnalysisException("Tables produced by an external data source do " +
+            "not support the column type: " + col.getType());
+      }
+    }
+    // Add table properties from the DataSource catalog object now that we have access
+    // to the catalog. These are stored in the table metadata because DataSource catalog
+    // objects are not currently persisted.
+    String location = dataSource.getLocation();
+    getTblProperties().put(TBL_PROP_LOCATION, location);
+    getTblProperties().put(TBL_PROP_CLASS, dataSource.getClassName());
+    getTblProperties().put(TBL_PROP_API_VER, dataSource.getApiVersion());
+    new HdfsUri(location).analyze(analyzer, Privilege.ALL, FsAction.READ);
+    // TODO: check class exists and implements API version
+  }
+}
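
Because the metastore does not persist data source objects, the statement above
carries the data source's name and init string in the table properties. A rough
standalone sketch of building such a property map (the key strings below are
placeholders; the real keys are the TBL_PROP_* constants in DataSourceTable):

  import java.util.HashMap;
  import java.util.Map;

  public class DataSrcTablePropsExample {
    public static void main(String[] args) {
      String dataSourceName = "MyJdbcSource"; // hypothetical data source name
      String initString = null;               // no init string supplied

      Map<String, String> props = new HashMap<>();
      // Placeholder keys; the real property names come from DataSourceTable.
      props.put("__DATA_SOURCE_NAME", dataSourceName.toLowerCase());
      props.put("__DATA_SOURCE_INIT_STRING", initString == null ? "" : initString);
      System.out.println(props);
    }
  }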

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/CreateTableLikeFileStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/CreateTableLikeFileStmt.java b/fe/src/main/java/org/apache/impala/analysis/CreateTableLikeFileStmt.java
new file mode 100644
index 0000000..6695cac
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/CreateTableLikeFileStmt.java
@@ -0,0 +1,375 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsAction;
+
+import parquet.hadoop.ParquetFileReader;
+import parquet.hadoop.metadata.ParquetMetadata;
+import parquet.schema.OriginalType;
+import parquet.schema.PrimitiveType;
+
+import com.cloudera.impala.authorization.Privilege;
+import com.cloudera.impala.catalog.ArrayType;
+import com.cloudera.impala.catalog.HdfsCompression;
+import com.cloudera.impala.catalog.HdfsFileFormat;
+import com.cloudera.impala.catalog.MapType;
+import com.cloudera.impala.catalog.RowFormat;
+import com.cloudera.impala.catalog.ScalarType;
+import com.cloudera.impala.catalog.StructField;
+import com.cloudera.impala.catalog.StructType;
+import com.cloudera.impala.catalog.Type;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.common.FileSystemUtil;
+import com.cloudera.impala.thrift.THdfsFileFormat;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+
+
+/**
+ * Represents a CREATE TABLE tablename LIKE fileformat '/path/to/file' statement
+ * where the schema is inferred from the given file. Does not partition the table by
+ * default.
+ */
+public class CreateTableLikeFileStmt extends CreateTableStmt {
+  private final HdfsUri schemaLocation_;
+  private final THdfsFileFormat schemaFileFormat_;
+  private final static String ERROR_MSG =
+      "Failed to convert Parquet type\n%s\nto an Impala %s type:\n%s\n";
+
+  public CreateTableLikeFileStmt(TableName tableName, THdfsFileFormat schemaFileFormat,
+      HdfsUri schemaLocation, List<ColumnDef> partitionColumnDescs,
+      boolean isExternal, String comment, RowFormat rowFormat,
+      THdfsFileFormat fileFormat, HdfsUri location, HdfsCachingOp cachingOp,
+      boolean ifNotExists, Map<String, String> tblProperties,
+      Map<String, String> serdeProperties) {
+    super(tableName, new ArrayList<ColumnDef>(), partitionColumnDescs,
+        isExternal, comment, rowFormat,
+        fileFormat, location, cachingOp, ifNotExists, tblProperties, serdeProperties,
+        null);
+    schemaLocation_ = schemaLocation;
+    schemaFileFormat_ = schemaFileFormat;
+  }
+
+  /**
+   * Reads the footer of the given HDFS file and returns the Parquet schema.
+   * Throws an AnalysisException for any failure, such as failing to read the file
+   * or failing to parse the contents.
+   */
+  private static parquet.schema.MessageType loadParquetSchema(Path pathToFile)
+      throws AnalysisException {
+    try {
+      FileSystem fs = pathToFile.getFileSystem(FileSystemUtil.getConfiguration());
+      if (!fs.isFile(pathToFile)) {
+        throw new AnalysisException("Cannot infer schema, path is not a file: " +
+                                    pathToFile);
+      }
+    } catch (IOException e) {
+      throw new AnalysisException("Failed to connect to filesystem:" + e);
+    } catch (IllegalArgumentException e) {
+      throw new AnalysisException(e.getMessage());
+    }
+    ParquetMetadata readFooter = null;
+    try {
+      readFooter = ParquetFileReader.readFooter(FileSystemUtil.getConfiguration(),
+          pathToFile);
+    } catch (FileNotFoundException e) {
+      throw new AnalysisException("File not found: " + e);
+    } catch (IOException e) {
+      throw new AnalysisException("Failed to open file as a parquet file: " + e);
+    } catch (RuntimeException e) {
+      // Parquet throws a generic RuntimeException when reading a non-parquet file
+      if (e.toString().contains("is not a Parquet file")) {
+        throw new AnalysisException("File is not a parquet file: " + pathToFile);
+      }
+      // otherwise, who knows what we caught, throw it back up
+      throw e;
+    }
+    return readFooter.getFileMetaData().getSchema();
+  }
+
+  /**
+   * Converts a "primitive" Parquet type to an Impala type.
+   * A primitive type is a non-nested type with no annotations.
+   */
+  private static Type convertPrimitiveParquetType(parquet.schema.Type parquetType)
+      throws AnalysisException {
+    Preconditions.checkState(parquetType.isPrimitive());
+    PrimitiveType prim = parquetType.asPrimitiveType();
+    switch (prim.getPrimitiveTypeName()) {
+      case BINARY: return Type.STRING;
+      case BOOLEAN: return Type.BOOLEAN;
+      case DOUBLE: return Type.DOUBLE;
+      case FIXED_LEN_BYTE_ARRAY:
+        throw new AnalysisException(
+            "Unsupported parquet type FIXED_LEN_BYTE_ARRAY for field " +
+                parquetType.getName());
+      case FLOAT: return Type.FLOAT;
+      case INT32: return Type.INT;
+      case INT64: return Type.BIGINT;
+      case INT96: return Type.TIMESTAMP;
+      default:
+        Preconditions.checkState(false, "Unexpected parquet primitive type: " +
+               prim.getPrimitiveTypeName());
+        return null;
+    }
+  }
+
+  /**
+   * Converts a Parquet group type to an Impala map Type. We support both the standard
+   * Parquet map representation and legacy representations. Legacy representations are
+   * handled according to this specification:
+   * https://github.com/apache/parquet-format/blob/master/LogicalTypes.md#backward-compatibility-rules-1
+   *
+   * Standard representation of a map in Parquet:
+   * <optional | required> group <name> (MAP) { <-- outerGroup is pointing at this
+   *   repeated group key_value {
+   *     required <key-type> key;
+   *     <optional | required> <value-type> value;
+   *   }
+   * }
+   */
+  private static MapType convertMap(parquet.schema.GroupType outerGroup)
+      throws AnalysisException {
+    if (outerGroup.getFieldCount() != 1) {
+      throw new AnalysisException(String.format(ERROR_MSG, outerGroup.toString(),
+          "MAP", "The logical MAP type must have exactly 1 inner field."));
+    }
+
+    parquet.schema.Type innerField = outerGroup.getType(0);
+    if (!innerField.isRepetition(parquet.schema.Type.Repetition.REPEATED)) {
+      throw new AnalysisException(String.format(ERROR_MSG, outerGroup.toString(),
+          "MAP", "The logical MAP type must have a repeated inner field."));
+    }
+    if (innerField.isPrimitive()) {
+      throw new AnalysisException(String.format(ERROR_MSG, outerGroup.toString(),
+          "MAP", "The inner field of the logical MAP type must be a group."));
+    }
+
+    parquet.schema.GroupType innerGroup = innerField.asGroupType();
+    // It does not matter whether innerGroup has an annotation or not (for example, it
+    // may be annotated with MAP_KEY_VALUE). We treat the annotated and unannotated
+    // cases the same.
+    if (innerGroup.getFieldCount() != 2) {
+      throw new AnalysisException(String.format(ERROR_MSG, outerGroup.toString(),
+          "MAP", "The inner field of the logical MAP type must have exactly 2 fields."));
+    }
+
+    parquet.schema.Type key = innerGroup.getType(0);
+    if (!key.getName().equals("key")) {
+      throw new AnalysisException(String.format(ERROR_MSG, outerGroup.toString(),
+          "MAP", "The name of the first field of the inner field of the logical MAP " +
+          "type must be 'key'"));
+    }
+    if (!key.isPrimitive()) {
+      throw new AnalysisException(String.format(ERROR_MSG, outerGroup.toString(),
+          "MAP", "The key type of the logical MAP type must be primitive."));
+    }
+    parquet.schema.Type value = innerGroup.getType(1);
+    if (!value.getName().equals("value")) {
+      throw new AnalysisException(String.format(ERROR_MSG, outerGroup.toString(),
+          "MAP", "The name of the second field of the inner field of the logical MAP " +
+          "type must be 'value'"));
+    }
+
+    return new MapType(convertParquetType(key), convertParquetType(value));
+  }
+
+  /**
+   * Converts a Parquet group type to an Impala struct Type.
+   */
+  private static StructType convertStruct(parquet.schema.GroupType outerGroup)
+      throws AnalysisException {
+    ArrayList<StructField> structFields = new ArrayList<StructField>();
+    for (parquet.schema.Type field: outerGroup.getFields()) {
+      StructField f = new StructField(field.getName(), convertParquetType(field));
+      structFields.add(f);
+    }
+    return new StructType(structFields);
+  }
+
+  /**
+   * Converts a Parquet group type to an Impala array Type. We can handle the standard
+   * representation as well as legacy representations for backwards compatibility.
+   * Legacy representations are handled according to this specification:
+   * https://github.com/apache/parquet-format/blob/master/LogicalTypes.md#backward-compatibility-rules
+   *
+   * Standard representation of an array in Parquet:
+   * <optional | required> group <name> (LIST) { <-- outerGroup is pointing at this
+   *   repeated group list {
+   *     <optional | required> <element-type> element;
+   *   }
+   * }
+   */
+  private static ArrayType convertArray(parquet.schema.GroupType outerGroup)
+      throws AnalysisException {
+    if (outerGroup.getFieldCount() != 1) {
+      throw new AnalysisException(String.format(ERROR_MSG, outerGroup.toString(),
+          "LIST", "The logical LIST type must have exactly 1 inner field."));
+    }
+
+    parquet.schema.Type innerField = outerGroup.getType(0);
+    if (!innerField.isRepetition(parquet.schema.Type.Repetition.REPEATED)) {
+      throw new AnalysisException(String.format(ERROR_MSG, outerGroup.toString(),
+          "LIST", "The inner field of the logical LIST type must be repeated."));
+    }
+    if (innerField.isPrimitive() || innerField.getOriginalType() != null) {
+      // From the Parquet Spec:
+      // 1. If the repeated field is not a group then its type is the element type.
+      //
+      // If innerField is a group, but originalType is not null, the element type is
+      // based on the logical type.
+      return new ArrayType(convertParquetType(innerField));
+    }
+
+    parquet.schema.GroupType innerGroup = innerField.asGroupType();
+    if (innerGroup.getFieldCount() != 1) {
+      // From the Parquet Spec:
+      // 2. If the repeated field is a group with multiple fields, then its type is a
+      //    struct.
+      return new ArrayType(convertStruct(innerGroup));
+    }
+
+    return new ArrayType(convertParquetType(innerGroup.getType(0)));
+  }
+
+  /**
+   * Converts a "logical" Parquet type to an Impala column type.
+   * A Parquet type is considered logical when it has an annotation. The annotation is
+   * stored as an "OriginalType". The Parquet documentation refers to these as logical
+   * types, so we use that terminology here.
+   */
+  private static Type convertLogicalParquetType(parquet.schema.Type parquetType)
+      throws AnalysisException {
+    OriginalType orig = parquetType.getOriginalType();
+    if (orig == OriginalType.LIST) {
+      return convertArray(parquetType.asGroupType());
+    }
+    if (orig == OriginalType.MAP || orig == OriginalType.MAP_KEY_VALUE) {
+      // MAP_KEY_VALUE annotation should not be used any more. However, according to the
+      // Parquet spec, some existing data incorrectly uses MAP_KEY_VALUE in place of MAP.
+      // For backward-compatibility, a group annotated with MAP_KEY_VALUE that is not
+      // contained by a MAP-annotated group should be handled as a MAP-annotated group.
+      return convertMap(parquetType.asGroupType());
+    }
+
+    PrimitiveType prim = parquetType.asPrimitiveType();
+    if (prim.getPrimitiveTypeName() == PrimitiveType.PrimitiveTypeName.BINARY &&
+        orig == OriginalType.UTF8) {
+      // UTF8 is the type annotation Parquet uses for strings
+      // We check to make sure it applies to BINARY to avoid errors if there is a bad
+      // annotation.
+      return Type.STRING;
+    }
+
+    if (orig == OriginalType.DECIMAL) {
+      return ScalarType.createDecimalType(prim.getDecimalMetadata().getPrecision(),
+                                           prim.getDecimalMetadata().getScale());
+    }
+
+    throw new AnalysisException(
+        "Unsupported logical parquet type " + orig + " (primitive type is " +
+            prim.getPrimitiveTypeName().name() + ") for field " +
+            parquetType.getName());
+  }
+
+  /**
+   * Converts a Parquet type into an Impala type.
+   */
+  private static Type convertParquetType(parquet.schema.Type field)
+      throws AnalysisException {
+    Type type = null;
+    // TODO for 2.3: If a field is not annotated with LIST, it can sometimes still be
+    // interpreted as an array. The following 2 examples should be interpreted as an
+    // array of integers, but this is currently not done.
+    // 1. repeated int int_col;
+    // 2. required group int_arr {
+    //      repeated group list {
+    //        required int element;
+    //      }
+    //    }
+    if (field.getOriginalType() != null) {
+      type = convertLogicalParquetType(field);
+    } else if (field.isPrimitive()) {
+      type = convertPrimitiveParquetType(field);
+    } else {
+      // If field is not primitive, it must be a struct.
+      type = convertStruct(field.asGroupType());
+    }
+    return type;
+  }
+
+  /**
+   * Parses a Parquet file stored in HDFS and returns the corresponding Impala schema.
+   * This fails with an AnalysisException if an error occurs while reading the file or
+   * parsing the Parquet schema, or if the Parquet types cannot be represented in Impala.
+   */
+  private static List<ColumnDef> extractParquetSchema(HdfsUri location)
+      throws AnalysisException {
+    parquet.schema.MessageType parquetSchema = loadParquetSchema(location.getPath());
+    List<parquet.schema.Type> fields = parquetSchema.getFields();
+    List<ColumnDef> schema = new ArrayList<ColumnDef>();
+
+    for (parquet.schema.Type field: fields) {
+      Type type = convertParquetType(field);
+      Preconditions.checkNotNull(type);
+      String colName = field.getName();
+      schema.add(new ColumnDef(colName, new TypeDef(type),
+          "Inferred from Parquet file."));
+    }
+    return schema;
+  }
+
+  @Override
+  public String toSql() {
+    ArrayList<String> colsSql = Lists.newArrayList();
+    ArrayList<String> partitionColsSql = Lists.newArrayList();
+    HdfsCompression compression = HdfsCompression.fromFileName(
+        schemaLocation_.toString());
+    String s = ToSqlUtils.getCreateTableSql(getDb(),
+        getTbl() + " __LIKE_FILEFORMAT__ ", getComment(), colsSql, partitionColsSql,
+        getTblProperties(), getSerdeProperties(), isExternal(), getIfNotExists(),
+        getRowFormat(), HdfsFileFormat.fromThrift(getFileFormat()),
+        compression, null, getLocation());
+    s = s.replace("__LIKE_FILEFORMAT__", "LIKE " + schemaFileFormat_ + " " +
+        schemaLocation_.toString());
+    return s;
+  }
+
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    schemaLocation_.analyze(analyzer, Privilege.ALL, FsAction.READ_WRITE);
+    switch (schemaFileFormat_) {
+      case PARQUET:
+        getColumnDefs().addAll(extractParquetSchema(schemaLocation_));
+        break;
+      default:
+        throw new AnalysisException("Unsupported file type for schema inference: "
+            + schemaFileFormat_);
+    }
+    super.analyze(analyzer);
+  }
+}
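
convertPrimitiveParquetType() above maps unannotated Parquet primitives onto Impala
types. A small standalone sketch of the same mapping, written against plain strings
so it runs without the Parquet or Impala catalog classes (class and method names are
illustrative only):

  import java.util.Arrays;

  public class ParquetPrimitiveMappingExample {
    // Mirrors the BINARY/BOOLEAN/DOUBLE/FLOAT/INT32/INT64/INT96 cases above;
    // FIXED_LEN_BYTE_ARRAY is rejected there and is rejected here as well.
    static String toImpalaType(String parquetPrimitive) {
      switch (parquetPrimitive) {
        case "BINARY": return "STRING";
        case "BOOLEAN": return "BOOLEAN";
        case "DOUBLE": return "DOUBLE";
        case "FLOAT": return "FLOAT";
        case "INT32": return "INT";
        case "INT64": return "BIGINT";
        case "INT96": return "TIMESTAMP";
        case "FIXED_LEN_BYTE_ARRAY":
          throw new IllegalArgumentException(
              "Unsupported parquet type FIXED_LEN_BYTE_ARRAY");
        default:
          throw new IllegalArgumentException(
              "Unexpected parquet primitive type: " + parquetPrimitive);
      }
    }

    public static void main(String[] args) {
      for (String t : Arrays.asList("BINARY", "INT32", "INT96")) {
        System.out.println(t + " -> " + toImpalaType(t));
      }
    }
  }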

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/CreateTableLikeStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/CreateTableLikeStmt.java b/fe/src/main/java/org/apache/impala/analysis/CreateTableLikeStmt.java
new file mode 100644
index 0000000..a7e2038
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/CreateTableLikeStmt.java
@@ -0,0 +1,157 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import org.apache.hadoop.fs.permission.FsAction;
+
+import com.cloudera.impala.authorization.Privilege;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.thrift.TAccessEvent;
+import com.cloudera.impala.thrift.TCatalogObjectType;
+import com.cloudera.impala.thrift.TCreateTableLikeParams;
+import com.cloudera.impala.thrift.THdfsFileFormat;
+import com.cloudera.impala.thrift.TTableName;
+import com.google.common.base.Preconditions;
+
+/**
+ * Represents a CREATE TABLE LIKE statement which creates a new table based on
+ * a copy of an existing table definition.
+ */
+public class CreateTableLikeStmt extends StatementBase {
+  private final TableName tableName_;
+  private final TableName srcTableName_;
+  private final boolean isExternal_;
+  private final String comment_;
+  private final THdfsFileFormat fileFormat_;
+  private final HdfsUri location_;
+  private final boolean ifNotExists_;
+
+  // Set during analysis
+  private String dbName_;
+  private String srcDbName_;
+  private String owner_;
+
+  /**
+   * Builds a CREATE TABLE LIKE statement
+   * @param tableName - Name of the new table
+   * @param srcTableName - Name of the source table (table to copy)
+   * @param isExternal - If true, the table's data will be preserved if dropped.
+   * @param comment - Comment to attach to the table
+   * @param fileFormat - File format of the table
+   * @param location - The HDFS location where the table data will be stored.
+   * @param ifNotExists - If true, no errors are thrown if the table already exists
+   */
+  public CreateTableLikeStmt(TableName tableName, TableName srcTableName,
+      boolean isExternal, String comment, THdfsFileFormat fileFormat, HdfsUri location,
+      boolean ifNotExists) {
+    Preconditions.checkNotNull(tableName);
+    Preconditions.checkNotNull(srcTableName);
+    this.tableName_ = tableName;
+    this.srcTableName_ = srcTableName;
+    this.isExternal_ = isExternal;
+    this.comment_ = comment;
+    this.fileFormat_ = fileFormat;
+    this.location_ = location;
+    this.ifNotExists_ = ifNotExists;
+  }
+
+  public String getTbl() { return tableName_.getTbl(); }
+  public String getSrcTbl() { return srcTableName_.getTbl(); }
+  public boolean isExternal() { return isExternal_; }
+  public boolean getIfNotExists() { return ifNotExists_; }
+  public String getComment() { return comment_; }
+  public THdfsFileFormat getFileFormat() { return fileFormat_; }
+  public HdfsUri getLocation() { return location_; }
+
+  /**
+   * Can only be called after analysis, returns the name of the database the table will
+   * be created within.
+   */
+  public String getDb() {
+    Preconditions.checkNotNull(dbName_);
+    return dbName_;
+  }
+
+  /**
+   * Can only be called after analysis, returns the name of the database that contains
+   * the source table.
+   */
+  public String getSrcDb() {
+    Preconditions.checkNotNull(srcDbName_);
+    return srcDbName_;
+  }
+
+  public String getOwner() {
+    Preconditions.checkNotNull(owner_);
+    return owner_;
+  }
+
+  @Override
+  public String toSql() {
+    StringBuilder sb = new StringBuilder("CREATE ");
+    if (isExternal_) sb.append("EXTERNAL ");
+    sb.append("TABLE ");
+    if (ifNotExists_) sb.append("IF NOT EXISTS ");
+    if (tableName_.getDb() != null) sb.append(tableName_.getDb() + ".");
+    sb.append(tableName_.getTbl() + " LIKE ");
+    if (srcTableName_.getDb() != null) sb.append(srcTableName_.getDb() + ".");
+    sb.append(srcTableName_.getTbl());
+    if (comment_ != null) sb.append(" COMMENT '" + comment_ + "'");
+    if (fileFormat_ != null) sb.append(" STORED AS " + fileFormat_);
+    if (location_ != null) sb.append(" LOCATION '" + location_ + "'");
+    return sb.toString();
+  }
+
+  public TCreateTableLikeParams toThrift() {
+    TCreateTableLikeParams params = new TCreateTableLikeParams();
+    params.setTable_name(new TTableName(getDb(), getTbl()));
+    params.setSrc_table_name(new TTableName(getSrcDb(), getSrcTbl()));
+    params.setOwner(getOwner());
+    params.setIs_external(isExternal());
+    params.setComment(comment_);
+    if (fileFormat_ != null) params.setFile_format(fileFormat_);
+    params.setLocation(location_ == null ? null : location_.toString());
+    params.setIf_not_exists(getIfNotExists());
+    return params;
+  }
+
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    Preconditions.checkState(tableName_ != null && !tableName_.isEmpty());
+    Preconditions.checkState(srcTableName_ != null && !srcTableName_.isEmpty());
+    // Make sure the source table exists and the user has permission to access it.
+    srcDbName_ = analyzer
+        .getTable(srcTableName_, Privilege.VIEW_METADATA)
+        .getDb().getName();
+    tableName_.analyze();
+    dbName_ = analyzer.getTargetDbName(tableName_);
+    owner_ = analyzer.getUser().getName();
+
+    if (analyzer.dbContainsTable(dbName_, tableName_.getTbl(), Privilege.CREATE) &&
+        !ifNotExists_) {
+      throw new AnalysisException(Analyzer.TBL_ALREADY_EXISTS_ERROR_MSG +
+          String.format("%s.%s", dbName_, getTbl()));
+    }
+    analyzer.addAccessEvent(new TAccessEvent(dbName_ + "." + tableName_.getTbl(),
+        TCatalogObjectType.TABLE, Privilege.CREATE.toString()));
+
+    if (location_ != null) {
+      location_.analyze(analyzer, Privilege.ALL, FsAction.READ_WRITE);
+    }
+  }
+}
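
toSql() above reassembles the statement from its parts. A standalone sketch of the
SQL shape it produces, with made-up database, table, and location names:

  public class CreateTableLikeSqlExample {
    public static void main(String[] args) {
      // Hypothetical inputs mirroring the fields of the statement above.
      boolean isExternal = true;
      boolean ifNotExists = true;
      String db = "analytics", tbl = "new_events";
      String srcDb = "analytics", srcTbl = "events";
      String comment = "copy of events";
      String fileFormat = "PARQUET";
      String location = "/warehouse/new_events";

      StringBuilder sb = new StringBuilder("CREATE ");
      if (isExternal) sb.append("EXTERNAL ");
      sb.append("TABLE ");
      if (ifNotExists) sb.append("IF NOT EXISTS ");
      sb.append(db).append('.').append(tbl).append(" LIKE ");
      sb.append(srcDb).append('.').append(srcTbl);
      if (comment != null) sb.append(" COMMENT '").append(comment).append("'");
      if (fileFormat != null) sb.append(" STORED AS ").append(fileFormat);
      if (location != null) sb.append(" LOCATION '").append(location).append("'");
      // Prints, on one line (wrapped here for readability):
      //   CREATE EXTERNAL TABLE IF NOT EXISTS analytics.new_events LIKE analytics.events
      //   COMMENT 'copy of events' STORED AS PARQUET LOCATION '/warehouse/new_events'
      System.out.println(sb);
    }
  }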

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/CreateTableStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/CreateTableStmt.java b/fe/src/main/java/org/apache/impala/analysis/CreateTableStmt.java
new file mode 100644
index 0000000..f7b683f
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/CreateTableStmt.java
@@ -0,0 +1,416 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.avro.Schema;
+import org.apache.avro.SchemaParseException;
+import org.apache.hadoop.fs.permission.FsAction;
+
+import com.cloudera.impala.authorization.Privilege;
+import com.cloudera.impala.catalog.HdfsStorageDescriptor;
+import com.cloudera.impala.catalog.KuduTable;
+import com.cloudera.impala.catalog.RowFormat;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.common.FileSystemUtil;
+import com.cloudera.impala.thrift.TAccessEvent;
+import com.cloudera.impala.thrift.TCatalogObjectType;
+import com.cloudera.impala.thrift.TCreateTableParams;
+import com.cloudera.impala.thrift.THdfsFileFormat;
+import com.cloudera.impala.thrift.TTableName;
+import com.cloudera.impala.util.AvroSchemaConverter;
+import com.cloudera.impala.util.AvroSchemaParser;
+import com.cloudera.impala.util.AvroSchemaUtils;
+import com.cloudera.impala.util.KuduUtil;
+import com.cloudera.impala.util.MetaStoreUtil;
+import com.google.common.base.Preconditions;
+import com.google.common.base.Strings;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
+
+/**
+ * Represents a CREATE TABLE statement.
+ */
+public class CreateTableStmt extends StatementBase {
+  private List<ColumnDef> columnDefs_;
+  private final String comment_;
+  private final boolean isExternal_;
+  private final boolean ifNotExists_;
+  private final THdfsFileFormat fileFormat_;
+  private final ArrayList<ColumnDef> partitionColDefs_;
+  private final RowFormat rowFormat_;
+  private TableName tableName_;
+  private final Map<String, String> tblProperties_;
+  private final Map<String, String> serdeProperties_;
+  private final HdfsCachingOp cachingOp_;
+  private HdfsUri location_;
+  private final List<DistributeParam> distributeParams_;
+
+  // Set during analysis
+  private String owner_;
+
+  /**
+   * Builds a CREATE TABLE statement
+   * @param tableName - Name of the new table
+   * @param columnDefs - List of column definitions for the table
+   * @param partitionColumnDefs - List of partition column definitions for the table
+   * @param isExternal - If true, the table's data will be preserved if dropped.
+   * @param comment - Comment to attach to the table
+   * @param rowFormat - Custom row format of the table. Use RowFormat.DEFAULT_ROW_FORMAT
+   *          to specify default row format.
+   * @param fileFormat - File format of the table
+   * @param location - The HDFS location where the table data will be stored.
+   * @param cachingOp - The HDFS caching op that should be applied to this table.
+   * @param ifNotExists - If true, no errors are thrown if the table already exists.
+   * @param tblProperties - Optional map of key/values to persist with table metadata.
+   * @param serdeProperties - Optional map of key/values to persist with table serde
+   *                          metadata.
+   */
+  public CreateTableStmt(TableName tableName, List<ColumnDef> columnDefs,
+      List<ColumnDef> partitionColumnDefs, boolean isExternal, String comment,
+      RowFormat rowFormat, THdfsFileFormat fileFormat, HdfsUri location,
+      HdfsCachingOp cachingOp, boolean ifNotExists, Map<String, String> tblProperties,
+      Map<String, String> serdeProperties, List<DistributeParam> distributeParams) {
+    Preconditions.checkNotNull(columnDefs);
+    Preconditions.checkNotNull(partitionColumnDefs);
+    Preconditions.checkNotNull(fileFormat);
+    Preconditions.checkNotNull(rowFormat);
+    Preconditions.checkNotNull(tableName);
+
+    columnDefs_ = Lists.newArrayList(columnDefs);
+    comment_ = comment;
+    isExternal_ = isExternal;
+    ifNotExists_ = ifNotExists;
+    fileFormat_ = fileFormat;
+    location_ = location;
+    cachingOp_ = cachingOp;
+    partitionColDefs_ = Lists.newArrayList(partitionColumnDefs);
+    rowFormat_ = rowFormat;
+    tableName_ = tableName;
+    tblProperties_ = tblProperties;
+    serdeProperties_ = serdeProperties;
+    unescapeProperties(tblProperties_);
+    unescapeProperties(serdeProperties_);
+    distributeParams_ = distributeParams;
+  }
+
+  /**
+   * Copy c'tor.
+   */
+  public CreateTableStmt(CreateTableStmt other) {
+    columnDefs_ = Lists.newArrayList(other.columnDefs_);
+    comment_ = other.comment_;
+    isExternal_ = other.isExternal_;
+    ifNotExists_ = other.ifNotExists_;
+    fileFormat_ = other.fileFormat_;
+    location_ = other.location_;
+    cachingOp_ = other.cachingOp_;
+    partitionColDefs_ = Lists.newArrayList(other.partitionColDefs_);
+    rowFormat_ = other.rowFormat_;
+    tableName_ = other.tableName_;
+    tblProperties_ = other.tblProperties_;
+    serdeProperties_ = other.serdeProperties_;
+    distributeParams_ = other.distributeParams_;
+  }
+
+  @Override
+  public CreateTableStmt clone() { return new CreateTableStmt(this); }
+
+  public String getTbl() { return tableName_.getTbl(); }
+  public TableName getTblName() { return tableName_; }
+  public List<ColumnDef> getColumnDefs() { return columnDefs_; }
+  public List<ColumnDef> getPartitionColumnDefs() { return partitionColDefs_; }
+  public String getComment() { return comment_; }
+  public boolean isExternal() { return isExternal_; }
+  public boolean getIfNotExists() { return ifNotExists_; }
+  public HdfsUri getLocation() { return location_; }
+  public void setLocation(HdfsUri location) { this.location_ = location; }
+  public THdfsFileFormat getFileFormat() { return fileFormat_; }
+  public RowFormat getRowFormat() { return rowFormat_; }
+  public Map<String, String> getTblProperties() { return tblProperties_; }
+  public Map<String, String> getSerdeProperties() { return serdeProperties_; }
+
+  /**
+   * Can only be called after analysis, returns the owner of this table (the user from
+   * the current session).
+   */
+  public String getOwner() {
+    Preconditions.checkNotNull(owner_);
+    return owner_;
+  }
+
+  /**
+   * Can only be called after analysis, returns the name of the database the table will
+   * be created within.
+   */
+  public String getDb() {
+    Preconditions.checkState(isAnalyzed());
+    return tableName_.getDb();
+  }
+
+  @Override
+  public String toSql() { return ToSqlUtils.getCreateTableSql(this); }
+
+  public TCreateTableParams toThrift() {
+    TCreateTableParams params = new TCreateTableParams();
+    params.setTable_name(new TTableName(getDb(), getTbl()));
+    for (ColumnDef col: getColumnDefs()) {
+      params.addToColumns(col.toThrift());
+    }
+    for (ColumnDef col: getPartitionColumnDefs()) {
+      params.addToPartition_columns(col.toThrift());
+    }
+    params.setOwner(getOwner());
+    params.setIs_external(isExternal());
+    params.setComment(comment_);
+    params.setLocation(location_ == null ? null : location_.toString());
+    if (cachingOp_ != null) params.setCache_op(cachingOp_.toThrift());
+    params.setRow_format(rowFormat_.toThrift());
+    params.setFile_format(fileFormat_);
+    params.setIf_not_exists(getIfNotExists());
+    if (tblProperties_ != null) params.setTable_properties(tblProperties_);
+    if (serdeProperties_ != null) params.setSerde_properties(serdeProperties_);
+    if (distributeParams_ != null) {
+      for (DistributeParam d : distributeParams_) {
+        params.addToDistribute_by(d.toThrift());
+      }
+    }
+    return params;
+  }
+
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    super.analyze(analyzer);
+    Preconditions.checkState(tableName_ != null && !tableName_.isEmpty());
+    tableName_ = analyzer.getFqTableName(tableName_);
+    tableName_.analyze();
+    owner_ = analyzer.getUser().getName();
+
+    MetaStoreUtil.checkShortPropertyMap("Property", tblProperties_);
+    MetaStoreUtil.checkShortPropertyMap("Serde property", serdeProperties_);
+
+    if (analyzer.dbContainsTable(tableName_.getDb(), tableName_.getTbl(),
+        Privilege.CREATE) && !ifNotExists_) {
+      throw new AnalysisException(Analyzer.TBL_ALREADY_EXISTS_ERROR_MSG + tableName_);
+    }
+
+    analyzer.addAccessEvent(new TAccessEvent(tableName_.toString(),
+        TCatalogObjectType.TABLE, Privilege.CREATE.toString()));
+
+    // Only Avro tables can have empty column defs because they can infer them from
+    // the Avro schema.
+    if (columnDefs_.isEmpty() && fileFormat_ != THdfsFileFormat.AVRO) {
+      throw new AnalysisException("Table requires at least 1 column");
+    }
+
+    if (location_ != null) {
+      location_.analyze(analyzer, Privilege.ALL, FsAction.READ_WRITE);
+    }
+
+    analyzeRowFormat(analyzer);
+
+    // Check that all the column names are valid and unique.
+    analyzeColumnDefs(analyzer);
+
+    if (getTblProperties() != null && KuduTable.KUDU_STORAGE_HANDLER.equals(
+        getTblProperties().get(KuduTable.KEY_STORAGE_HANDLER))) {
+      analyzeKuduTable(analyzer);
+    } else if (distributeParams_ != null) {
+      throw new AnalysisException("Only Kudu tables can use DISTRIBUTE BY clause.");
+    }
+
+    if (fileFormat_ == THdfsFileFormat.AVRO) {
+      columnDefs_ = analyzeAvroSchema(analyzer);
+      if (columnDefs_.isEmpty()) {
+        throw new AnalysisException(
+            "An Avro table requires column definitions or an Avro schema.");
+      }
+      AvroSchemaUtils.setFromSerdeComment(columnDefs_);
+      analyzeColumnDefs(analyzer);
+    }
+
+    if (cachingOp_ != null) {
+      cachingOp_.analyze(analyzer);
+      if (cachingOp_.shouldCache() && location_ != null &&
+          !FileSystemUtil.isPathCacheable(location_.getPath())) {
+        throw new AnalysisException(String.format("Location '%s' cannot be cached. " +
+            "Please retry without caching: CREATE TABLE %s ... UNCACHED",
+            location_.toString(), tableName_));
+      }
+    }
+
+    // Analyze 'skip.header.line.count' property.
+    if (tblProperties_ != null) {
+      AlterTableSetTblProperties.analyzeSkipHeaderLineCount(tblProperties_);
+    }
+  }
+
+  private void analyzeRowFormat(Analyzer analyzer) throws AnalysisException {
+    Byte fieldDelim = analyzeRowFormatValue(rowFormat_.getFieldDelimiter());
+    Byte lineDelim = analyzeRowFormatValue(rowFormat_.getLineDelimiter());
+    Byte escapeChar = analyzeRowFormatValue(rowFormat_.getEscapeChar());
+    if (fileFormat_ == THdfsFileFormat.TEXT) {
+      if (fieldDelim == null) fieldDelim = HdfsStorageDescriptor.DEFAULT_FIELD_DELIM;
+      if (lineDelim == null) lineDelim = HdfsStorageDescriptor.DEFAULT_LINE_DELIM;
+      if (escapeChar == null) escapeChar = HdfsStorageDescriptor.DEFAULT_ESCAPE_CHAR;
+      if (fieldDelim != null && lineDelim != null && fieldDelim.equals(lineDelim)) {
+        throw new AnalysisException("Field delimiter and line delimiter have same " +
+            "value: byte " + fieldDelim);
+      }
+      if (fieldDelim != null && escapeChar != null && fieldDelim.equals(escapeChar)) {
+        analyzer.addWarning("Field delimiter and escape character have same value: " +
+            "byte " + fieldDelim + ". Escape character will be ignored");
+      }
+      if (lineDelim != null && escapeChar != null && lineDelim.equals(escapeChar)) {
+        analyzer.addWarning("Line delimiter and escape character have same value: " +
+            "byte " + lineDelim + ". Escape character will be ignored");
+      }
+    }
+  }
+
+  /**
+   * Analyzes columnDefs_ and partitionColDefs_ checking whether all column
+   * names are unique.
+   */
+  private void analyzeColumnDefs(Analyzer analyzer) throws AnalysisException {
+    Set<String> colNames = Sets.newHashSet();
+    for (ColumnDef colDef: columnDefs_) {
+      colDef.analyze();
+      if (!colNames.add(colDef.getColName().toLowerCase())) {
+        throw new AnalysisException("Duplicate column name: " + colDef.getColName());
+      }
+    }
+    for (ColumnDef colDef: partitionColDefs_) {
+      colDef.analyze();
+      if (!colDef.getType().supportsTablePartitioning()) {
+        throw new AnalysisException(
+            String.format("Type '%s' is not supported as partition-column type " +
+                "in column: %s", colDef.getType().toSql(), colDef.getColName()));
+      }
+      if (!colNames.add(colDef.getColName().toLowerCase())) {
+        throw new AnalysisException("Duplicate column name: " + colDef.getColName());
+      }
+    }
+  }
+
+  /**
+   * Analyzes the Avro schema and compares it with the columnDefs_ to detect
+   * inconsistencies. Returns a list of column descriptors that should be
+   * used for creating the table (possibly identical to columnDefs_).
+   */
+  private List<ColumnDef> analyzeAvroSchema(Analyzer analyzer)
+      throws AnalysisException {
+    Preconditions.checkState(fileFormat_ == THdfsFileFormat.AVRO);
+    // Look for the schema in TBLPROPERTIES and in SERDEPROPERTIES, with the latter
+    // taking precedence.
+    List<Map<String, String>> schemaSearchLocations = Lists.newArrayList();
+    schemaSearchLocations.add(serdeProperties_);
+    schemaSearchLocations.add(tblProperties_);
+    String avroSchema = null;
+    List<ColumnDef> avroCols = null; // parsed from avroSchema
+    try {
+      avroSchema = AvroSchemaUtils.getAvroSchema(schemaSearchLocations);
+      if (avroSchema == null) {
+        // No Avro schema was explicitly set in the serde or table properties, so infer
+        // the Avro schema from the column definitions.
+        Schema inferredSchema = AvroSchemaConverter.convertColumnDefs(
+            columnDefs_, tableName_.toString());
+        avroSchema = inferredSchema.toString();
+      }
+      if (Strings.isNullOrEmpty(avroSchema)) {
+        throw new AnalysisException("Avro schema is null or empty: " +
+            tableName_.toString());
+      }
+      avroCols = AvroSchemaParser.parse(avroSchema);
+    } catch (SchemaParseException e) {
+      throw new AnalysisException(String.format(
+          "Error parsing Avro schema for table '%s': %s", tableName_.toString(),
+          e.getMessage()));
+    }
+    Preconditions.checkNotNull(avroCols);
+
+    // Analyze the Avro schema to detect inconsistencies with the columnDefs_.
+    // In case of inconsistencies, the column defs are ignored in favor of the Avro
+    // schema for simplicity and, in particular, to enable COMPUTE STATS (IMPALA-1104).
+    StringBuilder warning = new StringBuilder();
+    List<ColumnDef> reconciledColDefs =
+        AvroSchemaUtils.reconcileSchemas(columnDefs_, avroCols, warning);
+    if (warning.length() > 0) analyzer.addWarning(warning.toString());
+    return reconciledColDefs;
+  }
+
+  private void analyzeKuduTable(Analyzer analyzer) throws AnalysisException {
+    // Validate that Kudu table is correctly specified.
+    if (!KuduTable.tableParamsAreValid(getTblProperties())) {
+      throw new AnalysisException("Kudu table is missing parameters " +
+          String.format("in table properties. Please verify if %s, %s, and %s are "
+                  + "present and have valid values.",
+              KuduTable.KEY_TABLE_NAME, KuduTable.KEY_MASTER_ADDRESSES,
+              KuduTable.KEY_KEY_COLUMNS));
+    }
+
+    // Kudu table cannot be a cached table
+    if (cachingOp_ != null) {
+      throw new AnalysisException("A Kudu table cannot be cached in HDFS.");
+    }
+
+    if (distributeParams_ != null) {
+      if (isExternal_) {
+        throw new AnalysisException(
+            "The DISTRIBUTE BY clause may not be specified for external tables.");
+      }
+
+      List<String> keyColumns = KuduUtil.parseKeyColumnsAsList(
+          getTblProperties().get(KuduTable.KEY_KEY_COLUMNS));
+      for (DistributeParam d : distributeParams_) {
+        // If the columns are not set, default to all key columns
+        if (d.getColumns() == null) d.setColumns(keyColumns);
+        d.analyze(analyzer);
+      }
+    } else if (!isExternal_) {
+      throw new AnalysisException(
+          "A data distribution must be specified using the DISTRIBUTE BY clause.");
+    }
+  }
+
+  private Byte analyzeRowFormatValue(String value) throws AnalysisException {
+    if (value == null) return null;
+    Byte byteVal = HdfsStorageDescriptor.parseDelim(value);
+    if (byteVal == null) {
+      throw new AnalysisException("ESCAPED BY values and LINE/FIELD " +
+          "terminators must be specified as a single character or as a decimal " +
+          "value in the range [-128:127]: " + value);
+    }
+    return byteVal;
+  }
+
+  /**
+   * Unescapes all values in the property map.
+   */
+  public static void unescapeProperties(Map<String, String> propertyMap) {
+    if (propertyMap == null) return;
+    for (Map.Entry<String, String> kv : propertyMap.entrySet()) {
+      propertyMap.put(kv.getKey(),
+          new StringLiteral(kv.getValue()).getUnescapedValue());
+    }
+  }
+}
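
analyzeRowFormatValue() above accepts a delimiter either as a single character or as
a decimal value in [-128, 127]. A simplified standalone approximation of that parsing
(the real logic lives in HdfsStorageDescriptor.parseDelim and may accept more forms;
this sketch only illustrates the two cases named in the error message):

  public class DelimiterParseExample {
    // Returns the delimiter as a byte, or null if the value is not a single
    // character and not a decimal value in [-128, 127].
    static Byte parseDelim(String value) {
      if (value == null) return null;
      if (value.length() == 1) return (byte) value.charAt(0);
      try {
        int i = Integer.parseInt(value);
        if (i >= -128 && i <= 127) return (byte) i;
      } catch (NumberFormatException e) {
        // Fall through and report the value as invalid.
      }
      return null;
    }

    public static void main(String[] args) {
      System.out.println(parseDelim("|"));   // 124
      System.out.println(parseDelim("-2"));  // -2
      System.out.println(parseDelim("300")); // null (outside the byte range)
    }
  }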

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/CreateUdaStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/CreateUdaStmt.java b/fe/src/main/java/org/apache/impala/analysis/CreateUdaStmt.java
new file mode 100644
index 0000000..46b0003
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/CreateUdaStmt.java
@@ -0,0 +1,209 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+
+import com.cloudera.impala.catalog.AggregateFunction;
+import com.cloudera.impala.catalog.Function;
+import com.cloudera.impala.catalog.PrimitiveType;
+import com.cloudera.impala.catalog.Type;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.thrift.TFunctionBinaryType;
+import com.cloudera.impala.thrift.TSymbolType;
+import com.google.common.base.Preconditions;
+
+/**
+ * Represents a CREATE AGGREGATE FUNCTION statement.
+ */
+public class CreateUdaStmt extends CreateFunctionStmtBase {
+  private final TypeDef intermediateTypeDef_;
+
+  /**
+   * Builds a CREATE AGGREGATE FUNCTION statement
+   * @param fnSymbol - Name of the function
+   * @param args - List of types for the arguments to this function
+   * @param retTypeDef - The type this function returns.
+   * @param intermediateTypeDef - The type used for the intermediate data.
+   * @param location - Path in HDFS containing the UDA.
+   * @param ifNotExists - If true, no errors are thrown if the function already exists
+   * @param optArgs - Key/Value pairs for additional arguments. The keys are
+   *        validated in analyze()
+   */
+  public CreateUdaStmt(FunctionName fnSymbol, FunctionArgs args,
+      TypeDef retTypeDef, TypeDef intermediateTypeDef,
+      HdfsUri location, boolean ifNotExists,
+      HashMap<CreateFunctionStmtBase.OptArg, String> optArgs) {
+    super(fnSymbol, args, retTypeDef, location, ifNotExists, optArgs);
+    intermediateTypeDef_ = intermediateTypeDef;
+  }
+
+  private void reportCouldNotInferSymbol(String function) throws AnalysisException {
+    throw new AnalysisException("Could not infer symbol for "
+        + function + "() function.");
+  }
+
+  // Gets the symbol for 'arg'. If the user set it in the DDL, return that. Otherwise
+  // try to infer the symbol from the update function's symbol. To infer it, the update
+  // function symbol must contain "update" or "Update", which is replaced with
+  // 'defaultSymbol'.
+  // Returns null if no symbol was found.
+  private String getSymbolSymbol(OptArg arg, String defaultSymbol) {
+    // First lookup if the user explicitly set it.
+    if (optArgs_.get(arg) != null) return optArgs_.get(arg);
+    // Try to match it from Update
+    String updateFn = optArgs_.get(OptArg.UPDATE_FN);
+    // Mangled symbols start with _Z. We can't derive substitute symbols from mangled
+    // symbols.
+    // TODO: this is doable in the BE with more symbol parsing.
+    if (updateFn.startsWith("_Z")) return null;
+
+    if (updateFn.contains("update")) return updateFn.replace("update", defaultSymbol);
+    if (updateFn.contains("Update")) {
+      char[] array = defaultSymbol.toCharArray();
+      array[0] = Character.toUpperCase(array[0]);
+      String s = new String(array);
+      return updateFn.replace("Update", s);
+    }
+    return null;
+  }
+
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    super.analyze(analyzer);
+    Preconditions.checkNotNull(fn_);
+    Preconditions.checkState(fn_ instanceof AggregateFunction);
+    AggregateFunction uda = (AggregateFunction) fn_;
+
+    if (uda.getNumArgs() == 0) {
+      throw new AnalysisException("UDAs must take at least one argument.");
+    }
+
+    if (uda.getBinaryType() == TFunctionBinaryType.JAVA) {
+      throw new AnalysisException("Java UDAs are not supported.");
+    }
+
+    // TODO: these are temporary restrictions since the BE cannot yet
+    // execute them.
+    if (uda.getBinaryType() == TFunctionBinaryType.IR) {
+      throw new AnalysisException("IR UDAs are not yet supported.");
+    }
+    if (fn_.hasVarArgs()) {
+      throw new AnalysisException("UDAs with varargs are not yet supported.");
+    }
+    if (fn_.getNumArgs() > 8) {
+      throw new AnalysisException(
+          "UDAs with more than 8 arguments are not yet supported.");
+    }
+
+    if (uda.getReturnType().getPrimitiveType() == PrimitiveType.CHAR) {
+      throw new AnalysisException("UDAs with CHAR return type are not yet supported.");
+    }
+    if (uda.getReturnType().getPrimitiveType() == PrimitiveType.VARCHAR) {
+      throw new AnalysisException("UDAs with VARCHAR return type are not yet supported.");
+    }
+    for (int i = 0; i < uda.getNumArgs(); ++i) {
+      if (uda.getArgs()[i].getPrimitiveType() == PrimitiveType.CHAR) {
+        throw new AnalysisException("UDAs with CHAR arguments are not yet supported.");
+      }
+      if (uda.getArgs()[i].getPrimitiveType() == PrimitiveType.VARCHAR) {
+        throw new AnalysisException("UDAs with VARCHAR arguments are not yet supported.");
+      }
+    }
+
+    Type intermediateType = null;
+    if (intermediateTypeDef_ == null) {
+      intermediateType = uda.getReturnType();
+    } else {
+      intermediateTypeDef_.analyze(analyzer);
+      intermediateType = intermediateTypeDef_.getType();
+    }
+    uda.setIntermediateType(intermediateType);
+
+    // Check arguments that are only valid in UDFs are not set.
+    checkOptArgNotSet(OptArg.SYMBOL);
+    checkOptArgNotSet(OptArg.PREPARE_FN);
+    checkOptArgNotSet(OptArg.CLOSE_FN);
+
+    // The user must provide the symbol for Update.
+    uda.setUpdateFnSymbol(uda.lookupSymbol(
+        checkAndGetOptArg(OptArg.UPDATE_FN), TSymbolType.UDF_EVALUATE, intermediateType,
+        uda.hasVarArgs(), uda.getArgs()));
+
+    // If the ddl did not specify the init/serialize/merge/finalize function
+    // Symbols, guess them based on the update fn Symbol.
+    Preconditions.checkNotNull(uda.getUpdateFnSymbol());
+    uda.setInitFnSymbol(getSymbolSymbol(OptArg.INIT_FN, "init"));
+    uda.setSerializeFnSymbol(getSymbolSymbol(OptArg.SERIALIZE_FN, "serialize"));
+    uda.setMergeFnSymbol(getSymbolSymbol(OptArg.MERGE_FN, "merge"));
+    uda.setFinalizeFnSymbol(getSymbolSymbol(OptArg.FINALIZE_FN, "finalize"));
+
+    // Init and merge are required.
+    if (uda.getInitFnSymbol() == null) reportCouldNotInferSymbol("init");
+    if (uda.getMergeFnSymbol() == null) reportCouldNotInferSymbol("merge");
+
+    // Validate that all set symbols exist.
+    uda.setInitFnSymbol(uda.lookupSymbol(uda.getInitFnSymbol(),
+        TSymbolType.UDF_EVALUATE, intermediateType, false));
+    uda.setMergeFnSymbol(uda.lookupSymbol(uda.getMergeFnSymbol(),
+        TSymbolType.UDF_EVALUATE, intermediateType, false, intermediateType));
+    if (uda.getSerializeFnSymbol() != null) {
+      try {
+        uda.setSerializeFnSymbol(uda.lookupSymbol(uda.getSerializeFnSymbol(),
+            TSymbolType.UDF_EVALUATE, null, false, intermediateType));
+      } catch (AnalysisException e) {
+        if (optArgs_.get(OptArg.SERIALIZE_FN) != null) {
+          throw e;
+        } else {
+          // Ignore, these symbols are optional.
+          uda.setSerializeFnSymbol(null);
+        }
+      }
+    }
+    if (uda.getFinalizeFnSymbol() != null) {
+      try {
+        uda.setFinalizeFnSymbol(uda.lookupSymbol(
+            uda.getFinalizeFnSymbol(), TSymbolType.UDF_EVALUATE, null, false,
+            intermediateType));
+      } catch (AnalysisException e) {
+        if (optArgs_.get(OptArg.FINALIZE_FN) != null) {
+          throw e;
+        } else {
+          // Ignore, these symbols are optional.
+          uda.setFinalizeFnSymbol(null);
+        }
+      }
+    }
+
+    // If the intermediate type is not the return type, then finalize is
+    // required.
+    if (!intermediateType.equals(fn_.getReturnType()) &&
+        uda.getFinalizeFnSymbol() == null) {
+      throw new AnalysisException("Finalize() is required for this UDA.");
+    }
+
+    sqlString_ = uda.toSql(ifNotExists_);
+  }
+
+  @Override
+  protected Function createFunction(FunctionName fnName, ArrayList<Type> argTypes,
+      Type retType, boolean hasVarArgs) {
+    return new AggregateFunction(fnName_, args_.getArgTypes(), retTypeDef_.getType(),
+        args_.hasVarArgs());
+  }
+}
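
The analyze() logic above for the aggregate-function case encodes two rules that are easy to miss in the diff: when no intermediate type is declared, the intermediate type defaults to the UDA's return type, and Finalize() becomes mandatory as soon as the two types differ. A minimal standalone sketch of those two rules follows (plain Java, not Impala code; types are modeled as strings purely for illustration, and the CHAR(16)/STRING pairing is a hypothetical example):

    // Standalone sketch, not part of this commit.
    public class UdaIntermediateTypeRule {
      /** The effective intermediate type defaults to the return type when none is declared. */
      static String effectiveIntermediateType(String declaredIntermediate, String returnType) {
        return declaredIntermediate == null ? returnType : declaredIntermediate;
      }

      /** Finalize() is mandatory whenever the intermediate type differs from the return type. */
      static boolean finalizeRequired(String intermediateType, String returnType) {
        return !intermediateType.equals(returnType);
      }

      public static void main(String[] args) {
        // No INTERMEDIATE clause: intermediate == return type, so Finalize() stays optional.
        String t1 = effectiveIntermediateType(null, "BIGINT");
        System.out.println(t1 + " finalizeRequired=" + finalizeRequired(t1, "BIGINT"));  // false

        // Hypothetical CHAR(16) intermediate with a STRING return type: Finalize() is required.
        String t2 = effectiveIntermediateType("CHAR(16)", "STRING");
        System.out.println(t2 + " finalizeRequired=" + finalizeRequired(t2, "STRING"));  // true
      }
    }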

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/CreateUdfStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/CreateUdfStmt.java b/fe/src/main/java/org/apache/impala/analysis/CreateUdfStmt.java
new file mode 100644
index 0000000..550d26f
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/CreateUdfStmt.java
@@ -0,0 +1,140 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+
+import com.cloudera.impala.catalog.Db;
+import com.cloudera.impala.catalog.Function;
+import com.cloudera.impala.catalog.PrimitiveType;
+import com.cloudera.impala.catalog.ScalarFunction;
+import com.cloudera.impala.catalog.ScalarType;
+import com.cloudera.impala.catalog.Type;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.hive.executor.UdfExecutor.JavaUdfDataType;
+import com.cloudera.impala.thrift.TFunctionBinaryType;
+import com.cloudera.impala.thrift.TFunctionCategory;
+import com.cloudera.impala.thrift.TSymbolType;
+import com.google.common.base.Preconditions;
+
+/**
+ * Represents a CREATE FUNCTION statement.
+ */
+public class CreateUdfStmt extends CreateFunctionStmtBase {
+  /**
+   * Builds a CREATE FUNCTION statement.
+   * @param fnName - Name of the function.
+   * @param args - List of types for the arguments to this function.
+   * @param retTypeDef - The type this function returns.
+   * @param location - Path in HDFS containing the UDF.
+   * @param ifNotExists - If true, no errors are thrown if the function already exists.
+   * @param optArgs - Key/value pairs for additional arguments. The keys are
+   *        validated in analyze().
+   */
+  public CreateUdfStmt(FunctionName fnName, FunctionArgs args,
+      TypeDef retTypeDef, HdfsUri location, boolean ifNotExists,
+      HashMap<CreateFunctionStmtBase.OptArg, String> optArgs) {
+    super(fnName, args, retTypeDef, location, ifNotExists, optArgs);
+  }
+
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    super.analyze(analyzer);
+    Preconditions.checkNotNull(fn_);
+    Preconditions.checkState(fn_ instanceof ScalarFunction);
+    ScalarFunction udf = (ScalarFunction) fn_;
+
+    if (hasSignature()) {
+      if (udf.getBinaryType() == TFunctionBinaryType.JAVA) {
+        if (!JavaUdfDataType.isSupported(udf.getReturnType())) {
+          throw new AnalysisException(
+              "Type " + udf.getReturnType().toSql() + " is not supported for Java UDFs.");
+        }
+        for (int i = 0; i < udf.getNumArgs(); ++i) {
+          if (!JavaUdfDataType.isSupported(udf.getArgs()[i])) {
+            throw new AnalysisException(
+                "Type " + udf.getArgs()[i].toSql() + " is not supported for Java UDFs.");
+          }
+        }
+      }
+
+      if (udf.getReturnType().getPrimitiveType() == PrimitiveType.CHAR) {
+        throw new AnalysisException("UDFs that use CHAR are not yet supported.");
+      }
+      if (udf.getReturnType().getPrimitiveType() == PrimitiveType.VARCHAR) {
+        throw new AnalysisException("UDFs that use VARCHAR are not yet supported.");
+      }
+      for (int i = 0; i < udf.getNumArgs(); ++i) {
+        if (udf.getArgs()[i].getPrimitiveType() == PrimitiveType.CHAR) {
+          throw new AnalysisException("UDFs that use CHAR are not yet supported.");
+        }
+        if (udf.getArgs()[i].getPrimitiveType() == PrimitiveType.VARCHAR) {
+          throw new AnalysisException("UDFs that use VARCHAR are not yet supported.");
+        }
+      }
+    }
+
+    // Check that the user-provided symbol exists.
+    udf.setSymbolName(udf.lookupSymbol(
+        checkAndGetOptArg(OptArg.SYMBOL), TSymbolType.UDF_EVALUATE, null,
+        udf.hasVarArgs(), udf.getArgs()));
+
+    // Set optional Prepare/Close functions
+    String prepareFn = optArgs_.get(OptArg.PREPARE_FN);
+    if (prepareFn != null) {
+      udf.setPrepareFnSymbol(udf.lookupSymbol(prepareFn, TSymbolType.UDF_PREPARE));
+    }
+    String closeFn = optArgs_.get(OptArg.CLOSE_FN);
+    if (closeFn != null) {
+      udf.setCloseFnSymbol(udf.lookupSymbol(closeFn, TSymbolType.UDF_CLOSE));
+    }
+
+    // UDFs should not set any of these.
+    checkOptArgNotSet(OptArg.UPDATE_FN);
+    checkOptArgNotSet(OptArg.INIT_FN);
+    checkOptArgNotSet(OptArg.SERIALIZE_FN);
+    checkOptArgNotSet(OptArg.MERGE_FN);
+    checkOptArgNotSet(OptArg.FINALIZE_FN);
+
+    sqlString_ = udf.toSql(ifNotExists_);
+
+    // Check that no function with the same name exists whose isPersistent field differs
+    // from udf.isPersistent_. For example, we don't allow two Java UDFs with the same
+    // name but opposite persistence values. This only applies to Java UDFs, as all
+    // native UDFs are persistent. Additionally, we don't throw an exception if
+    // "IF NOT EXISTS" is specified in the query.
+    if (udf.getBinaryType() != TFunctionBinaryType.JAVA || ifNotExists_) return;
+
+    Preconditions.checkNotNull(db_);
+    for (Function fn: db_.getFunctions(udf.functionName())) {
+      if (!hasSignature() || (hasSignature() && fn.isPersistent())) {
+        throw new AnalysisException(
+            Analyzer.FN_ALREADY_EXISTS_ERROR_MSG + fn.signatureString());
+      }
+    }
+  }
+
+  @Override
+  protected Function createFunction(FunctionName fnName, ArrayList<Type> argTypes, Type retType,
+      boolean hasVarArgs) {
+    return new ScalarFunction(fnName, argTypes, retType, hasVarArgs);
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/CreateViewStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/CreateViewStmt.java b/fe/src/main/java/org/apache/impala/analysis/CreateViewStmt.java
new file mode 100644
index 0000000..c38eef0
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/CreateViewStmt.java
@@ -0,0 +1,80 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import java.util.ArrayList;
+
+import com.cloudera.impala.authorization.Privilege;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.common.RuntimeEnv;
+import com.cloudera.impala.thrift.TAccessEvent;
+import com.cloudera.impala.thrift.TCatalogObjectType;
+
+import com.google.common.base.Joiner;
+import com.google.common.base.Preconditions;
+
+/**
+ * Represents a CREATE VIEW statement.
+ */
+public class CreateViewStmt extends CreateOrAlterViewStmtBase {
+
+  public CreateViewStmt(boolean ifNotExists, TableName tableName,
+      ArrayList<ColumnDef> columnDefs, String comment, QueryStmt viewDefStmt) {
+    super(ifNotExists, tableName, columnDefs, comment, viewDefStmt);
+  }
+
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    Preconditions.checkState(tableName_ != null && !tableName_.isEmpty());
+
+    tableName_.analyze();
+    // Use a child analyzer to let views have complex-typed columns.
+    Analyzer viewAnalyzer = new Analyzer(analyzer);
+    // Enforce Hive column labels for view compatibility.
+    viewAnalyzer.setUseHiveColLabels(true);
+    viewDefStmt_.analyze(viewAnalyzer);
+
+    dbName_ = analyzer.getTargetDbName(tableName_);
+    owner_ = analyzer.getUser().getName();
+    if (analyzer.dbContainsTable(dbName_, tableName_.getTbl(), Privilege.CREATE) &&
+        !ifNotExists_) {
+      throw new AnalysisException(Analyzer.TBL_ALREADY_EXISTS_ERROR_MSG +
+          String.format("%s.%s", dbName_, tableName_.getTbl()));
+    }
+    analyzer.addAccessEvent(new TAccessEvent(dbName_ + "." + tableName_.getTbl(),
+        TCatalogObjectType.VIEW, Privilege.CREATE.toString()));
+
+    createColumnAndViewDefs(analyzer);
+    if (RuntimeEnv.INSTANCE.computeLineage() || RuntimeEnv.INSTANCE.isTestEnv()) {
+      computeLineageGraph(analyzer);
+    }
+  }
+
+  @Override
+  public String toSql() {
+    StringBuilder sb = new StringBuilder();
+    sb.append("CREATE VIEW ");
+    if (ifNotExists_) sb.append("IF NOT EXISTS ");
+    if (tableName_.getDb() != null) sb.append(tableName_.getDb() + ".");
+    sb.append(tableName_.getTbl() + " (");
+    sb.append(Joiner.on(", ").join(columnDefs_));
+    sb.append(") AS ");
+    sb.append(viewDefStmt_.toSql());
+    return sb.toString();
+  }
+}
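
For a quick sanity check of the string that toSql() above assembles, here is a standalone sketch (plain Java, not Impala code) that mirrors the same assembly order with hypothetical database, view, and column names, using String.join in place of Guava's Joiner:

    // Standalone sketch, not part of this commit.
    import java.util.List;

    public class CreateViewSqlSketch {
      // Mirrors the assembly order in CreateViewStmt.toSql(): keyword, optional IF NOT EXISTS,
      // optional db prefix, column list, then the view definition query.
      static String createViewSql(boolean ifNotExists, String db, String tbl,
          List<String> columnDefs, String viewDefSql) {
        StringBuilder sb = new StringBuilder("CREATE VIEW ");
        if (ifNotExists) sb.append("IF NOT EXISTS ");
        if (db != null) sb.append(db).append(".");
        sb.append(tbl).append(" (");
        sb.append(String.join(", ", columnDefs));
        sb.append(") AS ");
        sb.append(viewDefSql);
        return sb.toString();
      }

      public static void main(String[] args) {
        // Hypothetical names, for illustration only.
        System.out.println(createViewSql(true, "tpch", "v_orders",
            List.of("o_orderkey", "o_totalprice"),
            "SELECT o_orderkey, o_totalprice FROM orders"));
        // -> CREATE VIEW IF NOT EXISTS tpch.v_orders (o_orderkey, o_totalprice) AS SELECT ...
      }
    }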

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/DeleteStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/DeleteStmt.java b/fe/src/main/java/org/apache/impala/analysis/DeleteStmt.java
new file mode 100644
index 0000000..efa2117
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/DeleteStmt.java
@@ -0,0 +1,90 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import java.util.List;
+
+import com.cloudera.impala.common.Pair;
+import com.cloudera.impala.planner.DataSink;
+import com.cloudera.impala.planner.KuduTableSink;
+import com.cloudera.impala.planner.TableSink;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
+import org.apache.hadoop.hbase.client.Delete;
+
+/**
+ * Representation of a DELETE statement.
+ *
+ * A DELETE statement contains three main parts: the target table reference, the FROM
+ * clause, and the optional WHERE clause. Syntactically, this is represented as follows:
+ *
+ *     DELETE [FROM] dotted_path [WHERE expr]
+ *     DELETE [table_alias] FROM table_ref_list [WHERE expr]
+ *
+ * Only the syntax using the explicit from clause can contain join conditions.
+ */
+public class DeleteStmt extends ModifyStmt {
+
+  public DeleteStmt(List<String> targetTablePath, FromClause tableRefs,
+      Expr wherePredicate, boolean ignoreNotFound) {
+    super(targetTablePath, tableRefs, Lists.<Pair<SlotRef, Expr>>newArrayList(),
+        wherePredicate, ignoreNotFound);
+  }
+
+  public DeleteStmt(DeleteStmt other) {
+    super(other.targetTablePath_, other.fromClause_.clone(),
+        Lists.<Pair<SlotRef, Expr>>newArrayList(), other.wherePredicate_.clone(),
+        other.ignoreNotFound_);
+  }
+
+  public DataSink createDataSink() {
+    // analyze() must have been called before.
+    Preconditions.checkState(table_ != null);
+    TableSink tableSink = TableSink.create(table_, TableSink.Op.DELETE,
+        ImmutableList.<Expr>of(), referencedColumns_, false, ignoreNotFound_);
+    Preconditions.checkState(!referencedColumns_.isEmpty());
+    return tableSink;
+  }
+
+  @Override
+  public DeleteStmt clone() {
+    return new DeleteStmt(this);
+  }
+
+  @Override
+  public String toSql() {
+    StringBuilder b = new StringBuilder();
+    b.append("DELETE");
+    if (ignoreNotFound_) b.append(" IGNORE");
+    if (fromClause_.size() > 1 || targetTableRef_.hasExplicitAlias()) {
+      b.append(" ");
+      if (targetTableRef_.hasExplicitAlias()) {
+        b.append(targetTableRef_.getExplicitAlias());
+      } else {
+        b.append(targetTableRef_.toSql());
+      }
+    }
+    b.append(fromClause_.toSql());
+    if (wherePredicate_ != null) {
+      b.append(" WHERE ");
+      b.append(wherePredicate_.toSql());
+    }
+    return b.toString();
+  }
+}
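
To make the two DELETE forms from the class comment concrete, below is a standalone sketch (plain Java, not Impala code) of the strings toSql() produces; the table and alias names are hypothetical, and the FROM clause text is assumed here to already carry its leading " FROM":

    // Standalone sketch, not part of this commit.
    public class DeleteSqlSketch {
      // Mirrors DeleteStmt.toSql(): the target is only printed when the FROM clause has
      // more than one table ref or the target carries an explicit alias.
      static String deleteSql(boolean ignoreNotFound, String targetSql, boolean printTarget,
          String fromSql, String whereSql) {
        StringBuilder b = new StringBuilder("DELETE");
        if (ignoreNotFound) b.append(" IGNORE");
        if (printTarget) b.append(" ").append(targetSql);
        b.append(fromSql);
        if (whereSql != null) b.append(" WHERE ").append(whereSql);
        return b.toString();
      }

      public static void main(String[] args) {
        // Simple form: DELETE FROM <table> [WHERE ...]
        System.out.println(deleteSql(false, null, false, " FROM t", "id = 1"));
        // Aliased/join form: DELETE <alias> FROM <table_ref_list> [WHERE ...]
        System.out.println(deleteSql(false, "a", true, " FROM t a, other o", "a.id = o.id"));
      }
    }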

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/DescribeDbStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/DescribeDbStmt.java b/fe/src/main/java/org/apache/impala/analysis/DescribeDbStmt.java
new file mode 100644
index 0000000..0ddd6ec
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/DescribeDbStmt.java
@@ -0,0 +1,71 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import com.cloudera.impala.authorization.Privilege;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.thrift.TDescribeDbParams;
+import com.cloudera.impala.thrift.TDescribeOutputStyle;
+import com.google.common.base.Preconditions;
+import com.google.common.base.Strings;
+
+/**
+ * Represents a DESCRIBE DATABASE statement which returns metadata on
+ * a specified database:
+ * Syntax: DESCRIBE DATABASE [FORMATTED|EXTENDED] <db>
+ *
+ * If FORMATTED|EXTENDED is not specified, the statement only returns the given
+ * database's location and comment.
+ * If FORMATTED|EXTENDED is specified, extended metadata on the database is returned.
+ * This metadata includes the database's parameters, owner information,
+ * and privileges.
+ */
+public class DescribeDbStmt extends StatementBase {
+  private final TDescribeOutputStyle outputStyle_;
+  private final String dbName_;
+
+  public DescribeDbStmt(String dbName, TDescribeOutputStyle outputStyle) {
+    Preconditions.checkState(!Strings.isNullOrEmpty(dbName), "Invalid database name");
+    dbName_ = dbName;
+    outputStyle_ = outputStyle;
+  }
+
+  @Override
+  public String toSql() {
+    StringBuilder sb = new StringBuilder("DESCRIBE DATABASE ");
+    if (outputStyle_ != TDescribeOutputStyle.MINIMAL) {
+      sb.append(outputStyle_.toString() + " ");
+    }
+    return sb.toString() + dbName_;
+  }
+
+  public String getDb() { return dbName_; }
+  public TDescribeOutputStyle getOutputStyle() { return outputStyle_; }
+
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    analyzer.getDb(dbName_, Privilege.VIEW_METADATA);
+  }
+
+  public TDescribeDbParams toThrift() {
+    TDescribeDbParams params = new TDescribeDbParams();
+    params.setDb(dbName_);
+    params.setOutput_style(outputStyle_);
+    return params;
+  }
+}
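
As a small illustration of the toSql() output above for the different output styles, here is a standalone sketch (plain Java, not Impala code) with a local enum standing in for TDescribeOutputStyle and a hypothetical database name:

    // Standalone sketch, not part of this commit.
    public class DescribeDbSqlSketch {
      enum OutputStyle { MINIMAL, FORMATTED, EXTENDED }

      // Mirrors DescribeDbStmt.toSql(): the style keyword is only printed when it is not MINIMAL.
      static String describeDbSql(String dbName, OutputStyle style) {
        StringBuilder sb = new StringBuilder("DESCRIBE DATABASE ");
        if (style != OutputStyle.MINIMAL) sb.append(style).append(" ");
        return sb.append(dbName).toString();
      }

      public static void main(String[] args) {
        System.out.println(describeDbSql("tpch", OutputStyle.MINIMAL));    // DESCRIBE DATABASE tpch
        System.out.println(describeDbSql("tpch", OutputStyle.FORMATTED));  // DESCRIBE DATABASE FORMATTED tpch
      }
    }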


[39/61] [partial] incubator-impala git commit: IMPALA-3786: Replace "cloudera" with "apache" (part 1)

Posted by kw...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/ModifyStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/ModifyStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/ModifyStmt.java
deleted file mode 100644
index 66f97f5..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/ModifyStmt.java
+++ /dev/null
@@ -1,292 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.authorization.PrivilegeRequestBuilder;
-import com.cloudera.impala.catalog.Column;
-import com.cloudera.impala.catalog.KuduTable;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.Pair;
-import com.cloudera.impala.planner.DataSink;
-import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import com.google.common.collect.Sets;
-import org.slf4j.LoggerFactory;
-
-import static java.lang.String.format;
-
-/**
- * Abstract super class for statements that modify existing data like
- * UPDATE and DELETE.
- *
- * The ModifyStmt has four major parts:
- *   - targetTablePath (not null)
- *   - fromClause (not null)
- *   - assignmentExprs (not null, can be empty)
- *   - wherePredicate (nullable)
- *
- * In the analysis phase, a SelectStmt is created with the result expressions set to
- * match the right-hand side of the assignments in addition to projecting the key columns
- * of the underlying table. During query execution, the plan that
- * is generated from this SelectStmt produces all rows that need to be modified.
- *
- * Currently, only Kudu tables can be modified.
- */
-public abstract class ModifyStmt extends StatementBase {
-
-  private final static org.slf4j.Logger LOG = LoggerFactory.getLogger(ModifyStmt.class);
-
-  // List of explicitly mentioned assignment expressions in the UPDATE's SET clause
-  protected final List<Pair<SlotRef, Expr>> assignments_;
-
-  // Optional WHERE clause of the statement
-  protected final Expr wherePredicate_;
-
-  // Path identifying the target table.
-  protected final List<String> targetTablePath_;
-
-  // TableRef identifying the target table, set during analysis.
-  protected TableRef targetTableRef_;
-
-  protected FromClause fromClause_;
-
-  // Result of the analysis of the internal SelectStmt that produces the rows that
-  // will be modified.
-  protected SelectStmt sourceStmt_;
-
-  // Target Kudu table. Since currently only Kudu tables are supported, we use a
-  // concrete table class. Result of analysis.
-  protected KuduTable table_;
-
-  // END: Members that need to be reset()
-  /////////////////////////////////////////
-
-  // Position mapping of output expressions of the sourceStmt_ to column indices in the
-  // target table: the i'th output expression of sourceStmt_ writes to column
-  // referencedColumns_[i] of the target table. Set in createSourceStmt() during analysis.
-  protected ArrayList<Integer> referencedColumns_;
-
-  // On tables with a primary key, ignore key not found errors.
-  protected final boolean ignoreNotFound_;
-
-  public ModifyStmt(List<String> targetTablePath, FromClause fromClause,
-      List<Pair<SlotRef, Expr>> assignmentExprs,
-      Expr wherePredicate, boolean ignoreNotFound) {
-    targetTablePath_ = Preconditions.checkNotNull(targetTablePath);
-    fromClause_ = Preconditions.checkNotNull(fromClause);
-    assignments_ = Preconditions.checkNotNull(assignmentExprs);
-    wherePredicate_ = wherePredicate;
-    ignoreNotFound_ = ignoreNotFound;
-  }
-
-  /**
-   * The analysis of the ModifyStmt proceeds as follows: First, the FROM clause is
-   * analyzed and the targetTablePath is verified to be a valid alias into the FROM
- *   clause. Once the target table is identified, the assignment expressions are
- *   validated and, as a last step, the internal SelectStmt is produced and analyzed.
-   * Potential query rewrites for the select statement are implemented here and are not
-   * triggered externally by the statement rewriter.
-   */
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    super.analyze(analyzer);
-    fromClause_.analyze(analyzer);
-
-    List<Path> candidates = analyzer.getTupleDescPaths(targetTablePath_);
-    if (candidates.isEmpty()) {
-      throw new AnalysisException(format("'%s' is not a valid table alias or reference.",
-          Joiner.on(".").join(targetTablePath_)));
-    }
-
-    Preconditions.checkState(candidates.size() == 1);
-    Path path = candidates.get(0);
-    path.resolve();
-
-    if (path.destTupleDesc() == null) {
-      throw new AnalysisException(format(
-          "'%s' is not a table alias. Using the FROM clause requires the target table " +
-              "to be a table alias.",
-          Joiner.on(".").join(targetTablePath_)));
-    }
-
-    targetTableRef_ = analyzer.getTableRef(path.getRootDesc().getId());
-    if (targetTableRef_ instanceof InlineViewRef) {
-      throw new AnalysisException(format("Cannot modify view: '%s'",
-          targetTableRef_.toSql()));
-    }
-
-    Preconditions.checkNotNull(targetTableRef_);
-    Table dstTbl = targetTableRef_.getTable();
-    // Only Kudu tables can be updated
-    if (!(dstTbl instanceof KuduTable)) {
-      throw new AnalysisException(
-          format("Impala does not support modifying a non-Kudu table: %s",
-              dstTbl.getFullName()));
-    }
-    table_ = (KuduTable) dstTbl;
-
-    // Make sure that the user is allowed to modify the target table. Since no
-    // UPDATE / DELETE privilege exists, we reuse the INSERT one.
-    analyzer.registerPrivReq(new PrivilegeRequestBuilder()
-        .onTable(table_.getDb().getName(), table_.getName())
-        .allOf(Privilege.INSERT).toRequest());
-
-    // Validates the assignments_ and creates the sourceStmt_.
-    if (sourceStmt_ == null) createSourceStmt(analyzer);
-    sourceStmt_.analyze(analyzer);
-  }
-
-  @Override
-  public void reset() {
-    super.reset();
-    fromClause_.reset();
-    if (sourceStmt_ != null) sourceStmt_.reset();
-    table_ = null;
-  }
-
-  /**
-   * Builds and validates the sourceStmt_. The select list of the sourceStmt_ contains
-   * first the SlotRefs for the key Columns, followed by the expressions representing the
-   * assignments. This method sets the member variables for the sourceStmt_ and the
-   * referencedColumns_.
-   *
-   * This is only run once, on the first analysis. Subsequent analyses will reset() and
-   * reuse the previously created statement.
-   */
-  private void createSourceStmt(Analyzer analyzer)
-      throws AnalysisException {
-    // Builds the select list and column position mapping for the target table.
-    ArrayList<SelectListItem> selectList = Lists.newArrayList();
-    referencedColumns_ = Lists.newArrayList();
-    buildAndValidateAssignmentExprs(analyzer, selectList, referencedColumns_);
-
-    // Analyze the generated select statement.
-    sourceStmt_ = new SelectStmt(new SelectList(selectList), fromClause_, wherePredicate_,
-        null, null, null, null);
-
-    // cast result expressions to the correct type of the referenced slot of the
-    // target table
-    int keyColumnsOffset = table_.getKuduKeyColumnNames().size();
-    for (int i = keyColumnsOffset; i < sourceStmt_.resultExprs_.size(); ++i) {
-      sourceStmt_.resultExprs_.set(i, sourceStmt_.resultExprs_.get(i).castTo(
-          assignments_.get(i - keyColumnsOffset).first.getType()));
-    }
-  }
-
-  /**
-   * Validates the list of value assignments that should be used to modify the target
-   * table. It verifies that only columns belonging to the target table are referenced,
-   * that no key columns are modified, and that no column is modified more than once.
-   * Analyzes the Exprs and SlotRefs of assignments_ and writes a list of
-   * SelectListItems to the out parameter selectList that is used to build the select list
-   * for sourceStmt_. A list of integers indicating the column position of an entry in the
-   * select list in the target table is written to the out parameter referencedColumns.
-   *
-   * In addition to the expressions that are generated for each assignment, the
-   * expression list contains an expression for each key column. The key columns
-   * are always prepended to the list of expression representing the assignments.
-   */
-  private void buildAndValidateAssignmentExprs(Analyzer analyzer,
-      ArrayList<SelectListItem> selectList, ArrayList<Integer> referencedColumns)
-      throws AnalysisException {
-    // The order of the referenced columns equals the order of the result expressions
-    HashSet<SlotId> uniqueSlots = Sets.newHashSet();
-    HashSet<SlotId> keySlots = Sets.newHashSet();
-
-    // Mapping from column name to index
-    ArrayList<Column> cols = table_.getColumnsInHiveOrder();
-    HashMap<String, Integer> colIndexMap = Maps.newHashMap();
-    for (int i = 0; i < cols.size(); i++) {
-      colIndexMap.put(cols.get(i).getName(), i);
-    }
-
-    // Add the key columns as slot refs
-    for (String k : table_.getKuduKeyColumnNames()) {
-      ArrayList<String> path = Path.createRawPath(targetTableRef_.getUniqueAlias(), k);
-      SlotRef ref = new SlotRef(path);
-      ref.analyze(analyzer);
-      selectList.add(new SelectListItem(ref, null));
-      uniqueSlots.add(ref.getSlotId());
-      keySlots.add(ref.getSlotId());
-      referencedColumns.add(colIndexMap.get(k));
-    }
-
-    // Assignments are only used in the context of updates.
-    for (Pair<SlotRef, Expr> valueAssignment : assignments_) {
-      Expr rhsExpr = valueAssignment.second;
-      rhsExpr.analyze(analyzer);
-
-      SlotRef lhsSlotRef = valueAssignment.first;
-      lhsSlotRef.analyze(analyzer);
-
-      // Correct target table
-      if (!lhsSlotRef.isBoundByTupleIds(targetTableRef_.getId().asList())) {
-        throw new AnalysisException(
-            format("Left-hand side column '%s' in assignment expression '%s=%s' does not "
-                + "belong to target table '%s'", lhsSlotRef.toSql(), lhsSlotRef.toSql(),
-                rhsExpr.toSql(), targetTableRef_.getDesc().getTable().getFullName()));
-      }
-
-      // No subqueries for rhs expression
-      if (rhsExpr.contains(Subquery.class)) {
-        throw new AnalysisException(
-            format("Subqueries are not supported as update expressions for column '%s'",
-                lhsSlotRef.toSql()));
-      }
-
-      Column c = lhsSlotRef.getResolvedPath().destColumn();
-      // TODO(Kudu) Add test for this code-path when Kudu supports nested types
-      if (c == null) {
-        throw new AnalysisException(
-            format("Left-hand side in assignment expression '%s=%s' must be a column " +
-                "reference", lhsSlotRef.toSql(), rhsExpr.toSql()));
-      }
-
-      if (keySlots.contains(lhsSlotRef.getSlotId())) {
-        throw new AnalysisException(format("Key column '%s' cannot be updated.",
-            lhsSlotRef.toSql()));
-      }
-
-      if (uniqueSlots.contains(lhsSlotRef.getSlotId())) {
-        throw new AnalysisException(
-            format("Duplicate value assignment to column: '%s'", lhsSlotRef.toSql()));
-      }
-
-      rhsExpr = checkTypeCompatibility(
-          targetTableRef_.getDesc().getTable().getFullName(), c, rhsExpr);
-      uniqueSlots.add(lhsSlotRef.getSlotId());
-      selectList.add(new SelectListItem(rhsExpr, null));
-      referencedColumns.add(colIndexMap.get(c.getName()));
-    }
-  }
-
-  public QueryStmt getQueryStmt() { return sourceStmt_; }
-  public abstract DataSink createDataSink();
-  public abstract String toSql();
-
-
-}
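
The position mapping that createSourceStmt()/buildAndValidateAssignmentExprs() build is the part of ModifyStmt that is hardest to picture from the prose: key columns come first in the generated select list, followed by one expression per assignment, and referencedColumns_[i] records which target-table column the i'th select-list item writes to. A minimal standalone sketch of that mapping (plain Java, not Impala code; table and column names are hypothetical):

    // Standalone sketch, not part of this commit.
    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class ModifyColumnMappingSketch {
      /**
       * For a select list laid out as [key columns..., assigned columns...], returns the
       * index of each entry in the target table's column order.
       */
      static List<Integer> referencedColumns(List<String> tableColumns,
          List<String> keyColumns, List<String> assignedColumns) {
        Map<String, Integer> colIndex = new HashMap<>();
        for (int i = 0; i < tableColumns.size(); i++) colIndex.put(tableColumns.get(i), i);

        List<Integer> referenced = new ArrayList<>();
        for (String k : keyColumns) referenced.add(colIndex.get(k));        // keys first
        for (String a : assignedColumns) referenced.add(colIndex.get(a));   // then assignments
        return referenced;
      }

      public static void main(String[] args) {
        // Hypothetical Kudu table: (id PRIMARY KEY, name, score).
        List<String> tableColumns = List.of("id", "name", "score");
        // UPDATE ... SET score = ... produces a select list of [id, <rhs of score>].
        System.out.println(referencedColumns(tableColumns, List.of("id"), List.of("score")));
        // -> [0, 2]: select-list position 0 writes column 0, position 1 writes column 2.
      }
    }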

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/NullLiteral.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/NullLiteral.java b/fe/src/main/java/com/cloudera/impala/analysis/NullLiteral.java
deleted file mode 100644
index 212d601..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/NullLiteral.java
+++ /dev/null
@@ -1,90 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.thrift.TExprNode;
-import com.cloudera.impala.thrift.TExprNodeType;
-import com.google.common.base.Objects;
-import com.google.common.base.Preconditions;
-
-public class NullLiteral extends LiteralExpr {
-
-  public NullLiteral() {
-    type_ = Type.NULL;
-    evalCost_ = LITERAL_COST;
-  }
-
-  /**
-   * Copy c'tor used in clone().
-   */
-  protected NullLiteral(NullLiteral other) {
-    super(other);
-  }
-
-  /**
-   * Returns an analyzed NullLiteral of the specified type.
-   */
-  public static NullLiteral create(Type type) {
-    NullLiteral l = new NullLiteral();
-    l.analyzeNoThrow(null);
-    l.uncheckedCastTo(type);
-    return l;
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (!super.equals(obj)) return false;
-    return obj instanceof NullLiteral;
-  }
-
-  @Override
-  public int hashCode() { return 0; }
-
-  @Override
-  public String toSqlImpl() { return getStringValue(); }
-
-  @Override
-  public String debugString() {
-    return Objects.toStringHelper(this).addValue(super.debugString()).toString();
-  }
-
-  @Override
-  public String getStringValue() { return "NULL"; }
-
-  @Override
-  protected Expr uncheckedCastTo(Type targetType) {
-    Preconditions.checkState(targetType.isValid());
-    type_ = targetType;
-    return this;
-  }
-
-  @Override
-  protected void toThrift(TExprNode msg) {
-    msg.node_type = TExprNodeType.NULL_LITERAL;
-  }
-
-  @Override
-  public Expr clone() { return new NullLiteral(this); }
-
-  @Override
-  protected void resetAnalysisState() {
-    super.resetAnalysisState();
-    type_ = Type.NULL;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/NumericLiteral.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/NumericLiteral.java b/fe/src/main/java/com/cloudera/impala/analysis/NumericLiteral.java
deleted file mode 100644
index f3bc9da..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/NumericLiteral.java
+++ /dev/null
@@ -1,317 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.math.BigDecimal;
-import java.math.BigInteger;
-
-import com.cloudera.impala.catalog.ScalarType;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.NotImplementedException;
-import com.cloudera.impala.thrift.TDecimalLiteral;
-import com.cloudera.impala.thrift.TExprNode;
-import com.cloudera.impala.thrift.TExprNodeType;
-import com.cloudera.impala.thrift.TFloatLiteral;
-import com.cloudera.impala.thrift.TIntLiteral;
-import com.google.common.base.Objects;
-import com.google.common.base.Preconditions;
-
-/**
- * Literal for all numeric values, including integer, floating-point and decimal types.
- * Analysis of this expr determines the smallest type that can hold this value.
- */
-public class NumericLiteral extends LiteralExpr {
-  // Use the java BigDecimal (arbitrary scale/precision) to represent the value.
-  // This object has notions of precision and scale but they do *not* match what
-  // we need. BigDecimal's precision is similar to significant figures and scale
-  // is the exponent.
-  // ".1" could be represented with an unscaled value = 1 and scale = 1 or
-  // unscaled value = 100 and scale = 3. Manipulating the value_ (e.g. multiplying
-  // it by 10) does not unnecessarily change the unscaled value. Special care
-  // needs to be taken when converting between the big decimals unscaled value
-  // and ours. (See getUnscaledValue()).
-  private BigDecimal value_;
-
-  // If true, this literal has been explicitly cast to a type and should not
-  // be analyzed (which infers the type from value_).
-  private boolean explicitlyCast_;
-
-  public NumericLiteral(BigDecimal value) {
-    init(value);
-  }
-
-  public NumericLiteral(String value, Type t) throws AnalysisException {
-    BigDecimal val = null;
-    try {
-      val = new BigDecimal(value);
-    } catch (NumberFormatException e) {
-      throw new AnalysisException("invalid numeric literal: " + value, e);
-    }
-    init(val);
-    this.analyze(null);
-    if (type_.isDecimal() && t.isDecimal()) {
-      // Verify that the input decimal value is consistent with the specified
-      // column type.
-      ScalarType scalarType = (ScalarType) t;
-      if (!scalarType.isSupertypeOf((ScalarType) type_)) {
-        StringBuilder errMsg = new StringBuilder();
-        errMsg.append("invalid ").append(t);
-        errMsg.append(" value: " + value);
-        throw new AnalysisException(errMsg.toString());
-      }
-    }
-    if (t.isFloatingPointType()) explicitlyCastToFloat(t);
-  }
-
-  /**
-   * The versions of the ctor that take types assume the type is correct
-   * and the NumericLiteral is created as analyzed with that type. The specified
-   * type is preserved across substitutions and re-analysis.
-   */
-  public NumericLiteral(BigInteger value, Type type) {
-    isAnalyzed_ = true;
-    value_ = new BigDecimal(value);
-    type_ = type;
-    evalCost_ = LITERAL_COST;
-    explicitlyCast_ = true;
-  }
-
-  public NumericLiteral(BigDecimal value, Type type) {
-    isAnalyzed_ = true;
-    value_ = value;
-    type_ = type;
-    evalCost_ = LITERAL_COST;
-    explicitlyCast_ = true;
-  }
-
-  /**
-   * Copy c'tor used in clone().
-   */
-  protected NumericLiteral(NumericLiteral other) {
-    super(other);
-    value_ = other.value_;
-    explicitlyCast_ = other.explicitlyCast_;
-  }
-
-  @Override
-  public String debugString() {
-    return Objects.toStringHelper(this)
-        .add("value", value_)
-        .add("type", type_)
-        .toString();
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (!super.equals(obj)) return false;
-    return ((NumericLiteral) obj).value_.equals(value_);
-  }
-
-  @Override
-  public int hashCode() { return value_.hashCode(); }
-
-  @Override
-  public String toSqlImpl() { return getStringValue(); }
-  @Override
-  public String getStringValue() { return value_.toString(); }
-  public double getDoubleValue() { return value_.doubleValue(); }
-  public long getLongValue() { return value_.longValue(); }
-  public long getIntValue() { return value_.intValue(); }
-
-  @Override
-  protected void toThrift(TExprNode msg) {
-    switch (type_.getPrimitiveType()) {
-      case TINYINT:
-      case SMALLINT:
-      case INT:
-      case BIGINT:
-        msg.node_type = TExprNodeType.INT_LITERAL;
-        msg.int_literal = new TIntLiteral(value_.longValue());
-        break;
-      case FLOAT:
-      case DOUBLE:
-        msg.node_type = TExprNodeType.FLOAT_LITERAL;
-        msg.float_literal = new TFloatLiteral(value_.doubleValue());
-        break;
-      case DECIMAL:
-        msg.node_type = TExprNodeType.DECIMAL_LITERAL;
-        TDecimalLiteral literal = new TDecimalLiteral();
-        literal.setValue(getUnscaledValue().toByteArray());
-        msg.decimal_literal = literal;
-        break;
-      default:
-        Preconditions.checkState(false);
-    }
-  }
-
-  public BigDecimal getValue() { return value_; }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    if (isAnalyzed_) return;
-    super.analyze(analyzer);
-    if (!explicitlyCast_) {
-      // Compute the precision and scale from the BigDecimal.
-      type_ = TypesUtil.computeDecimalType(value_);
-      if (type_ == null) {
-        Double d = new Double(value_.doubleValue());
-        if (d.isInfinite()) {
-          throw new AnalysisException("Numeric literal '" + toSql() +
-              "' exceeds maximum range of doubles.");
-        } else if (d.doubleValue() == 0 && value_.compareTo(BigDecimal.ZERO) != 0) {
-          throw new AnalysisException("Numeric literal '" + toSql() +
-              "' underflows minimum resolution of doubles.");
-        }
-
-        // Literal could not be stored in any of the supported decimal precisions and
-        // scale. Store it as a float/double instead.
-        float fvalue;
-        fvalue = value_.floatValue();
-        if (fvalue == value_.doubleValue()) {
-          type_ = Type.FLOAT;
-        } else {
-          type_ = Type.DOUBLE;
-        }
-      } else {
-        // Check for integer types.
-        Preconditions.checkState(type_.isScalarType());
-        ScalarType scalarType = (ScalarType) type_;
-        if (scalarType.decimalScale() == 0) {
-          if (value_.compareTo(BigDecimal.valueOf(Byte.MAX_VALUE)) <= 0 &&
-              value_.compareTo(BigDecimal.valueOf(Byte.MIN_VALUE)) >= 0) {
-            type_ = Type.TINYINT;
-          } else if (value_.compareTo(BigDecimal.valueOf(Short.MAX_VALUE)) <= 0 &&
-              value_.compareTo(BigDecimal.valueOf(Short.MIN_VALUE)) >= 0) {
-            type_ = Type.SMALLINT;
-          } else if (value_.compareTo(BigDecimal.valueOf(Integer.MAX_VALUE)) <= 0 &&
-              value_.compareTo(BigDecimal.valueOf(Integer.MIN_VALUE)) >= 0) {
-            type_ = Type.INT;
-          } else if (value_.compareTo(BigDecimal.valueOf(Long.MAX_VALUE)) <= 0 &&
-              value_.compareTo(BigDecimal.valueOf(Long.MIN_VALUE)) >= 0) {
-            type_ = Type.BIGINT;
-          }
-        }
-      }
-    }
-    evalCost_ = LITERAL_COST;
-    isAnalyzed_ = true;
-  }
-
-  /**
-   * Explicitly cast this literal to 'targetType'. The targetType must be a
-   * floating-point type.
-   */
-  protected void explicitlyCastToFloat(Type targetType) {
-    Preconditions.checkState(targetType.isFloatingPointType());
-    type_ = targetType;
-    explicitlyCast_ = true;
-  }
-
-  @Override
-  protected Expr uncheckedCastTo(Type targetType) throws AnalysisException {
-    Preconditions.checkState(targetType.isNumericType());
-    // Implicit casting to decimals allows truncating digits from the left of the
-    // decimal point (see TypesUtil). A literal that is implicitly cast to a decimal
-    // with truncation is wrapped into a CastExpr so the BE can evaluate it and report
-    // a warning. This behavior is consistent with casting/overflow of non-constant
-    // exprs that return decimal.
-    // IMPALA-1837: Without the CastExpr wrapping, such literals can exceed the max
-    // expected byte size sent to the BE in toThrift().
-    if (targetType.isDecimal()) {
-      ScalarType decimalType = (ScalarType) targetType;
-      // analyze() ensures that value_ never exceeds the maximum scale and precision.
-      Preconditions.checkState(isAnalyzed_);
-      // Sanity check that our implicit casting does not allow a reduced precision or
-      // truncating values from the right of the decimal point.
-      Preconditions.checkState(value_.precision() <= decimalType.decimalPrecision());
-      Preconditions.checkState(value_.scale() <= decimalType.decimalScale());
-      int valLeftDigits = value_.precision() - value_.scale();
-      int typeLeftDigits = decimalType.decimalPrecision() - decimalType.decimalScale();
-      if (typeLeftDigits < valLeftDigits) return new CastExpr(targetType, this);
-    }
-    type_ = targetType;
-    return this;
-  }
-
-  @Override
-  public void swapSign() throws NotImplementedException {
-    // swapping sign does not change the type
-    value_ = value_.negate();
-  }
-
-  @Override
-  public int compareTo(LiteralExpr o) {
-    int ret = super.compareTo(o);
-    if (ret != 0) return ret;
-    NumericLiteral other = (NumericLiteral) o;
-    return value_.compareTo(other.value_);
-  }
-
-  private void init(BigDecimal value) {
-    isAnalyzed_ = false;
-    value_ = value;
-  }
-
-  // Returns the unscaled value of this literal. BigDecimal doesn't treat scale
-  // the way we do. We need to pad it out with zeros or truncate as necessary.
-  private BigInteger getUnscaledValue() {
-    Preconditions.checkState(type_.isDecimal());
-    BigInteger result = value_.unscaledValue();
-    int valueScale = value_.scale();
-    // If valueScale is less than 0, it indicates a power of 10 by which to multiply
-    // the unscaled value. That case is also handled here by padding with zeros.
-    // e.g. unscaled value = 123, value scale = -2 means 12300.
-    ScalarType decimalType = (ScalarType) type_;
-    return result.multiply(BigInteger.TEN.pow(decimalType.decimalScale() - valueScale));
-  }
-
-  @Override
-  public Expr clone() { return new NumericLiteral(this); }
-
-  /**
-   * Check overflow.
-   */
-  public static boolean isOverflow(BigDecimal value, Type type)
-      throws AnalysisException {
-    switch (type.getPrimitiveType()) {
-      case TINYINT:
-        return (value.compareTo(BigDecimal.valueOf(Byte.MAX_VALUE)) > 0 ||
-            value.compareTo(BigDecimal.valueOf(Byte.MIN_VALUE)) < 0);
-      case SMALLINT:
-        return (value.compareTo(BigDecimal.valueOf(Short.MAX_VALUE)) > 0 ||
-            value.compareTo(BigDecimal.valueOf(Short.MIN_VALUE)) < 0);
-      case INT:
-        return (value.compareTo(BigDecimal.valueOf(Integer.MAX_VALUE)) > 0 ||
-            value.compareTo(BigDecimal.valueOf(Integer.MIN_VALUE)) < 0);
-      case BIGINT:
-        return (value.compareTo(BigDecimal.valueOf(Long.MAX_VALUE)) > 0 ||
-            value.compareTo(BigDecimal.valueOf(Long.MIN_VALUE)) < 0);
-      case FLOAT:
-        // Note: Float.MIN_VALUE is the smallest positive float, not the most negative
-        // one, so the lower bound must be -Float.MAX_VALUE (likewise for DOUBLE below).
-        return (value.compareTo(BigDecimal.valueOf(Float.MAX_VALUE)) > 0 ||
-            value.compareTo(BigDecimal.valueOf(-Float.MAX_VALUE)) < 0);
-      case DOUBLE:
-        return (value.compareTo(BigDecimal.valueOf(Double.MAX_VALUE)) > 0 ||
-            value.compareTo(BigDecimal.valueOf(-Double.MAX_VALUE)) < 0);
-      case DECIMAL:
-        return (TypesUtil.computeDecimalType(value) == null);
-      default:
-        throw new AnalysisException("Overflow check on " + type + " isn't supported.");
-    }
-  }
-}
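
The subtlety that getUnscaledValue() guards against is that BigDecimal's unscaled value depends on how the literal happened to be written: "0.1" and "0.100" compare equal but carry different unscaled values, so the value has to be rescaled to the decimal type's scale before its bytes are shipped to the backend. A standalone sketch of that rescaling (plain Java, using only java.math, with a hypothetical target scale of 3):

    // Standalone sketch, not part of this commit.
    import java.math.BigDecimal;
    import java.math.BigInteger;

    public class UnscaledValueSketch {
      /** Pads the unscaled value with zeros so it matches the target decimal scale. */
      static BigInteger unscaledAt(BigDecimal value, int targetScale) {
        // e.g. value = 0.1 (unscaled 1, scale 1), targetScale = 3 -> 1 * 10^(3-1) = 100
        return value.unscaledValue().multiply(BigInteger.TEN.pow(targetScale - value.scale()));
      }

      public static void main(String[] args) {
        BigDecimal a = new BigDecimal("0.1");    // unscaled 1, scale 1
        BigDecimal b = new BigDecimal("0.100");  // unscaled 100, scale 3
        System.out.println(a.unscaledValue() + " vs " + b.unscaledValue());  // 1 vs 100
        // Rescaled to the same target scale, they agree again:
        System.out.println(unscaledAt(a, 3) + " == " + unscaledAt(b, 3));    // 100 == 100
      }
    }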

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/OrderByElement.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/OrderByElement.java b/fe/src/main/java/com/cloudera/impala/analysis/OrderByElement.java
deleted file mode 100644
index d430152..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/OrderByElement.java
+++ /dev/null
@@ -1,156 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.List;
-
-import com.google.common.collect.Lists;
-
-
-/**
- * Combination of expr, ASC/DESC, and nulls ordering.
- */
-public class OrderByElement {
-  private Expr expr_;
-  private final boolean isAsc_;
-  // Represents the NULLs ordering specified: true when "NULLS FIRST", false when
-  // "NULLS LAST", and null if not specified.
-  private final Boolean nullsFirstParam_;
-
-  /**
-   * Constructs the OrderByElement.
-   *
-   * 'nullsFirstParam' should be true if "NULLS FIRST", false if "NULLS LAST", or null if
-   * the NULLs order was not specified.
-   */
-  public OrderByElement(Expr expr, boolean isAsc, Boolean nullsFirstParam) {
-    super();
-    expr_ = expr;
-    isAsc_ = isAsc;
-    nullsFirstParam_ = nullsFirstParam;
-  }
-
-  /**
-   * C'tor for cloning.
-   */
-  private OrderByElement(OrderByElement other) {
-    expr_ = other.expr_.clone();
-    isAsc_ = other.isAsc_;
-    if (other.nullsFirstParam_ != null) {
-      nullsFirstParam_ = new Boolean(other.nullsFirstParam_.booleanValue());
-    } else {
-      nullsFirstParam_ = null;
-    }
-  }
-
-  public Expr getExpr() { return expr_; }
-  public void setExpr(Expr e) { expr_ = e; }
-  public boolean isAsc() { return isAsc_; }
-  public Boolean getNullsFirstParam() { return nullsFirstParam_; }
-  public boolean nullsFirst() { return nullsFirst(nullsFirstParam_, isAsc_); }
-
-  public String toSql() {
-    StringBuilder strBuilder = new StringBuilder();
-    strBuilder.append(expr_.toSql());
-    strBuilder.append(isAsc_ ? " ASC" : " DESC");
-    // When ASC and NULLS LAST or DESC and NULLS FIRST, we do not print NULLS FIRST/LAST
-    // because it is the default behavior and we want to avoid printing NULLS FIRST/LAST
-    // whenever possible as it is incompatible with Hive (SQL compatibility with Hive is
-    // important for views).
-    if (nullsFirstParam_ != null) {
-      if (isAsc_ && nullsFirstParam_) {
-        // If ascending, nulls are last by default, so only add if nulls first.
-        strBuilder.append(" NULLS FIRST");
-      } else if (!isAsc_ && !nullsFirstParam_) {
-        // If descending, nulls are first by default, so only add if nulls last.
-        strBuilder.append(" NULLS LAST");
-      }
-    }
-    return strBuilder.toString();
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (obj == null) return false;
-    if (obj.getClass() != this.getClass()) return false;
-    OrderByElement o = (OrderByElement)obj;
-    boolean nullsFirstEqual =
-      (nullsFirstParam_ == null) == (o.nullsFirstParam_ == null);
-    if (nullsFirstParam_ != null && nullsFirstEqual) {
-      nullsFirstEqual = nullsFirstParam_.equals(o.nullsFirstParam_);
-    }
-    return expr_.equals(o.expr_) && isAsc_ == o.isAsc_ && nullsFirstEqual;
-  }
-
-  @Override
-  public OrderByElement clone() { return new OrderByElement(this); }
-
-  /**
-   * Compute nullsFirst.
-   *
-   * @param nullsFirstParam True if "NULLS FIRST", false if "NULLS LAST", or null if
-   *                        the NULLs order was not specified.
-   * @param isAsc
-   * @return Returns true if nulls are ordered first or false if nulls are ordered last.
-   *         Independent of isAsc.
-   */
-  public static boolean nullsFirst(Boolean nullsFirstParam, boolean isAsc) {
-    return nullsFirstParam == null ? !isAsc : nullsFirstParam;
-  }
-
-  /**
-   * Returns a new list of order-by elements with the order by exprs of src substituted
-   * according to smap. Preserves the other sort params from src.
-   */
-  public static List<OrderByElement> substitute(List<OrderByElement> src,
-      ExprSubstitutionMap smap, Analyzer analyzer) {
-    List<OrderByElement> result = Lists.newArrayListWithCapacity(src.size());
-    for (OrderByElement element: src) {
-      result.add(new OrderByElement(element.getExpr().substitute(smap, analyzer, false),
-          element.isAsc_, element.nullsFirstParam_));
-    }
-    return result;
-  }
-
-  /**
-   * Extracts the order-by exprs from the list of order-by elements and returns them.
-   */
-  public static List<Expr> getOrderByExprs(List<OrderByElement> src) {
-    List<Expr> result = Lists.newArrayListWithCapacity(src.size());
-    for (OrderByElement element: src) {
-      result.add(element.getExpr());
-    }
-    return result;
-  }
-
-  /**
-   * Returns a new list of OrderByElements with the same (cloned) expressions but the
-   * ordering direction reversed (asc becomes desc, nulls first becomes nulls last, etc.)
-   */
-  public static List<OrderByElement> reverse(List<OrderByElement> src) {
-    List<OrderByElement> result = Lists.newArrayListWithCapacity(src.size());
-    for (int i = 0; i < src.size(); ++i) {
-      OrderByElement element = src.get(i);
-      OrderByElement reverseElement =
-          new OrderByElement(element.getExpr().clone(), !element.isAsc_,
-              Boolean.valueOf(!nullsFirst(element.nullsFirstParam_, element.isAsc_)));
-      result.add(reverseElement);
-    }
-    return result;
-  }
-}
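
The default NULLs ordering that both toSql() and nullsFirst() encode is: ascending sorts put NULLs last unless NULLS FIRST is written, and descending sorts put NULLs first unless NULLS LAST is written. A standalone sketch of that rule and of the cases where toSql() stays silent (plain Java, not Impala code):

    // Standalone sketch, not part of this commit.
    public class NullsOrderSketch {
      /** Same rule as OrderByElement.nullsFirst(): NULLS LAST by default for ASC, NULLS FIRST for DESC. */
      static boolean nullsFirst(Boolean nullsFirstParam, boolean isAsc) {
        return nullsFirstParam == null ? !isAsc : nullsFirstParam;
      }

      /** The suffix toSql() prints; empty when the requested ordering matches the default. */
      static String nullsSuffix(Boolean nullsFirstParam, boolean isAsc) {
        if (nullsFirstParam == null) return "";
        if (isAsc && nullsFirstParam) return " NULLS FIRST";
        if (!isAsc && !nullsFirstParam) return " NULLS LAST";
        return "";
      }

      public static void main(String[] args) {
        System.out.println(nullsFirst(null, true));               // false: ASC defaults to NULLS LAST
        System.out.println(nullsFirst(null, false));              // true:  DESC defaults to NULLS FIRST
        System.out.println("c1 ASC" + nullsSuffix(true, true));   // c1 ASC NULLS FIRST (non-default)
        System.out.println("c1 ASC" + nullsSuffix(false, true));  // c1 ASC (default, nothing printed)
      }
    }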

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/ParseNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/ParseNode.java b/fe/src/main/java/com/cloudera/impala/analysis/ParseNode.java
deleted file mode 100644
index 6f54e26..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/ParseNode.java
+++ /dev/null
@@ -1,34 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import com.cloudera.impala.common.AnalysisException;
-
-public interface ParseNode {
-
-  /**
-   * Perform semantic analysis of node and all of its children.
-   * Throws exception if any semantic errors were found.
-   */
-  public void analyze(Analyzer analyzer) throws AnalysisException;
-
-  /**
-   * Returns the SQL string corresponding to this node.
-   */
-  public String toSql();
-}
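
The interface above is the root contract that the statement and expression classes in this package implement: analyze() performs semantic checks against an Analyzer, and toSql() reproduces the SQL text. A minimal standalone analogue (plain Java, simplified signatures, no Impala types) just to show the shape of an implementer:

    // Standalone sketch, not part of this commit.
    public class ParseNodeSketch {
      /** Simplified analogue of the ParseNode contract: semantic analysis plus SQL rendering. */
      interface Node {
        void analyze();  // stands in for analyze(Analyzer) throws AnalysisException
        String toSql();
      }

      /** A trivial implementer: a LIMIT clause whose value must be non-negative. */
      static final class Limit implements Node {
        private final long limit;
        Limit(long limit) { this.limit = limit; }
        @Override public void analyze() {
          if (limit < 0) throw new IllegalStateException("LIMIT must be non-negative: " + limit);
        }
        @Override public String toSql() { return "LIMIT " + limit; }
      }

      public static void main(String[] args) {
        Node n = new Limit(10);
        n.analyze();
        System.out.println(n.toSql());  // LIMIT 10
      }
    }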

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/PartitionKeyValue.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/PartitionKeyValue.java b/fe/src/main/java/com/cloudera/impala/analysis/PartitionKeyValue.java
deleted file mode 100644
index 90c44f2..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/PartitionKeyValue.java
+++ /dev/null
@@ -1,88 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import com.cloudera.impala.common.AnalysisException;
-import com.google.common.base.Preconditions;
-
-/**
- * Representation of a single column:value element in the PARTITION (...) clause of an
- * insert or alter table statement.
- */
-public class PartitionKeyValue {
-  // Name of partitioning column.
-  private final String colName_;
-  // Value of partitioning column. Set to null for dynamic inserts.
-  private final Expr value_;
-  // Evaluation of value for static partition keys, null otherwise. Set in analyze().
-  private LiteralExpr literalValue_;
-
-  public PartitionKeyValue(String colName, Expr value) {
-    this.colName_ = colName.toLowerCase();
-    this.value_ = value;
-  }
-
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    if (isStatic() && !value_.isConstant()) {
-      throw new AnalysisException(
-          String.format("Non-constant expressions are not supported " +
-              "as static partition-key values in '%s'.", toString()));
-    }
-    if (value_ == null) return;
-    value_.analyze(analyzer);
-    literalValue_ = LiteralExpr.create(value_, analyzer.getQueryCtx());
-  }
-
-  public String getColName() { return colName_; }
-  public Expr getValue() { return value_; }
-  public LiteralExpr getLiteralValue() { return literalValue_; }
-  public boolean isDynamic() { return value_ == null; }
-  public boolean isStatic() { return !isDynamic(); }
-
-  @Override
-  public String toString() {
-    return isStatic() ? colName_ + "=" + value_.toSql() : colName_;
-  }
-
-  /**
-   * Returns a binary predicate as a SQL string which matches the column and value of this
-   * PartitionKeyValue. If the value is null, correctly substitutes 'IS' as the operator.
-   */
-  public String toPredicateSql() {
-    String ident = ToSqlUtils.getIdentSql(colName_);
-    if (literalValue_ instanceof NullLiteral ||
-        literalValue_.getStringValue().isEmpty()) {
-      return ident + " IS NULL";
-    }
-    return isStatic() ? ident + "=" + value_.toSql() : ident;
-  }
-
-  /**
-   * Utility method that returns the string value for the given partition key. For
-   * NULL values (a NullLiteral type) or empty literal values this will return the
-   * given null partition key value.
-   */
-  public static String getPartitionKeyValueString(LiteralExpr literalValue,
-      String nullPartitionKeyValue) {
-    Preconditions.checkNotNull(literalValue);
-    if (literalValue instanceof NullLiteral || literalValue.getStringValue().isEmpty()) {
-      return nullPartitionKeyValue;
-    }
-    return literalValue.getStringValue();
-  }
-}
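
getPartitionKeyValueString() above is the rule that turns a static partition value into the string stored for the partition: a NULL literal or an empty literal maps to the null partition key value Hive is configured with, and anything else is used verbatim. A standalone sketch of that mapping, with plain strings standing in for LiteralExpr:

// Standalone sketch of the NULL/empty mapping described above. The value
// "__HIVE_DEFAULT_PARTITION__" is Hive's default null partition key value;
// the real code reads the configured value from the table metadata.
public class PartitionValueExample {
  static String partitionKeyValueString(String literal, String nullPartitionKeyValue) {
    if (literal == null || literal.isEmpty()) return nullPartitionKeyValue;
    return literal;
  }

  public static void main(String[] args) {
    String nullValue = "__HIVE_DEFAULT_PARTITION__";
    System.out.println(partitionKeyValueString("2016-09-30", nullValue)); // 2016-09-30
    System.out.println(partitionKeyValueString("", nullValue));           // __HIVE_DEFAULT_PARTITION__
  }
}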

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/PartitionListItem.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/PartitionListItem.java b/fe/src/main/java/com/cloudera/impala/analysis/PartitionListItem.java
deleted file mode 100644
index 1ffc51e..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/PartitionListItem.java
+++ /dev/null
@@ -1,37 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-/**
- * Representation of a single column:value element in the PARTITION (...) clause of an insert
- * statement.
- */
-public class PartitionListItem {
-  // Name of partitioning column.
-  private final String colName_;
-  // Value of partitioning column. Set to null for dynamic inserts.
-  private final LiteralExpr value_;
-
-  public PartitionListItem(String colName, LiteralExpr value) {
-    this.colName_ = colName;
-    this.value_ = value;
-  }
-
-  public String getColName() { return colName_; }
-  public LiteralExpr getValue() { return value_; }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/PartitionSpec.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/PartitionSpec.java b/fe/src/main/java/com/cloudera/impala/analysis/PartitionSpec.java
deleted file mode 100644
index 92bf0ae..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/PartitionSpec.java
+++ /dev/null
@@ -1,201 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.List;
-import java.util.Set;
-
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.catalog.Column;
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TPartitionKeyValue;
-import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Sets;
-
-/*
- * Represents a partition spec - a collection of partition key/values.
- */
-public class PartitionSpec implements ParseNode {
-  private final ImmutableList<PartitionKeyValue> partitionSpec_;
-  private TableName tableName_;
-  private Boolean partitionShouldExist_;
-  private Privilege privilegeRequirement_;
-
-  // Flag to determine if the partition already exists in the target table.
-  // Set during analysis.
-  private Boolean partitionExists_;
-
-  // The value Hive is configured to use for NULL partition key values.
-  // Set during analysis.
-  private String nullPartitionKeyValue_;
-
-  public PartitionSpec(List<PartitionKeyValue> partitionSpec) {
-    this.partitionSpec_ = ImmutableList.copyOf(partitionSpec);
-  }
-
-  public List<PartitionKeyValue> getPartitionSpecKeyValues() {
-    return partitionSpec_;
-  }
-
-  public String getTbl() { return tableName_.getTbl(); }
-  public void setTableName(TableName tableName) { this.tableName_ = tableName; }
-  public boolean partitionExists() {
-    Preconditions.checkNotNull(partitionExists_);
-    return partitionExists_;
-  }
-
-  // The value Hive is configured to use for NULL partition key values.
-  // Set during analysis.
-  public String getNullPartitionKeyValue() {
-    Preconditions.checkNotNull(nullPartitionKeyValue_);
-    return nullPartitionKeyValue_;
-  }
-
-  // If set, an additional analysis check will be performed to validate the target table
-  // contains the given partition spec.
-  public void setPartitionShouldExist() { partitionShouldExist_ = Boolean.TRUE; }
-
-  // If set, an additional analysis check will be performed to validate the target table
-  // does not contain the given partition spec.
-  public void setPartitionShouldNotExist() { partitionShouldExist_ = Boolean.FALSE; }
-
-  // Set the privilege requirement for this partition spec. Must be set prior to
-  // analysis.
-  public void setPrivilegeRequirement(Privilege privilege) {
-    privilegeRequirement_ = privilege;
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    Preconditions.checkNotNull(tableName_);
-    Preconditions.checkNotNull(privilegeRequirement_);
-
-    // Skip adding an audit event when analyzing partitions. The parent table should
-    // be audited outside of the PartitionSpec.
-    Table table = analyzer.getTable(tableName_, privilegeRequirement_, false);
-    String tableName = table.getDb().getName() + "." + getTbl();
-
-    // Make sure the target table is partitioned.
-    if (table.getMetaStoreTable().getPartitionKeysSize() == 0) {
-      throw new AnalysisException("Table is not partitioned: " + tableName);
-    }
-
-    // Make sure static partition key values only contain constant exprs.
-    for (PartitionKeyValue kv: partitionSpec_) {
-      kv.analyze(analyzer);
-    }
-
-    // Get all keys in the target table.
-    Set<String> targetPartitionKeys = Sets.newHashSet();
-    for (FieldSchema fs: table.getMetaStoreTable().getPartitionKeys()) {
-      targetPartitionKeys.add(fs.getName().toLowerCase());
-    }
-
-    // All partition keys need to be specified.
-    if (targetPartitionKeys.size() != partitionSpec_.size()) {
-      throw new AnalysisException(String.format("Items in partition spec must exactly " +
-          "match the partition columns in the table definition: %s (%d vs %d)",
-          tableName, partitionSpec_.size(), targetPartitionKeys.size()));
-    }
-
-    Set<String> keyNames = Sets.newHashSet();
-    // Validate each partition key/value specified, ensuring a matching partition column
-    // exists in the target table, no duplicate keys were specified, and that all the
-    // column types are compatible.
-    for (PartitionKeyValue pk: partitionSpec_) {
-      if (!keyNames.add(pk.getColName().toLowerCase())) {
-        throw new AnalysisException("Duplicate partition key name: " + pk.getColName());
-      }
-
-      Column c = table.getColumn(pk.getColName());
-      if (c == null) {
-        throw new AnalysisException(String.format(
-            "Partition column '%s' not found in table: %s", pk.getColName(), tableName));
-      } else if (!targetPartitionKeys.contains(pk.getColName().toLowerCase())) {
-        throw new AnalysisException(String.format(
-            "Column '%s' is not a partition column in table: %s",
-             pk.getColName(), tableName));
-      } else if (pk.getValue() instanceof NullLiteral) {
-        // No need for further analysis checks of this partition key value.
-        continue;
-      }
-
-      Type colType = c.getType();
-      Type literalType = pk.getValue().getType();
-      Type compatibleType =
-          Type.getAssignmentCompatibleType(colType, literalType, false);
-      if (!compatibleType.isValid()) {
-        throw new AnalysisException(String.format("Value of partition spec (column=%s) "
-            + "has incompatible type: '%s'. Expected type: '%s'.",
-            pk.getColName(), literalType, colType));
-      }
-      // Check for loss of precision with the partition value
-      if (!compatibleType.equals(colType)) {
-        throw new AnalysisException(
-            String.format("Partition key value may result in loss of precision.\n" +
-            "Would need to cast '%s' to '%s' for partition column: %s",
-            pk.getValue().toSql(), colType.toString(), pk.getColName()));
-      }
-    }
-    // Only HDFS tables are partitioned.
-    Preconditions.checkState(table instanceof HdfsTable);
-    HdfsTable hdfsTable = (HdfsTable) table;
-    nullPartitionKeyValue_ = hdfsTable.getNullPartitionKeyValue();
-
-    partitionExists_ = hdfsTable.getPartition(partitionSpec_) != null;
-    if (partitionShouldExist_ != null) {
-      if (partitionShouldExist_ && !partitionExists_) {
-          throw new AnalysisException("Partition spec does not exist: (" +
-              Joiner.on(", ").join(partitionSpec_) + ").");
-      } else if (!partitionShouldExist_ && partitionExists_) {
-          throw new AnalysisException("Partition spec already exists: (" +
-              Joiner.on(", ").join(partitionSpec_) + ").");
-      }
-    }
-  }
-
-  /*
-   * Returns the Thrift representation of this PartitionSpec.
-   */
-  public List<TPartitionKeyValue> toThrift() {
-    List<TPartitionKeyValue> thriftPartitionSpec = Lists.newArrayList();
-    for (PartitionKeyValue kv: partitionSpec_) {
-      String value = PartitionKeyValue.getPartitionKeyValueString(
-          kv.getLiteralValue(),  getNullPartitionKeyValue());
-      thriftPartitionSpec.add(new TPartitionKeyValue(kv.getColName(), value));
-    }
-    return thriftPartitionSpec;
-  }
-
-  @Override
-  public String toSql() {
-    List<String> partitionSpecStr = Lists.newArrayList();
-    for (PartitionKeyValue kv: partitionSpec_) {
-      partitionSpecStr.add(kv.getColName() + "=" + kv.getValue().toSql());
-    }
-    return String.format("PARTITION (%s)", Joiner.on(", ").join(partitionSpecStr));
-  }
-}
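
analyze() above validates a PARTITION (...) clause in several steps: the target table must be partitioned, the clause must name every partition column exactly once, each name must be a real partition column, and each static value must be assignment-compatible with the column type without losing precision. A standalone sketch of the coverage and column-existence checks, using plain collections and hypothetical names:

import java.util.Map;
import java.util.Set;

// Standalone sketch of two of the checks analyze() performs: the spec must
// cover all partition columns, and every key must name a partition column.
// (Duplicate detection and type-compatibility checks are omitted here.)
public class PartitionSpecCheckExample {
  static void validate(Map<String, String> spec, Set<String> partitionCols) {
    if (spec.size() != partitionCols.size()) {
      throw new IllegalArgumentException(String.format(
          "Items in partition spec must exactly match the partition columns (%d vs %d)",
          spec.size(), partitionCols.size()));
    }
    for (String key : spec.keySet()) {
      if (!partitionCols.contains(key.toLowerCase())) {
        throw new IllegalArgumentException("Not a partition column: " + key);
      }
    }
  }

  public static void main(String[] args) {
    validate(Map.of("year", "2016", "month", "9"), Set.of("year", "month")); // passes
    validate(Map.of("year", "2016"), Set.of("year", "month"));               // throws: 1 vs 2
  }
}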

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/Path.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/Path.java b/fe/src/main/java/com/cloudera/impala/analysis/Path.java
deleted file mode 100644
index 03c601c..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/Path.java
+++ /dev/null
@@ -1,448 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import com.cloudera.impala.catalog.ArrayType;
-import com.cloudera.impala.catalog.Column;
-import com.cloudera.impala.catalog.MapType;
-import com.cloudera.impala.catalog.StructField;
-import com.cloudera.impala.catalog.StructType;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.catalog.Type;
-import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-/**
- * Represents a resolved or unresolved dot-separated path that is rooted at a registered
- * tuple descriptor, catalog table/view, or an existing resolved path.
- *
- * This class implements the resolution logic for mapping an implicit or explicit
- * raw path to the corresponding physical types/positions in the schema tree.
- *
- * Implicit vs. Explicit Paths
- * The item of an array and the key/value of maps are accessed via their implicit field
- * names. However, if the type of an array item or a map value is a struct, then we allow
- * omitting the explicit reference to the struct type in paths for accessing fields
- * within that struct as a shorthand for user convenience. An explicit reference to the
- * struct type is always legal. Paths that explicitly reference such a struct are
- * "physical" because they typically map exactly to the schema representation in the
- * underlying storage format (e.g. Parquet/Avro). Paths that omit the struct reference
- * are called "implicit". During resolution, explicit paths are always preferred over
- * implicit paths for resolving ambiguities.
- *
- * Example
- * create table d.t (
- *   c array<struct<f:int,item:int,pos:int>>
- * );
- *
- * select ... from d.t.c
- *   d.t.c   <-- resolves to type array<struct<f:int,item:int,pos:int>>
- *   c alias <-- type struct<item:struct<f:int,item:int,pos:int>,pos:bigint>
- *
- * select c.item.f, c.f from d.t.c
- *   c.item.f   <-- explicit path to "f"
- *   c.f        <-- implicit path to "f", skips "item" reference
- *   (same for the unqualified versions item.f and f)
- *
- * select c.item, c.item.item from d.t.c
- *   c.item      <-- explicit path to "item" struct of type struct<f:int,item:int,pos:int>
- *   c.item.item <-- explicit path to the int "item"; there is no implicit path to the
- *                   int "item" due to the "item" name conflict
- *   c.pos       <-- explicit path to "pos" of type bigint
- *   c.item.pos  <-- explicit path to "pos" of type int; there is no implicit path to the
- *                   int "pos" due to the "pos" name conflict
- *   (same for unqualified versions item, item.item, pos, item.pos)
- *
- * Please refer to TestImplicitAndExplicitPaths() for analogous examples for maps.
- *
- * Illegal Implicit Paths
- * The intention of implicit paths is to allow users to skip a *single* trivial level of
- * indirection in common cases. In particular, it is illegal to implicitly skip multiple
- * levels in a path, illustrated as follows.
- *
- * Example
- * create table d.t (
- *   c array<array<struct<e:int,f:string>>>
- * );
- *
- * select c.f from d.t.c
- * select 1 from d.t.c, c.f
- *   c.f <-- illegal path because it would have to implicitly skip two 'item' fields
- *
- *
- * Uses of Paths and Terminology
- *
- * Uncorrelated References: Star exprs, SlotRefs and TableRefs that are rooted at a
- * catalog Table or a registered TupleDescriptor in the same query block.
- *
- * Relative References: TableRefs that are rooted at a TupleDescriptor.
- *
- * Correlated References: SlotRefs and TableRefs that are rooted at a TupleDescriptor
- * registered in an ancestor query block are called 'correlated'. All correlated
- * references are relative, but not all relative references are correlated.
- *
- * A Path itself is never said to be un/correlated because it is intentionally unaware
- * of the query block that it is used in.
- */
-public class Path {
-  // Implicit field names of collections.
-  public static final String ARRAY_ITEM_FIELD_NAME = "item";
-  public static final String ARRAY_POS_FIELD_NAME = "pos";
-  public static final String MAP_KEY_FIELD_NAME = "key";
-  public static final String MAP_VALUE_FIELD_NAME = "value";
-
-  public static enum PathType {
-    SLOT_REF,
-    TABLE_REF,
-    STAR,
-    ANY, // Reference to any field or table in schema.
-  }
-
-  // Implicit or explicit raw path to be resolved relative to rootDesc_ or rootTable_.
-  // Every raw-path element is mapped to zero, one or two types/positions in resolution.
-  private final List<String> rawPath_;
-
-  // Registered table alias that this path is rooted at, if any.
-  // Null if the path is rooted at a catalog table/view.
-  private final TupleDescriptor rootDesc_;
-
-  // Catalog table that this resolved path is rooted at, if any.
-  // Null if the path is rooted at a registered tuple that does not
-  // belong to a catalog table/view.
-  private final Table rootTable_;
-
-  // Root path that a relative path was created from.
-  private final Path rootPath_;
-
-  // List of matched types and field positions set during resolution. The matched
-  // types/positions describe the physical path through the schema tree.
-  private final List<Type> matchedTypes_ = Lists.newArrayList();
-  private final List<Integer> matchedPositions_ = Lists.newArrayList();
-
-  // Remembers the indices into rawPath_ and matchedTypes_ of the first collection
-  // matched during resolution.
-  private int firstCollectionPathIdx_ = -1;
-  private int firstCollectionTypeIdx_ = -1;
-
-  // Indicates whether this path has been resolved. Set in resolve().
-  private boolean isResolved_ = false;
-
-  // Caches the result of getAbsolutePath() to avoid re-computing it.
-  private List<Integer> absolutePath_ = null;
-
-  /**
-   * Constructs a Path rooted at the given rootDesc.
-   */
-  public Path(TupleDescriptor rootDesc, List<String> rawPath) {
-    Preconditions.checkNotNull(rootDesc);
-    Preconditions.checkNotNull(rawPath);
-    rootTable_ = rootDesc.getTable();
-    rootDesc_ = rootDesc;
-    rootPath_ = null;
-    rawPath_ = rawPath;
-  }
-
-  /**
-   * Constructs a Path rooted at the given rootTable.
-   */
-  public Path(Table rootTable, List<String> rawPath) {
-    Preconditions.checkNotNull(rootTable);
-    Preconditions.checkNotNull(rawPath);
-    rootTable_ = rootTable;
-    rootDesc_ = null;
-    rootPath_ = null;
-    rawPath_ = rawPath;
-  }
-
-  /**
-   * Constructs a new unresolved path relative to an existing resolved path.
-   */
-  public Path(Path rootPath, List<String> relRawPath) {
-    Preconditions.checkNotNull(rootPath);
-    Preconditions.checkState(rootPath.isResolved());
-    Preconditions.checkNotNull(relRawPath);
-    rootTable_ = rootPath.rootTable_;
-    rootDesc_ = rootPath.rootDesc_;
-    rootPath_ = rootPath;
-    rawPath_ = Lists.newArrayListWithCapacity(
-        rootPath.getRawPath().size() + relRawPath.size());
-    rawPath_.addAll(rootPath.getRawPath());
-    rawPath_.addAll(relRawPath);
-    matchedTypes_.addAll(rootPath.matchedTypes_);
-    matchedPositions_.addAll(rootPath.matchedPositions_);
-    firstCollectionPathIdx_ = rootPath.firstCollectionPathIdx_;
-    firstCollectionTypeIdx_ = rootPath.firstCollectionTypeIdx_;
-  }
-
-  /**
-   * Resolves this path in the context of the root tuple descriptor / root table
-   * or continues resolving this relative path from an existing root path.
-   * Returns true if the path could be fully resolved, false otherwise.
-   * A failed resolution leaves this Path in a partially resolved state.
-   */
-  public boolean resolve() {
-    if (isResolved_) return true;
-    Preconditions.checkState(rootDesc_ != null || rootTable_ != null);
-    Type currentType = null;
-    int rawPathIdx = 0;
-    if (rootPath_ != null) {
-      // Continue resolving this path relative to the rootPath_.
-      currentType = rootPath_.destType();
-      rawPathIdx = rootPath_.getRawPath().size();
-    } else if (rootDesc_ != null) {
-      currentType = rootDesc_.getType();
-    } else {
-      // Directly start from the item type because only implicit paths are allowed.
-      currentType = rootTable_.getType().getItemType();
-    }
-
-    // Map all remaining raw-path elements to field types and positions.
-    while (rawPathIdx < rawPath_.size()) {
-      if (!currentType.isComplexType()) return false;
-      StructType structType = getTypeAsStruct(currentType);
-      // Resolve explicit path.
-      StructField field = structType.getField(rawPath_.get(rawPathIdx));
-      if (field == null) {
-        // Resolve implicit path.
-        if (structType instanceof CollectionStructType) {
-          field = ((CollectionStructType) structType).getOptionalField();
-          // Collections must be matched explicitly.
-          if (field.getType().isCollectionType()) return false;
-        } else {
-          // Failed to resolve implicit or explicit path.
-          return false;
-        }
-        // Update the physical types/positions.
-        matchedTypes_.add(field.getType());
-        matchedPositions_.add(field.getPosition());
-        currentType = field.getType();
-        // Do not consume a raw-path element.
-        continue;
-      }
-      matchedTypes_.add(field.getType());
-      matchedPositions_.add(field.getPosition());
-      if (field.getType().isCollectionType() && firstCollectionPathIdx_ == -1) {
-        Preconditions.checkState(firstCollectionTypeIdx_ == -1);
-        firstCollectionPathIdx_ = rawPathIdx;
-        firstCollectionTypeIdx_ = matchedTypes_.size() - 1;
-      }
-      currentType = field.getType();
-      ++rawPathIdx;
-    }
-    Preconditions.checkState(matchedTypes_.size() == matchedPositions_.size());
-    Preconditions.checkState(matchedTypes_.size() >= rawPath_.size());
-    isResolved_ = true;
-    return true;
-  }
-
-  /**
-   * If the given type is a collection, returns a collection struct type representing
-   * named fields of its explicit path. Returns the given type itself if it is already
-   * a struct. Requires that the given type is a complex type.
-   */
-  public static StructType getTypeAsStruct(Type t) {
-    Preconditions.checkState(t.isComplexType());
-    if (t.isStructType()) return (StructType) t;
-    if (t.isArrayType()) {
-      return CollectionStructType.createArrayStructType((ArrayType) t);
-    } else {
-      Preconditions.checkState(t.isMapType());
-      return CollectionStructType.createMapStructType((MapType) t);
-    }
-  }
-
-  public Table getRootTable() { return rootTable_; }
-  public TupleDescriptor getRootDesc() { return rootDesc_; }
-  public boolean isRootedAtTable() { return rootTable_ != null; }
-  public boolean isRootedAtTuple() { return rootDesc_ != null; }
-  public List<String> getRawPath() { return rawPath_; }
-  public boolean isResolved() { return isResolved_; }
-
-  public List<Type> getMatchedTypes() {
-    Preconditions.checkState(isResolved_);
-    return matchedTypes_;
-  }
-
-  public List<Integer> getMatchedPositions() {
-    Preconditions.checkState(isResolved_);
-    return matchedPositions_;
-  }
-
-  public boolean hasNonDestCollection() {
-    Preconditions.checkState(isResolved_);
-    return firstCollectionPathIdx_ != -1 &&
-        firstCollectionPathIdx_ != rawPath_.size() - 1;
-  }
-
-  public String getFirstCollectionName() {
-    Preconditions.checkState(isResolved_);
-    if (firstCollectionPathIdx_ == -1) return null;
-    return rawPath_.get(firstCollectionPathIdx_);
-  }
-
-  public Type getFirstCollectionType() {
-    Preconditions.checkState(isResolved_);
-    if (firstCollectionTypeIdx_ == -1) return null;
-    return matchedTypes_.get(firstCollectionTypeIdx_);
-  }
-
-  public int getFirstCollectionIndex() {
-    Preconditions.checkState(isResolved_);
-    return firstCollectionTypeIdx_;
-  }
-
-  public Type destType() {
-    Preconditions.checkState(isResolved_);
-    if (!matchedTypes_.isEmpty()) return matchedTypes_.get(matchedTypes_.size() - 1);
-    if (rootDesc_ != null) return rootDesc_.getType();
-    if (rootTable_ != null) return rootTable_.getType();
-    return null;
-  }
-
-  public Table destTable() {
-    Preconditions.checkState(isResolved_);
-    if (rootTable_ != null && rootDesc_ == null && matchedTypes_.isEmpty()) {
-      return rootTable_;
-    }
-    return null;
-  }
-
-  /**
-   * Returns the destination Column of this path, or null if the destination of this
-   * path is not a Column. This path must be rooted at a table or a tuple descriptor
-   * corresponding to a table for the destination to be a Column.
-   */
-  public Column destColumn() {
-    Preconditions.checkState(isResolved_);
-    if (rootTable_ == null || rawPath_.size() != 1) return null;
-    return rootTable_.getColumn(rawPath_.get(rawPath_.size() - 1));
-  }
-
-  /**
-   * Returns the destination tuple descriptor of this path, or null
-   * if the destination of this path is not a registered alias.
-   */
-  public TupleDescriptor destTupleDesc() {
-    Preconditions.checkState(isResolved_);
-    if (rootDesc_ != null && matchedTypes_.isEmpty()) return rootDesc_;
-    return null;
-  }
-
-  public List<String> getFullyQualifiedRawPath() {
-    Preconditions.checkState(rootTable_ != null || rootDesc_ != null);
-    List<String> result = Lists.newArrayListWithCapacity(rawPath_.size() + 2);
-    if (rootDesc_ != null) {
-      result.addAll(Lists.newArrayList(rootDesc_.getAlias().split("\\.")));
-    } else {
-      result.add(rootTable_.getDb().getName());
-      result.add(rootTable_.getName());
-    }
-    result.addAll(rawPath_);
-    return result;
-  }
-
-  /**
-   * Returns the absolute explicit path starting from the fully-qualified table name.
- * The goal is to produce a canonical non-ambiguous path that can be used as an
-   * identifier for table and slot references.
-   *
-   * Example:
-   * create table mydb.test (a array<struct<f1:int,f2:string>>);
-   * use mydb;
-   * select f1 from test t, t.a;
-   *
-   * This function should return the following for the path of the 'f1' SlotRef:
-   * mydb.test.a.item.f1
-   */
-  public List<String> getCanonicalPath() {
-    List<String> result = Lists.newArrayList();
-    getCanonicalPath(result);
-    return result;
-  }
-
-  /**
-   * Recursive helper for getCanonicalPath().
-   */
-  private void getCanonicalPath(List<String> result) {
-    Type currentType = null;
-    if (isRootedAtTuple()) {
-      rootDesc_.getPath().getCanonicalPath(result);
-      currentType = rootDesc_.getType();
-    } else {
-      Preconditions.checkState(isRootedAtTable());
-      result.add(rootTable_.getTableName().getDb());
-      result.add(rootTable_.getTableName().getTbl());
-      currentType = rootTable_.getType().getItemType();
-    }
-    // Compute the explicit path from the matched positions. Note that rawPath_ is
-    // not sufficient because it could contain implicit matches.
-    for (int i = 0; i < matchedPositions_.size(); ++i) {
-      StructType structType = getTypeAsStruct(currentType);
-      int matchPos = matchedPositions_.get(i);
-      Preconditions.checkState(matchPos < structType.getFields().size());
-      StructField match = structType.getFields().get(matchPos);
-      result.add(match.getName());
-      currentType = match.getType();
-    }
-  }
-
-  /**
-   * Returns the absolute physical path in positions starting from the schema root to the
-   * destination of this path.
-   */
-  public List<Integer> getAbsolutePath() {
-    if (absolutePath_ != null) return absolutePath_;
-    Preconditions.checkState(isResolved_);
-    absolutePath_ = Lists.newArrayList();
-    if (rootDesc_ != null) absolutePath_.addAll(rootDesc_.getPath().getAbsolutePath());
-    absolutePath_.addAll(matchedPositions_);
-    return absolutePath_;
-  }
-
-  @Override
-  public String toString() {
-    Preconditions.checkState(rootTable_ != null || rootDesc_ != null);
-    String pathRoot = null;
-    if (rootDesc_ != null) {
-      pathRoot = rootDesc_.getAlias();
-    } else {
-      pathRoot = rootTable_.getFullName();
-    }
-    if (rawPath_.isEmpty()) return pathRoot;
-    return pathRoot + "." + Joiner.on(".").join(rawPath_);
-  }
-
-  /**
-   * Returns a raw path from a known root alias and field name.
-   */
-  public static ArrayList<String> createRawPath(String rootAlias, String fieldName) {
-    ArrayList<String> result = Lists.newArrayList(rootAlias.split("\\."));
-    result.add(fieldName);
-    return result;
-  }
-
-  public static Path createRelPath(Path rootPath, String... fieldNames) {
-    Preconditions.checkState(rootPath.isResolved());
-    Path result = new Path(rootPath, Lists.newArrayList(fieldNames));
-    return result;
-  }
-}
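
The resolution rule described in the class comment (match a raw path element against an explicit field name first, and only then allow a single implicit skip through a collection's item/value struct) can be illustrated with a small standalone sketch. Maps stand in for Impala's StructType/ArrayType, the resolve() helper is hypothetical, and the schema mirrors the Javadoc example c array<struct<f:int,item:int,pos:int>>:

import java.util.List;
import java.util.Map;

// Standalone sketch of implicit-vs-explicit resolution: each raw path
// element is matched against an explicit field name; if that fails and the
// current level exposes an "item" struct, the resolver implicitly steps
// through it once and retries the same element. Types here are hypothetical
// stand-ins, not Impala's schema classes.
public class PathResolutionExample {
  // A struct is a map from field name to either a nested Map (struct) or a
  // String naming a scalar type.
  static Object resolve(Map<String, Object> struct, List<String> rawPath) {
    Object current = struct;
    for (String element : rawPath) {
      Map<String, Object> fields = asStruct(current);
      Object match = fields.get(element);            // explicit match first
      if (match == null && fields.containsKey("item")) {
        // Implicit single-level skip through the collection's "item" struct.
        fields = asStruct(fields.get("item"));
        match = fields.get(element);
      }
      if (match == null) throw new IllegalArgumentException("Cannot resolve: " + element);
      current = match;
    }
    return current;
  }

  @SuppressWarnings("unchecked")
  static Map<String, Object> asStruct(Object o) { return (Map<String, Object>) o; }

  public static void main(String[] args) {
    // Mirrors the Javadoc example: the array exposes an implicit "item"
    // struct plus the positional "pos" pseudo-field.
    Map<String, Object> arrayOfStruct =
        Map.of("item", Map.of("f", "int", "item", "int", "pos", "int"), "pos", "bigint");
    System.out.println(resolve(arrayOfStruct, List.of("item", "f"))); // explicit: int
    System.out.println(resolve(arrayOfStruct, List.of("f")));         // implicit: int
  }
}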

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/Predicate.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/Predicate.java b/fe/src/main/java/com/cloudera/impala/analysis/Predicate.java
deleted file mode 100644
index 4fadbce..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/Predicate.java
+++ /dev/null
@@ -1,97 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.Pair;
-import com.cloudera.impala.common.Reference;
-
-public abstract class Predicate extends Expr {
-  protected boolean isEqJoinConjunct_;
-
-  public Predicate() {
-    super();
-    isEqJoinConjunct_ = false;
-  }
-
-  /**
-   * Copy c'tor used in clone().
-   */
-  protected Predicate(Predicate other) {
-    super(other);
-    isEqJoinConjunct_ = other.isEqJoinConjunct_;
-  }
-
-  public boolean isEqJoinConjunct() { return isEqJoinConjunct_; }
-  public void setIsEqJoinConjunct(boolean v) { isEqJoinConjunct_ = v; }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    if (isAnalyzed_) return;
-    super.analyze(analyzer);
-    type_ = Type.BOOLEAN;
-    // values: true/false/null
-    numDistinctValues_ = 3;
-  }
-
-  /**
-   * Returns true if one of the children is a slotref (possibly wrapped in a cast)
-   * and the other children are all constant. Returns the slotref in 'slotRef' and
-   * its child index in 'idx'.
-   * This will pick up something like "col = 5", but not "2 * col = 10", which is
-   * what we want.
-   */
-  public boolean isSingleColumnPredicate(
-      Reference<SlotRef> slotRefRef, Reference<Integer> idxRef) {
-    // find slotref
-    SlotRef slotRef = null;
-    int i = 0;
-    for (; i < children_.size(); ++i) {
-      slotRef = getChild(i).unwrapSlotRef(false);
-      if (slotRef != null) break;
-    }
-    if (slotRef == null) return false;
-
-    // make sure everything else is constant
-    for (int j = 0; j < children_.size(); ++j) {
-      if (i == j) continue;
-      if (!getChild(j).isConstant()) return false;
-    }
-
-    if (slotRefRef != null) slotRefRef.setRef(slotRef);
-    if (idxRef != null) idxRef.setRef(Integer.valueOf(i));
-    return true;
-  }
-
-  public static boolean isEquivalencePredicate(Expr expr) {
-    return (expr instanceof BinaryPredicate)
-        && ((BinaryPredicate) expr).getOp().isEquivalence();
-  }
-
-  /**
-   * If predicate is of the form "<slotref> = <slotref>", returns both SlotRefs,
-   * otherwise returns null.
-   */
-  public Pair<SlotId, SlotId> getEqSlots() { return null; }
-
-  /**
-   * Returns the SlotRef bound by this Predicate.
-   */
-  public SlotRef getBoundSlot() { return null; }
-}
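
isSingleColumnPredicate() above looks for the shape "exactly one child is a (possibly cast-wrapped) column reference and every other child is constant", which is why "col = 5" qualifies while "2 * col = 10" does not. A standalone sketch over a hypothetical expression tree (Expr, ColumnRef, Constant and Arithmetic are illustrative stand-ins, and the cast-unwrapping step of the real method is omitted):

import java.util.List;

// Standalone sketch of the "one column reference, everything else constant"
// check described above.
public class SingleColumnPredicateExample {
  interface Expr { }
  record ColumnRef(String name) implements Expr { }
  record Constant(Object value) implements Expr { }
  record Arithmetic(Expr left, Expr right) implements Expr { }

  // Mirrors the two-pass structure of the original: find the first column
  // reference, then require every other child to be constant.
  static boolean isSingleColumnPredicate(List<Expr> children) {
    int slotIdx = -1;
    for (int i = 0; i < children.size(); ++i) {
      if (children.get(i) instanceof ColumnRef) { slotIdx = i; break; }
    }
    if (slotIdx == -1) return false;
    for (int j = 0; j < children.size(); ++j) {
      if (j == slotIdx) continue;
      if (!(children.get(j) instanceof Constant)) return false;
    }
    return true;
  }

  public static void main(String[] args) {
    // col = 5 qualifies; 2 * col = 10 does not, because the product is
    // neither a plain column reference nor a constant.
    System.out.println(isSingleColumnPredicate(
        List.of(new ColumnRef("col"), new Constant(5))));                                    // true
    System.out.println(isSingleColumnPredicate(
        List.of(new Arithmetic(new Constant(2), new ColumnRef("col")), new Constant(10))));  // false
  }
}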

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/PrivilegeSpec.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/PrivilegeSpec.java b/fe/src/main/java/com/cloudera/impala/analysis/PrivilegeSpec.java
deleted file mode 100644
index 2948e58..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/PrivilegeSpec.java
+++ /dev/null
@@ -1,287 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.List;
-
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.catalog.DataSourceTable;
-import com.cloudera.impala.catalog.RolePrivilege;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.catalog.TableLoadingException;
-import com.cloudera.impala.catalog.View;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TPrivilege;
-import com.cloudera.impala.thrift.TPrivilegeLevel;
-import com.cloudera.impala.thrift.TPrivilegeScope;
-import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Strings;
-import com.google.common.collect.Lists;
-
-/**
- * Represents a privilege spec from a GRANT/REVOKE statement.
- * A privilege spec may correspond to one or more privileges. Currently, a privilege spec
- * can represent multiple privileges only at the COLUMN scope.
- */
-public class PrivilegeSpec implements ParseNode {
-  private final TPrivilegeScope scope_;
-  private final TPrivilegeLevel privilegeLevel_;
-  private final TableName tableName_;
-  private final HdfsUri uri_;
-  private final List<String> columnNames_;
-
-  // Set/modified during analysis
-  private String dbName_;
-  private String serverName_;
-
-  private PrivilegeSpec(TPrivilegeLevel privilegeLevel, TPrivilegeScope scope,
-      String serverName, String dbName, TableName tableName, HdfsUri uri,
-      List<String> columnNames) {
-    Preconditions.checkNotNull(scope);
-    Preconditions.checkNotNull(privilegeLevel);
-    privilegeLevel_ = privilegeLevel;
-    scope_ = scope;
-    serverName_ = serverName;
-    tableName_ = tableName;
-    dbName_ = (tableName_ != null ? tableName_.getDb() : dbName);
-    uri_ = uri;
-    columnNames_ = columnNames;
-  }
-
-  public static PrivilegeSpec createServerScopedPriv(TPrivilegeLevel privilegeLevel) {
-    return createServerScopedPriv(privilegeLevel, null);
-  }
-
-  public static PrivilegeSpec createServerScopedPriv(TPrivilegeLevel privilegeLevel,
-      String serverName) {
-    return new PrivilegeSpec(privilegeLevel, TPrivilegeScope.SERVER, serverName, null,
-        null, null, null);
-  }
-
-  public static PrivilegeSpec createDbScopedPriv(TPrivilegeLevel privilegeLevel,
-      String dbName) {
-    Preconditions.checkNotNull(dbName);
-    return new PrivilegeSpec(privilegeLevel, TPrivilegeScope.DATABASE, null, dbName,
-        null, null, null);
-  }
-
-  public static PrivilegeSpec createTableScopedPriv(TPrivilegeLevel privilegeLevel,
-      TableName tableName) {
-    Preconditions.checkNotNull(tableName);
-    return new PrivilegeSpec(privilegeLevel, TPrivilegeScope.TABLE, null, null, tableName,
-        null, null);
-  }
-
-  public static PrivilegeSpec createColumnScopedPriv(TPrivilegeLevel privilegeLevel,
-        TableName tableName, List<String> columnNames) {
-    Preconditions.checkNotNull(tableName);
-    Preconditions.checkNotNull(columnNames);
-    return new PrivilegeSpec(privilegeLevel, TPrivilegeScope.COLUMN, null, null,
-        tableName, null, columnNames);
-  }
-
-  public static PrivilegeSpec createUriScopedPriv(TPrivilegeLevel privilegeLevel,
-      HdfsUri uri) {
-    Preconditions.checkNotNull(uri);
-    return new PrivilegeSpec(privilegeLevel, TPrivilegeScope.URI, null, null, null, uri,
-        null);
-  }
-
-  public List<TPrivilege> toThrift() {
-    List<TPrivilege> privileges = Lists.newArrayList();
-    if (scope_ == TPrivilegeScope.COLUMN) {
-      // Create a TPrivilege for every referenced column
-      for (String column: columnNames_) {
-        privileges.add(createTPrivilege(column));
-      }
-    } else {
-      privileges.add(createTPrivilege(null));
-    }
-    return privileges;
-  }
-
-  /**
-   * Helper function to construct a TPrivilege from this privilege spec. If the scope is
-   * COLUMN, 'columnName' must be a non-null column name. Otherwise, 'columnName' is
-   * null.
-   */
-  private TPrivilege createTPrivilege(String columnName) {
-    Preconditions.checkState(columnName == null ^ scope_ == TPrivilegeScope.COLUMN);
-    TPrivilege privilege = new TPrivilege();
-    privilege.setScope(scope_);
-    privilege.setServer_name(serverName_);
-    // We don't currently filter on privilege level, so set it to an arbitrary value.
-    privilege.setPrivilege_level(privilegeLevel_);
-    if (dbName_ != null) privilege.setDb_name(dbName_);
-    if (tableName_ != null) privilege.setTable_name(tableName_.getTbl());
-    if (uri_ != null) privilege.setUri(uri_.toString());
-    if (columnName != null) privilege.setColumn_name(columnName);
-    privilege.setCreate_time_ms(-1);
-    privilege.setPrivilege_name(RolePrivilege.buildRolePrivilegeName(privilege));
-    return privilege;
-  }
-
-  /**
-   * Return the table path of a COLUMN level privilege. The table path consists
-   * of server name, database name and table name.
-   */
-  public static String getTablePath(TPrivilege privilege) {
-    Preconditions.checkState(privilege.getScope() == TPrivilegeScope.COLUMN);
-    Joiner joiner = Joiner.on(".");
-    return joiner.join(privilege.getServer_name(), privilege.getDb_name(),
-        privilege.getTable_name());
-  }
-
-  @Override
-  public String toSql() {
-    StringBuilder sb = new StringBuilder(privilegeLevel_.toString());
-    sb.append(" ON ");
-    sb.append(scope_.toString());
-    if (scope_ == TPrivilegeScope.SERVER && serverName_ != null) {
-      sb.append(" " + serverName_);
-    } else if (scope_ == TPrivilegeScope.DATABASE) {
-      sb.append(" " + dbName_);
-    } else if (scope_ == TPrivilegeScope.TABLE) {
-      sb.append(" " + tableName_.toString());
-    } else if (scope_ == TPrivilegeScope.COLUMN) {
-      sb.append("(");
-      sb.append(Joiner.on(",").join(columnNames_));
-      sb.append(")");
-      sb.append(" " + tableName_.toString());
-    } else if (scope_ == TPrivilegeScope.URI) {
-      sb.append(" '" + uri_.getLocation() + "'");
-    }
-    return sb.toString();
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    String configServerName = analyzer.getAuthzConfig().getServerName();
-    if (serverName_ != null && !serverName_.equals(configServerName)) {
-      throw new AnalysisException(String.format("Specified server name '%s' does not " +
-          "match the configured server name '%s'", serverName_, configServerName));
-    }
-    serverName_ = configServerName;
-    Preconditions.checkState(!Strings.isNullOrEmpty(serverName_));
-    Preconditions.checkNotNull(scope_);
-
-    switch (scope_) {
-      case SERVER:
-        if (privilegeLevel_ != TPrivilegeLevel.ALL) {
-          throw new AnalysisException("Only 'ALL' privilege may be applied at " +
-              "SERVER scope in privilege spec.");
-        }
-        break;
-      case DATABASE:
-        Preconditions.checkState(!Strings.isNullOrEmpty(dbName_));
-        try {
-          analyzer.getDb(dbName_, true);
-        } catch (AnalysisException e) {
-          throw new AnalysisException(String.format("Error setting privileges for " +
-              "database '%s'. Verify that the database exists and that you have " +
-              "permissions to issue a GRANT/REVOKE statement.", dbName_));
-        }
-        break;
-      case URI:
-        Preconditions.checkNotNull(uri_);
-        if (privilegeLevel_ != TPrivilegeLevel.ALL) {
-          throw new AnalysisException("Only 'ALL' privilege may be applied at " +
-              "URI scope in privilege spec.");
-        }
-        uri_.analyze(analyzer, Privilege.ALL, false);
-        break;
-      case TABLE:
-        analyzeTargetTable(analyzer);
-        break;
-      case COLUMN:
-        analyzeColumnPrivScope(analyzer);
-        break;
-      default:
-        throw new IllegalStateException("Unknown TPrivilegeScope in privilege spec: " +
-            scope_.toString());
-    }
-  }
-
-  /**
-   * Analyzes a privilege spec at the COLUMN scope.
-   * Throws an AnalysisException in the following cases:
-   * 1. No columns are specified.
-   * 2. Privilege is applied on a view or an external data source.
-   * 3. Referenced table and/or columns do not exist.
-   * 4. Privilege level is not SELECT.
-   */
-  private void analyzeColumnPrivScope(Analyzer analyzer) throws AnalysisException {
-    Preconditions.checkState(scope_ == TPrivilegeScope.COLUMN);
-    Preconditions.checkNotNull(columnNames_);
-    if (columnNames_.isEmpty()) {
-      throw new AnalysisException("Empty column list in column privilege spec.");
-    }
-    if (privilegeLevel_ != TPrivilegeLevel.SELECT) {
-      throw new AnalysisException("Only 'SELECT' privileges are allowed " +
-          "in a column privilege spec.");
-    }
-    Table table = analyzeTargetTable(analyzer);
-    if (table instanceof View) {
-      throw new AnalysisException("Column-level privileges on views are not " +
-          "supported.");
-    }
-    if (table instanceof DataSourceTable) {
-      throw new AnalysisException("Column-level privileges on external data " +
-          "source tables are not supported.");
-    }
-    for (String columnName: columnNames_) {
-      if (table.getColumn(columnName) == null) {
-        // The error message should not reveal the existence or absence of a column.
-        throw new AnalysisException(String.format("Error setting column-level " +
-            "privileges for table '%s'. Verify that both table and columns exist " +
-            "and that you have permissions to issue a GRANT/REVOKE statement.",
-            tableName_.toString()));
-      }
-    }
-  }
-
-  /**
-   * Verifies that the table referenced in the privilege spec exists in the catalog and
-   * returns the catalog object.
-   * Throws an AnalysisException in the following cases:
-   * 1. The table name is not valid.
-   * 2. Table is not loaded in the catalog.
-   * 3. Table does not exist.
-   */
-  private Table analyzeTargetTable(Analyzer analyzer) throws AnalysisException {
-    Preconditions.checkState(scope_ == TPrivilegeScope.TABLE ||
-        scope_ == TPrivilegeScope.COLUMN);
-    Preconditions.checkState(!Strings.isNullOrEmpty(tableName_.getTbl()));
-    Table table = null;
-    try {
-      dbName_ = analyzer.getTargetDbName(tableName_);
-      Preconditions.checkNotNull(dbName_);
-      table = analyzer.getTable(dbName_, tableName_.getTbl());
-    } catch (TableLoadingException e) {
-      throw new AnalysisException(e.getMessage(), e);
-    } catch (AnalysisException e) {
-      if (analyzer.hasMissingTbls()) throw e;
-      throw new AnalysisException(String.format("Error setting privileges for " +
-          "table '%s'. Verify that the table exists and that you have permissions " +
-          "to issue a GRANT/REVOKE statement.", tableName_.toString()));
-    }
-    Preconditions.checkNotNull(table);
-    return table;
-  }
-}
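
As the class comment above notes, one COLUMN-scoped spec expands into one privilege object per referenced column, while every other scope produces a single privilege. A standalone sketch of that expansion, with plain strings standing in for the Thrift TPrivilege objects and an illustrative expand() helper:

import java.util.ArrayList;
import java.util.List;

// Standalone sketch of the toThrift() expansion described above: a
// COLUMN-scoped spec such as "GRANT SELECT(c1, c2) ON TABLE db.t" becomes
// one privilege record per column; other scopes yield exactly one record.
public class PrivilegeExpansionExample {
  static List<String> expand(String level, String scope, String target, List<String> columns) {
    List<String> privileges = new ArrayList<>();
    if ("COLUMN".equals(scope)) {
      for (String column : columns) {
        privileges.add(level + " ON COLUMN " + target + "." + column);
      }
    } else {
      privileges.add(level + " ON " + scope + " " + target);
    }
    return privileges;
  }

  public static void main(String[] args) {
    System.out.println(expand("SELECT", "COLUMN", "db.t", List.of("c1", "c2")));
    // [SELECT ON COLUMN db.t.c1, SELECT ON COLUMN db.t.c2]
    System.out.println(expand("ALL", "DATABASE", "db", List.of()));
    // [ALL ON DATABASE db]
  }
}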


[28/61] [partial] incubator-impala git commit: IMPALA-3786: Replace "cloudera" with "apache" (part 1)

Posted by kw...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/catalog/HdfsTable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/catalog/HdfsTable.java b/fe/src/main/java/com/cloudera/impala/catalog/HdfsTable.java
deleted file mode 100644
index 2464376..0000000
--- a/fe/src/main/java/com/cloudera/impala/catalog/HdfsTable.java
+++ /dev/null
@@ -1,1958 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.catalog;
-
-import java.io.IOException;
-import java.net.URI;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.TreeMap;
-
-import org.apache.avro.Schema;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.BlockLocation;
-import org.apache.hadoop.fs.BlockStorageLocation;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.VolumeId;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hive.metastore.IMetaStoreClient;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
-import org.apache.hadoop.hive.serde.serdeConstants;
-import org.apache.hadoop.util.StringUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.analysis.ColumnDef;
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.analysis.LiteralExpr;
-import com.cloudera.impala.analysis.NullLiteral;
-import com.cloudera.impala.analysis.NumericLiteral;
-import com.cloudera.impala.analysis.PartitionKeyValue;
-import com.cloudera.impala.catalog.HdfsPartition.BlockReplica;
-import com.cloudera.impala.catalog.HdfsPartition.FileBlock;
-import com.cloudera.impala.catalog.HdfsPartition.FileDescriptor;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.FileSystemUtil;
-import com.cloudera.impala.common.Pair;
-import com.cloudera.impala.common.PrintUtils;
-import com.cloudera.impala.thrift.ImpalaInternalServiceConstants;
-import com.cloudera.impala.thrift.TAccessLevel;
-import com.cloudera.impala.thrift.TCatalogObjectType;
-import com.cloudera.impala.thrift.TColumn;
-import com.cloudera.impala.thrift.THdfsFileBlock;
-import com.cloudera.impala.thrift.THdfsPartition;
-import com.cloudera.impala.thrift.THdfsPartitionLocation;
-import com.cloudera.impala.thrift.THdfsTable;
-import com.cloudera.impala.thrift.TNetworkAddress;
-import com.cloudera.impala.thrift.TPartitionKeyValue;
-import com.cloudera.impala.thrift.TResultRow;
-import com.cloudera.impala.thrift.TResultSet;
-import com.cloudera.impala.thrift.TResultSetMetadata;
-import com.cloudera.impala.thrift.TTable;
-import com.cloudera.impala.thrift.TTableDescriptor;
-import com.cloudera.impala.thrift.TTableType;
-import com.cloudera.impala.util.AvroSchemaConverter;
-import com.cloudera.impala.util.AvroSchemaParser;
-import com.cloudera.impala.util.AvroSchemaUtils;
-import com.cloudera.impala.util.FsPermissionChecker;
-import com.cloudera.impala.util.HdfsCachingUtil;
-import com.cloudera.impala.util.ListMap;
-import com.cloudera.impala.util.MetaStoreUtil;
-import com.cloudera.impala.util.TAccessLevelUtil;
-import com.cloudera.impala.util.TResultRowBuilder;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Strings;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import com.google.common.collect.Sets;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.ImmutableMap;
-
-/**
- * Internal representation of table-related metadata of a file-resident table on a
- * Hadoop filesystem. The table data can be accessed through libHDFS (which is more of
- * an abstraction over Hadoop's FileSystem class rather than DFS specifically). A
- * partitioned table can even span multiple filesystems.
- *
- * This class is not thread-safe. Clients of this class need to protect against
- * concurrent updates using external locking (see CatalogOpExecutor class).
- *
- * Owned by Catalog instance.
- * The partition keys constitute the clustering columns.
- *
- */
-public class HdfsTable extends Table {
-  // hive's default value for table property 'serialization.null.format'
-  private static final String DEFAULT_NULL_COLUMN_VALUE = "\\N";
-
-  // Name of default partition for unpartitioned tables
-  private static final String DEFAULT_PARTITION_NAME = "";
-
-  // Number of times to retry fetching the partitions from the HMS should an error occur.
-  private final static int NUM_PARTITION_FETCH_RETRIES = 5;
-
-  // Table property key for skip.header.line.count
-  public static final String TBL_PROP_SKIP_HEADER_LINE_COUNT = "skip.header.line.count";
-
-  // An invalid network address, which will always be treated as remote.
-  private final static TNetworkAddress REMOTE_NETWORK_ADDRESS =
-      new TNetworkAddress("remote*addr", 0);
-
-  // Minimum block size in bytes allowed for synthetic file blocks (other than the last
-  // block, which may be shorter).
-  private final static long MIN_SYNTHETIC_BLOCK_SIZE = 1024 * 1024;
-
-  // string to indicate NULL. set in load() from table properties
-  private String nullColumnValue_;
-
-  // hive uses this string for NULL partition keys. Set in load().
-  private String nullPartitionKeyValue_;
-
-  // Avro schema of this table if this is an Avro table, otherwise null. Set in load().
-  private String avroSchema_ = null;
-
-  // Set to true if any of the partitions have Avro data.
-  private boolean hasAvroData_ = false;
-
-  // True if this table's metadata is marked as cached. Does not necessarily mean the
-  // data is cached or that all/any partitions are cached.
-  private boolean isMarkedCached_ = false;
-
-  private static boolean hasLoggedDiskIdFormatWarning_ = false;
-
-  // Array of sorted maps storing the association between partition values and
-  // partition ids. There is one sorted map per partition key.
-  // TODO: We should not populate this for HdfsTable objects stored in the catalog
-  // server.
-  private ArrayList<TreeMap<LiteralExpr, HashSet<Long>>> partitionValuesMap_ =
-      Lists.newArrayList();
-
-  // Array of partition id sets that correspond to partitions with null values
-  // in the partition keys; one set per partition key.
-  private ArrayList<HashSet<Long>> nullPartitionIds_ = Lists.newArrayList();
-
-  // Map of partition ids to HdfsPartitions.
-  private HashMap<Long, HdfsPartition> partitionMap_ = Maps.newHashMap();
-
-  // Map of partition name to HdfsPartition object. Used for speeding up
-  // table metadata loading.
-  private HashMap<String, HdfsPartition> nameToPartitionMap_ = Maps.newHashMap();
-
-  // Store all the partition ids of an HdfsTable.
-  private HashSet<Long> partitionIds_ = Sets.newHashSet();
-
-  // Maximum size (in bytes) of incremental stats the catalog is allowed to serialize per
-  // table. This limit is set as a safety check, to prevent the JVM from hitting a
-  // maximum array limit of 1GB (or OOM) while building the thrift objects to send to
-  // impalads.
-  public static final long MAX_INCREMENTAL_STATS_SIZE_BYTES = 200 * 1024 * 1024;
-
-  // Estimate (in bytes) of the incremental stats size per column per partition
-  public static final long STATS_SIZE_PER_COLUMN_BYTES = 400;
-
-  // Bi-directional map between integer indices and unique datanode
-  // TNetworkAddresses, each of which contains blocks of 1 or more
-  // files in this table. The network addresses are stored using IP
-  // address as the host name. Each FileBlock specifies a list of
-  // indices within this hostIndex_ to specify which nodes contain
-  // replicas of the block.
-  private final ListMap<TNetworkAddress> hostIndex_ = new ListMap<TNetworkAddress>();
-
-  private HdfsPartitionLocationCompressor partitionLocationCompressor_;
-
-  // Map of file names to file descriptors for each partition location (directory).
-  private Map<String, Map<String, FileDescriptor>>
-      perPartitionFileDescMap_ = Maps.newHashMap();
-
-  // Total number of Hdfs files in this table. Set in load().
-  private long numHdfsFiles_;
-
-  // Sum of sizes of all Hdfs files in this table. Set in load().
-  private long totalHdfsBytes_;
-
-  // True iff the table's partitions are located on more than one filesystem.
-  private boolean multipleFileSystems_ = false;
-
-  // Base Hdfs directory where files of this table are stored.
-  // For unpartitioned tables it is simply the path where all files live.
-  // For partitioned tables it is the root directory
-  // under which partition dirs are placed.
-  protected String hdfsBaseDir_;
-
-  // List of FieldSchemas that correspond to the non-partition columns. Used when
-  // describing this table and its partitions to the HMS (e.g. as part of an alter table
-  // operation), when only non-partition columns are required.
-  private final List<FieldSchema> nonPartFieldSchemas_ = Lists.newArrayList();
-
-  // Flag to check if the table schema has been loaded. Used as a precondition
-  // for setAvroSchema().
-  private boolean isSchemaLoaded_ = false;
-
-  private final static Logger LOG = LoggerFactory.getLogger(HdfsTable.class);
-
-  // Caching this configuration object makes calls to getFileSystem much quicker
-  // (saves ~50ms on a standard plan)
-  // TODO(henry): confirm that this is thread safe - cursory inspection of the class
-  // and its usage in getFileSystem suggests it should be.
-  private static final Configuration CONF = new Configuration();
-
-  private static final boolean SUPPORTS_VOLUME_ID;
-
-  // Wrapper around a FileSystem object to hash based on the underlying FileSystem's
-  // scheme and authority.
-  private static class FsKey {
-    FileSystem filesystem;
-
-    public FsKey(FileSystem fs) { filesystem = fs; }
-
-    @Override
-    public int hashCode() { return filesystem.getUri().hashCode(); }
-
-    @Override
-    public boolean equals(Object o) {
-      if (o == this) return true;
-      if (o instanceof FsKey) {
-        URI uri = filesystem.getUri();
-        URI otherUri = ((FsKey)o).filesystem.getUri();
-        return uri.equals(otherUri);
-      }
-      return false;
-    }
-
-    @Override
-    public String toString() { return filesystem.getUri().toString(); }
-  }
-
-  // Keeps track of newly added THdfsFileBlock metadata and its corresponding
-  // BlockLocation.  For each i, blocks.get(i) corresponds to locations.get(i).  Once
-  // all the new file blocks are collected, the disk volume IDs are retrieved in one
-  // batched DFS call.
-  private static class FileBlocksInfo {
-    final List<THdfsFileBlock> blocks = Lists.newArrayList();
-    final List<BlockLocation> locations = Lists.newArrayList();
-
-    public void addBlocks(List<THdfsFileBlock> b, List<BlockLocation> l) {
-      Preconditions.checkState(b.size() == l.size());
-      blocks.addAll(b);
-      locations.addAll(l);
-    }
-  }
-
-  public HdfsTable(TableId id, org.apache.hadoop.hive.metastore.api.Table msTbl,
-      Db db, String name, String owner) {
-    super(id, msTbl, db, name, owner);
-    partitionLocationCompressor_ =
-        new HdfsPartitionLocationCompressor(numClusteringCols_);
-  }
-
-  static {
-    SUPPORTS_VOLUME_ID =
-        CONF.getBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED,
-                        DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED_DEFAULT);
-  }
-
-  /**
-   * Returns a disk id (0-based) index from the Hdfs VolumeId object.
-   * There is currently no public API to get at the volume id. We'll have to get it
-   * by accessing the internals.
-   */
-  private static int getDiskId(VolumeId hdfsVolumeId) {
-    // Initialize the diskId as -1 to indicate it is unknown
-    int diskId = -1;
-
-    if (hdfsVolumeId != null) {
-      // TODO: this is a hack and we'll have to address this by getting the
-      // public API. Also, we need to be very mindful of this when we change
-      // the version of HDFS.
-      String volumeIdString = hdfsVolumeId.toString();
-      // This is the hacky part. The toString is currently the underlying id
-      // encoded as hex.
-      byte[] volumeIdBytes = StringUtils.hexStringToByte(volumeIdString);
-      if (volumeIdBytes != null && volumeIdBytes.length == 4) {
-        diskId = Bytes.toInt(volumeIdBytes);
-      } else if (!hasLoggedDiskIdFormatWarning_) {
-        LOG.warn("wrong disk id format: " + volumeIdString);
-        hasLoggedDiskIdFormatWarning_ = true;
-      }
-    }
-    return diskId;
-  }
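  // A hypothetical walk-through of getDiskId() above, assuming a 4-byte hex-encoded
  // VolumeId string (values are illustrative only):
  //   hdfsVolumeId.toString()                  -> "00000002"
  //   StringUtils.hexStringToByte("00000002")  -> {0x00, 0x00, 0x00, 0x02}
  //   Bytes.toInt(...)                         -> 2   (0-based disk id)
  // Any string that does not decode to exactly 4 bytes yields diskId = -1 (unknown).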
-
-  public boolean spansMultipleFileSystems() { return multipleFileSystems_; }
-
-  /**
-   * Returns true if the table resides at a location which supports caching (e.g. HDFS).
-   */
-  public boolean isLocationCacheable() {
-    return FileSystemUtil.isPathCacheable(new Path(getLocation()));
-  }
-
-  /**
-   * Returns true if the table and all its partitions reside at locations which
-   * support caching (e.g. HDFS).
-   */
-  public boolean isCacheable() {
-    if (!isLocationCacheable()) return false;
-    if (!isMarkedCached() && numClusteringCols_ > 0) {
-      for (HdfsPartition partition: getPartitions()) {
-        if (partition.getId() == ImpalaInternalServiceConstants.DEFAULT_PARTITION_ID) {
-          continue;
-        }
-        if (!partition.isCacheable()) {
-          return false;
-        }
-      }
-    }
-    return true;
-  }
-
-  /**
-   * Queries the filesystem to load the file block metadata (e.g. DFS blocks) for the
-   * given file.  Adds the newly created block metadata and block location to the
-   * perFsFileBlocks, so that the disk IDs for each block can be retrieved with one
-   * call to DFS.
-   */
-  private void loadBlockMetadata(FileSystem fs, FileStatus file, FileDescriptor fd,
-      HdfsFileFormat fileFormat, Map<FsKey, FileBlocksInfo> perFsFileBlocks) {
-    Preconditions.checkNotNull(fd);
-    Preconditions.checkNotNull(perFsFileBlocks);
-    Preconditions.checkArgument(!file.isDirectory());
-    LOG.debug("load block md for " + name_ + " file " + fd.getFileName());
-
-    if (!FileSystemUtil.hasGetFileBlockLocations(fs)) {
-      synthesizeBlockMetadata(fs, fd, fileFormat);
-      return;
-    }
-    try {
-      BlockLocation[] locations = fs.getFileBlockLocations(file, 0, file.getLen());
-      Preconditions.checkNotNull(locations);
-
-      // Loop over all blocks in the file.
-      for (BlockLocation loc: locations) {
-        Preconditions.checkNotNull(loc);
-        // Get the location of all block replicas in ip:port format.
-        String[] blockHostPorts = loc.getNames();
-        // Get the hostnames for all block replicas. Used to resolve which hosts
-        // contain cached data. The results are returned in the same order as
-        // block.getNames() so it allows us to match a host specified as ip:port to
-        // corresponding hostname using the same array index.
-        String[] blockHostNames = loc.getHosts();
-        Preconditions.checkState(blockHostNames.length == blockHostPorts.length);
-        // Get the hostnames that contain cached replicas of this block.
-        Set<String> cachedHosts =
-            Sets.newHashSet(Arrays.asList(loc.getCachedHosts()));
-        Preconditions.checkState(cachedHosts.size() <= blockHostNames.length);
-
-        // Now enumerate all replicas of the block, adding any unknown hosts
-        // to hostMap_/hostList_. The host ID (index in to the hostList_) for each
-        // replica is stored in replicaHostIdxs.
-        List<BlockReplica> replicas = Lists.newArrayListWithExpectedSize(
-            blockHostPorts.length);
-        for (int i = 0; i < blockHostPorts.length; ++i) {
-          TNetworkAddress networkAddress = BlockReplica.parseLocation(blockHostPorts[i]);
-          Preconditions.checkState(networkAddress != null);
-          replicas.add(new BlockReplica(hostIndex_.getIndex(networkAddress),
-              cachedHosts.contains(blockHostNames[i])));
-        }
-        fd.addFileBlock(new FileBlock(loc.getOffset(), loc.getLength(), replicas));
-      }
-      // Remember the THdfsFileBlocks and corresponding BlockLocations.  Once all the
-      // blocks are collected, the disk IDs will be queried in one batch per filesystem.
-      addPerFsFileBlocks(perFsFileBlocks, fs, fd.getFileBlocks(),
-          Arrays.asList(locations));
-    } catch (IOException e) {
-      throw new RuntimeException("couldn't determine block locations for path '" +
-          file.getPath() + "':\n" + e.getMessage(), e);
-    }
-  }
-
-  /**
-   * For filesystems that don't override getFileBlockLocations, synthesize file blocks
-   * by manually splitting the file range into fixed-size blocks.  That way, scan
-   * ranges can be derived from file blocks as usual.  All synthesized blocks are given
-   * an invalid network address so that the scheduler will treat them as remote.
-   */
-  private void synthesizeBlockMetadata(FileSystem fs, FileDescriptor fd,
-      HdfsFileFormat fileFormat) {
-    long start = 0;
-    long remaining = fd.getFileLength();
-    // Workaround HADOOP-11584 by using the filesystem default block size rather than
-    // the block size from the FileStatus.
-    // TODO: after HADOOP-11584 is resolved, get the block size from the FileStatus.
-    long blockSize = fs.getDefaultBlockSize();
-    if (blockSize < MIN_SYNTHETIC_BLOCK_SIZE) blockSize = MIN_SYNTHETIC_BLOCK_SIZE;
-    if (!fileFormat.isSplittable(HdfsCompression.fromFileName(fd.getFileName()))) {
-      blockSize = remaining;
-    }
-    while (remaining > 0) {
-      long len = Math.min(remaining, blockSize);
-      List<BlockReplica> replicas = Lists.newArrayList(
-          new BlockReplica(hostIndex_.getIndex(REMOTE_NETWORK_ADDRESS), false));
-      fd.addFileBlock(new FileBlock(start, len, replicas));
-      remaining -= len;
-      start += len;
-    }
-  }
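  // A sketch of the synthesis above, assuming a splittable 300 MB file and a 128 MB
  // filesystem default block size (numbers are illustrative only):
  //   block 0: offset 0 MB,   length 128 MB
  //   block 1: offset 128 MB, length 128 MB
  //   block 2: offset 256 MB, length 44 MB   (the last block may be shorter)
  // Each synthesized block has a single replica at REMOTE_NETWORK_ADDRESS, so the
  // scheduler always treats it as a remote read.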
-
-  /**
-   * Populates disk/volume ID metadata inside the newly created THdfsFileBlocks.
-   * perFsFileBlocks maps from each filesystem to a FileBlocksInfo, whose 'blocks' list
-   * contains the newly created THdfsFileBlocks and whose 'locations' list contains the
-   * corresponding BlockLocations.
-   */
-  private void loadDiskIds(Map<FsKey, FileBlocksInfo> perFsFileBlocks) {
-    if (!SUPPORTS_VOLUME_ID) return;
-    // Loop over each filesystem.  If the filesystem is DFS, retrieve the volume IDs
-    // for all the blocks.
-    for (FsKey fsKey: perFsFileBlocks.keySet()) {
-      FileSystem fs = fsKey.filesystem;
-      // Only DistributedFileSystem has getFileBlockStorageLocations().  It's not even
-      // part of the FileSystem interface, so we'll need to downcast.
-      if (!(fs instanceof DistributedFileSystem)) continue;
-
-      LOG.trace("Loading disk ids for: " + getFullName() + ". nodes: " +
-          hostIndex_.size() + ". filesystem: " + fsKey);
-      DistributedFileSystem dfs = (DistributedFileSystem)fs;
-      FileBlocksInfo blockLists = perFsFileBlocks.get(fsKey);
-      Preconditions.checkNotNull(blockLists);
-      BlockStorageLocation[] storageLocs = null;
-      try {
-        // Get the BlockStorageLocations for all the blocks
-        storageLocs = dfs.getFileBlockStorageLocations(blockLists.locations);
-      } catch (IOException e) {
-        LOG.error("Couldn't determine block storage locations for filesystem " +
-            fs + ":\n" + e.getMessage());
-        continue;
-      }
-      if (storageLocs == null || storageLocs.length == 0) {
-        LOG.warn("Attempted to get block locations for filesystem " + fs +
-            " but the call returned no results");
-        continue;
-      }
-      if (storageLocs.length != blockLists.locations.size()) {
-        // Block locations and storage locations didn't match up.
-        LOG.error("Number of block storage locations not equal to number of blocks: "
-            + "#storage locations=" + Long.toString(storageLocs.length)
-            + " #blocks=" + Long.toString(blockLists.locations.size()));
-        continue;
-      }
-      long unknownDiskIdCount = 0;
-      // Attach volume IDs given by the storage location to the corresponding
-      // THdfsFileBlocks.
-      for (int locIdx = 0; locIdx < storageLocs.length; ++locIdx) {
-        VolumeId[] volumeIds = storageLocs[locIdx].getVolumeIds();
-        THdfsFileBlock block = blockLists.blocks.get(locIdx);
-        // Convert opaque VolumeId to 0 based ids.
-        // TODO: the diskId should be eventually retrievable from Hdfs when the
-        // community agrees this API is useful.
-        int[] diskIds = new int[volumeIds.length];
-        for (int i = 0; i < volumeIds.length; ++i) {
-          diskIds[i] = getDiskId(volumeIds[i]);
-          if (diskIds[i] < 0) ++unknownDiskIdCount;
-        }
-        FileBlock.setDiskIds(diskIds, block);
-      }
-      if (unknownDiskIdCount > 0) {
-        LOG.warn("Unknown disk id count for filesystem " + fs + ":" + unknownDiskIdCount);
-      }
-    }
-  }
-
-  @Override
-  public TCatalogObjectType getCatalogObjectType() {
-    return TCatalogObjectType.TABLE;
-  }
-  public boolean isMarkedCached() { return isMarkedCached_; }
-
-  public Collection<HdfsPartition> getPartitions() { return partitionMap_.values(); }
-  public Map<Long, HdfsPartition> getPartitionMap() { return partitionMap_; }
-  public Map<String, HdfsPartition> getNameToPartitionMap() {
-    return nameToPartitionMap_;
-  }
-  public Set<Long> getNullPartitionIds(int i) { return nullPartitionIds_.get(i); }
-  public HdfsPartitionLocationCompressor getPartitionLocationCompressor() {
-    return partitionLocationCompressor_;
-  }
-  public Set<Long> getPartitionIds() { return partitionIds_; }
-  public TreeMap<LiteralExpr, HashSet<Long>> getPartitionValueMap(int i) {
-    return partitionValuesMap_.get(i);
-  }
-
-  /**
-   * Returns the value Hive is configured to use for NULL partition key values.
-   * Set during load.
-   */
-  public String getNullPartitionKeyValue() { return nullPartitionKeyValue_; }
-  public String getNullColumnValue() { return nullColumnValue_; }
-
-  /**
-   * Returns the storage location (HDFS path) of this table.
-   */
-  public String getLocation() {
-    return super.getMetaStoreTable().getSd().getLocation();
-  }
-
-  List<FieldSchema> getNonPartitionFieldSchemas() { return nonPartFieldSchemas_; }
-
-  // True if Impala has HDFS write permissions on the hdfsBaseDir (for an unpartitioned
-  // table) or if Impala has write permissions on all partition directories (for
-  // a partitioned table).
-  public boolean hasWriteAccess() {
-    return TAccessLevelUtil.impliesWriteAccess(accessLevel_);
-  }
-
-  /**
-   * Returns the first location (HDFS path) that Impala does not have WRITE access
-   * to, or null if none is found. For an unpartitioned table, this just
-   * checks the hdfsBaseDir. For a partitioned table it checks all partition directories.
-   */
-  public String getFirstLocationWithoutWriteAccess() {
-    if (getMetaStoreTable() == null) return null;
-
-    if (getMetaStoreTable().getPartitionKeysSize() == 0) {
-      if (!TAccessLevelUtil.impliesWriteAccess(accessLevel_)) {
-        return hdfsBaseDir_;
-      }
-    } else {
-      for (HdfsPartition partition: partitionMap_.values()) {
-        if (!TAccessLevelUtil.impliesWriteAccess(partition.getAccessLevel())) {
-          return partition.getLocation();
-        }
-      }
-    }
-    return null;
-  }
-
-  /**
-   * Gets the HdfsPartition matching the given partition spec. Returns null if no match
-   * was found.
-   */
-  public HdfsPartition getPartition(
-      List<PartitionKeyValue> partitionSpec) {
-    List<TPartitionKeyValue> partitionKeyValues = Lists.newArrayList();
-    for (PartitionKeyValue kv: partitionSpec) {
-      String value = PartitionKeyValue.getPartitionKeyValueString(
-          kv.getLiteralValue(), getNullPartitionKeyValue());
-      partitionKeyValues.add(new TPartitionKeyValue(kv.getColName(), value));
-    }
-    return getPartitionFromThriftPartitionSpec(partitionKeyValues);
-  }
-
-  /**
-   * Gets the HdfsPartition matching the Thrift version of the partition spec.
-   * Returns null if no match was found.
-   */
-  public HdfsPartition getPartitionFromThriftPartitionSpec(
-      List<TPartitionKeyValue> partitionSpec) {
-    // First, build a list of the partition values to search for in the same order they
-    // are defined in the table.
-    List<String> targetValues = Lists.newArrayList();
-    Set<String> keys = Sets.newHashSet();
-    for (FieldSchema fs: getMetaStoreTable().getPartitionKeys()) {
-      for (TPartitionKeyValue kv: partitionSpec) {
-        if (fs.getName().toLowerCase().equals(kv.getName().toLowerCase())) {
-          targetValues.add(kv.getValue().toLowerCase());
-          // Same key was specified twice
-          if (!keys.add(kv.getName().toLowerCase())) {
-            return null;
-          }
-        }
-      }
-    }
-
-    // Make sure the number of values match up and that some values were found.
-    if (targetValues.size() == 0 ||
-        (targetValues.size() != getMetaStoreTable().getPartitionKeysSize())) {
-      return null;
-    }
-
-    // Search through all the partitions and check if their partition key values
-    // match the values being searched for.
-    for (HdfsPartition partition: partitionMap_.values()) {
-      if (partition.isDefaultPartition()) continue;
-      List<LiteralExpr> partitionValues = partition.getPartitionValues();
-      Preconditions.checkState(partitionValues.size() == targetValues.size());
-      boolean matchFound = true;
-      for (int i = 0; i < targetValues.size(); ++i) {
-        String value;
-        if (partitionValues.get(i) instanceof NullLiteral) {
-          value = getNullPartitionKeyValue();
-        } else {
-          value = partitionValues.get(i).getStringValue();
-          Preconditions.checkNotNull(value);
-          // See IMPALA-252: we deliberately map empty strings on to
-          // NULL when they're in partition columns. This is for
-          // backwards compatibility with Hive, and is clearly broken.
-          if (value.isEmpty()) value = getNullPartitionKeyValue();
-        }
-        if (!targetValues.get(i).equals(value.toLowerCase())) {
-          matchFound = false;
-          break;
-        }
-      }
-      if (matchFound) {
-        return partition;
-      }
-    }
-    return null;
-  }
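  // Hypothetical usage of the lookup above, for a table partitioned by (year, month):
  //   List<TPartitionKeyValue> spec = Lists.newArrayList(
  //       new TPartitionKeyValue("year", "2016"),
  //       new TPartitionKeyValue("month", "9"));
  //   HdfsPartition p = getPartitionFromThriftPartitionSpec(spec);
  //   // p is null if the spec is incomplete, repeats a key, or matches no partition.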
-
-  /**
-   * Create columns corresponding to fieldSchemas. Throws a TableLoadingException if the
-   * metadata is incompatible with what we support.
-   */
-  private void addColumnsFromFieldSchemas(List<FieldSchema> fieldSchemas)
-      throws TableLoadingException {
-    int pos = colsByPos_.size();
-    for (FieldSchema s: fieldSchemas) {
-      Type type = parseColumnType(s);
-      // Check if we support partitioning on columns of such a type.
-      if (pos < numClusteringCols_ && !type.supportsTablePartitioning()) {
-        throw new TableLoadingException(
-            String.format("Failed to load metadata for table '%s' because of " +
-                "unsupported partition-column type '%s' in partition column '%s'",
-                getFullName(), type.toString(), s.getName()));
-      }
-
-      Column col = new Column(s.getName(), type, s.getComment(), pos);
-      addColumn(col);
-      ++pos;
-    }
-  }
-
-  /**
-   * Clear the partitions of an HdfsTable and the associated metadata.
-   */
-  private void resetPartitions() {
-    partitionIds_.clear();
-    partitionMap_.clear();
-    nameToPartitionMap_.clear();
-    partitionValuesMap_.clear();
-    nullPartitionIds_.clear();
-    perPartitionFileDescMap_.clear();
-    // Initialize partitionValuesMap_ and nullPartitionIds_. Also reset column stats.
-    for (int i = 0; i < numClusteringCols_; ++i) {
-      getColumns().get(i).getStats().setNumNulls(0);
-      getColumns().get(i).getStats().setNumDistinctValues(0);
-      partitionValuesMap_.add(Maps.<LiteralExpr, HashSet<Long>>newTreeMap());
-      nullPartitionIds_.add(Sets.<Long>newHashSet());
-    }
-    numHdfsFiles_ = 0;
-    totalHdfsBytes_ = 0;
-  }
-
-  /**
-   * Resets any partition metadata, creates the default partition and sets the base
-   * table directory path as well as the caching info from the HMS table.
-   */
-  private void initializePartitionMetadata(
-      org.apache.hadoop.hive.metastore.api.Table msTbl) throws CatalogException {
-    Preconditions.checkNotNull(msTbl);
-    resetPartitions();
-    hdfsBaseDir_ = msTbl.getSd().getLocation();
-    // INSERT statements need to refer to this if they try to write to new partitions
-    // Scans don't refer to this because by definition all partitions they refer to
-    // exist.
-    addDefaultPartition(msTbl.getSd());
-
-    // We silently ignore cache directives that no longer exist in HDFS, and remove
-    // non-existing cache directives from the parameters.
-    isMarkedCached_ = HdfsCachingUtil.validateCacheParams(msTbl.getParameters());
-  }
-
-  /**
-   * Create HdfsPartition objects corresponding to 'msPartitions' and add them to this
-   * table's partition list. Any partition metadata will be reset and loaded from
-   * scratch.
-   *
-   * If there are no partitions in the Hive metadata, a single partition is added with no
-   * partition keys.
-   */
-  private void loadAllPartitions(
-      List<org.apache.hadoop.hive.metastore.api.Partition> msPartitions,
-      org.apache.hadoop.hive.metastore.api.Table msTbl) throws IOException,
-      CatalogException {
-    Preconditions.checkNotNull(msTbl);
-    initializePartitionMetadata(msTbl);
-    // Map of filesystem to the file blocks for new/modified FileDescriptors. Blocks in
-    // this map will have their disk volume IDs information (re)loaded. This is used to
-    // speed up the incremental refresh of a table's metadata by skipping unmodified,
-    // previously loaded blocks.
-    Map<FsKey, FileBlocksInfo> blocksToLoad = Maps.newHashMap();
-    if (msTbl.getPartitionKeysSize() == 0) {
-      Preconditions.checkArgument(msPartitions == null || msPartitions.isEmpty());
-      // This table has no partition key, which means it has no declared partitions.
-      // We model partitions slightly differently to Hive - every file must exist in a
-      // partition, so add a single partition with no keys which will get all the
-      // files in the table's root directory.
-      HdfsPartition part = createPartition(msTbl.getSd(), null, blocksToLoad);
-      if (isMarkedCached_) part.markCached();
-      addPartition(part);
-      Path location = new Path(hdfsBaseDir_);
-      FileSystem fs = location.getFileSystem(CONF);
-      if (fs.exists(location)) {
-        accessLevel_ = getAvailableAccessLevel(fs, location);
-      }
-    } else {
-      for (org.apache.hadoop.hive.metastore.api.Partition msPartition: msPartitions) {
-        HdfsPartition partition = createPartition(msPartition.getSd(), msPartition,
-            blocksToLoad);
-        // If the partition is null, its HDFS path does not exist, and it should not be
-        // added to this table's partition list. Skip the partition.
-        if (partition == null) continue;
-        addPartition(partition);
-        if (msPartition.getParameters() != null) {
-          partition.setNumRows(getRowCount(msPartition.getParameters()));
-        }
-        if (!TAccessLevelUtil.impliesWriteAccess(partition.getAccessLevel())) {
-          // TODO: READ_ONLY isn't exactly correct because it's possible the
-          // partition does not have READ permissions either. When we start checking
-          // whether we can READ from a table, this should be updated to set the
-          // table's access level to the "lowest" effective level across all
-          // partitions. That is, if one partition has READ_ONLY and another has
-          // WRITE_ONLY the table's access level should be NONE.
-          accessLevel_ = TAccessLevel.READ_ONLY;
-        }
-      }
-    }
-    loadDiskIds(blocksToLoad);
-  }
-
-  /**
-   * Gets the AccessLevel that is available for Impala for this table based on the
-   * permissions Impala has on the given path. If the path does not exist, recurses up
-   * the path until an existing parent directory is found, and inherits access
-   * permissions from it.
-   */
-  private TAccessLevel getAvailableAccessLevel(FileSystem fs, Path location)
-      throws IOException {
-    FsPermissionChecker permissionChecker = FsPermissionChecker.getInstance();
-    while (location != null) {
-      if (fs.exists(location)) {
-        FsPermissionChecker.Permissions perms =
-            permissionChecker.getPermissions(fs, location);
-        if (perms.canReadAndWrite()) {
-          return TAccessLevel.READ_WRITE;
-        } else if (perms.canRead()) {
-          return TAccessLevel.READ_ONLY;
-        } else if (perms.canWrite()) {
-          return TAccessLevel.WRITE_ONLY;
-        }
-        return TAccessLevel.NONE;
-      }
-      location = location.getParent();
-    }
-    // Should never get here.
-    Preconditions.checkNotNull(location, "Error: no path ancestor exists");
-    return TAccessLevel.NONE;
-  }
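  // An illustrative case for the ancestor fallback above (paths are hypothetical):
  // if a new partition location /warehouse/t/year=2017 does not exist yet, the
  // permissions of /warehouse/t (or the nearest existing ancestor) determine the
  // returned access level, e.g. READ_WRITE when Impala can both read and write it.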
-
-  /**
-   * Creates a new HdfsPartition object to be added to HdfsTable's partition list.
-   * Partitions may be empty, or may not even exist in the filesystem (a partition's
-   * location may have been changed to a new path that is about to be created by an
-   * INSERT). Also loads the block metadata for this partition. Returns new partition
-   * if successful or null if none was created.
-   *
-   * Throws CatalogException if the supplied storage descriptor contains metadata that
-   * Impala can't understand.
-   */
-  public HdfsPartition createPartition(StorageDescriptor storageDescriptor,
-      org.apache.hadoop.hive.metastore.api.Partition msPartition)
-      throws CatalogException {
-    Map<FsKey, FileBlocksInfo> blocksToLoad = Maps.newHashMap();
-    HdfsPartition hdfsPartition = createPartition(storageDescriptor, msPartition,
-        blocksToLoad);
-    loadDiskIds(blocksToLoad);
-    return hdfsPartition;
-  }
-
-  /**
-   * Creates a new HdfsPartition from a specified StorageDescriptor and an HMS partition
-   * object. It populates 'perFsFileBlock' with the blocks to be loaded for each file in
-   * the partition directory.
-   */
-  private HdfsPartition createPartition(StorageDescriptor storageDescriptor,
-      org.apache.hadoop.hive.metastore.api.Partition msPartition,
-      Map<FsKey, FileBlocksInfo> perFsFileBlocks)
-      throws CatalogException {
-    HdfsStorageDescriptor fileFormatDescriptor =
-        HdfsStorageDescriptor.fromStorageDescriptor(this.name_, storageDescriptor);
-    List<LiteralExpr> keyValues = Lists.newArrayList();
-    boolean isMarkedCached = isMarkedCached_;
-    if (msPartition != null) {
-      isMarkedCached = HdfsCachingUtil.validateCacheParams(msPartition.getParameters());
-      // Load key values
-      for (String partitionKey: msPartition.getValues()) {
-        Type type = getColumns().get(keyValues.size()).getType();
-        // Deal with Hive's special NULL partition key.
-        if (partitionKey.equals(nullPartitionKeyValue_)) {
-          keyValues.add(NullLiteral.create(type));
-        } else {
-          try {
-            keyValues.add(LiteralExpr.create(partitionKey, type));
-          } catch (Exception ex) {
-            LOG.warn("Failed to create literal expression of type: " + type, ex);
-            throw new CatalogException("Invalid partition key value of type: " + type,
-                ex);
-          }
-        }
-      }
-      try {
-        Expr.analyze(keyValues, null);
-      } catch (AnalysisException e) {
-        // should never happen
-        throw new IllegalStateException(e);
-      }
-    }
-
-    Path partDirPath = new Path(storageDescriptor.getLocation());
-    try {
-      FileSystem fs = partDirPath.getFileSystem(CONF);
-      multipleFileSystems_ = multipleFileSystems_ ||
-          !FileSystemUtil.isPathOnFileSystem(new Path(getLocation()), fs);
-      updatePartitionFds(partDirPath, isMarkedCached,
-          fileFormatDescriptor.getFileFormat(), perFsFileBlocks);
-      HdfsPartition partition =
-          new HdfsPartition(this, msPartition, keyValues, fileFormatDescriptor,
-              perPartitionFileDescMap_.get(partDirPath.toString()).values(),
-              getAvailableAccessLevel(fs, partDirPath));
-      partition.checkWellFormed();
-      return partition;
-    } catch (IOException e) {
-      throw new CatalogException("Error initializing partition", e);
-    }
-  }
-
-  /**
-   * Add the given THdfsFileBlocks and BlockLocations to the FileBlockInfo for the
-   * given filesystem.
-   */
-  private void addPerFsFileBlocks(Map<FsKey, FileBlocksInfo> fsToBlocks, FileSystem fs,
-      List<THdfsFileBlock> blocks, List<BlockLocation> locations) {
-    FsKey fsKey = new FsKey(fs);
-    FileBlocksInfo infos = fsToBlocks.get(fsKey);
-    if (infos == null) {
-      infos = new FileBlocksInfo();
-      fsToBlocks.put(fsKey, infos);
-    }
-    infos.addBlocks(blocks, locations);
-  }
-
-  /**
-   * Adds the partition to the HdfsTable. Throws a CatalogException if the partition
-   * already exists in this table.
-   */
-  public void addPartition(HdfsPartition partition) throws CatalogException {
-    if (partitionMap_.containsKey(partition.getId())) {
-      throw new CatalogException(String.format("Partition %s already exists in table %s",
-          partition.getPartitionName(), getFullName()));
-    }
-    partitionMap_.put(partition.getId(), partition);
-    totalHdfsBytes_ += partition.getSize();
-    numHdfsFiles_ += partition.getNumFileDescriptors();
-    updatePartitionMdAndColStats(partition);
-  }
-
-  /**
-   * Updates the HdfsTable's partition metadata, i.e. adds the id to the HdfsTable and
-   * populates structures used for speeding up partition pruning/lookup. Also updates
-   * column stats.
-   */
-  private void updatePartitionMdAndColStats(HdfsPartition partition) {
-    if (partition.getPartitionValues().size() != numClusteringCols_) return;
-    partitionIds_.add(partition.getId());
-    for (int i = 0; i < partition.getPartitionValues().size(); ++i) {
-      ColumnStats stats = getColumns().get(i).getStats();
-      LiteralExpr literal = partition.getPartitionValues().get(i);
-      // Store partitions with null partition values separately
-      if (literal instanceof NullLiteral) {
-        stats.setNumNulls(stats.getNumNulls() + 1);
-        if (nullPartitionIds_.get(i).isEmpty()) {
-          stats.setNumDistinctValues(stats.getNumDistinctValues() + 1);
-        }
-        nullPartitionIds_.get(i).add(partition.getId());
-        continue;
-      }
-      HashSet<Long> partitionIds = partitionValuesMap_.get(i).get(literal);
-      if (partitionIds == null) {
-        partitionIds = Sets.newHashSet();
-        partitionValuesMap_.get(i).put(literal, partitionIds);
-        stats.setNumDistinctValues(stats.getNumDistinctValues() + 1);
-      }
-      partitionIds.add(partition.getId());
-    }
-    nameToPartitionMap_.put(partition.getPartitionName(), partition);
-  }
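  // A sketch of the bookkeeping above for a table partitioned by (year), with
  // illustrative ids: after adding year=2015 (id 1), year=2016 (id 2) and a NULL
  // partition key (id 3), partitionValuesMap_.get(0) holds {2015 -> {1}, 2016 -> {2}},
  // nullPartitionIds_.get(0) holds {3}, and the year column's stats report three
  // distinct values and one null.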
-
-  /**
-   * Drops the partition having the given partition spec from HdfsTable. Cleans up its
-   * metadata from all the mappings used to speed up partition pruning/lookup.
-   * Also updates partition column statistics. Given partitionSpec must match exactly
-   * one partition.
-   * Returns the HdfsPartition that was dropped. If the partition does not exist, returns
-   * null.
-   */
-  public HdfsPartition dropPartition(List<TPartitionKeyValue> partitionSpec) {
-    return dropPartition(getPartitionFromThriftPartitionSpec(partitionSpec));
-  }
-
-  /**
-   * Drops a partition and updates partition column statistics. Returns the
-   * HdfsPartition that was dropped or null if the partition does not exist.
-   */
-  private HdfsPartition dropPartition(HdfsPartition partition) {
-    if (partition == null) return null;
-    totalHdfsBytes_ -= partition.getSize();
-    numHdfsFiles_ -= partition.getNumFileDescriptors();
-    Preconditions.checkArgument(partition.getPartitionValues().size() ==
-        numClusteringCols_);
-    Long partitionId = partition.getId();
-    // Remove the partition id from the list of partition ids and other mappings.
-    partitionIds_.remove(partitionId);
-    partitionMap_.remove(partitionId);
-    nameToPartitionMap_.remove(partition.getPartitionName());
-    perPartitionFileDescMap_.remove(partition.getLocation());
-    for (int i = 0; i < partition.getPartitionValues().size(); ++i) {
-      ColumnStats stats = getColumns().get(i).getStats();
-      LiteralExpr literal = partition.getPartitionValues().get(i);
-      // Check if this is a null literal.
-      if (literal instanceof NullLiteral) {
-        nullPartitionIds_.get(i).remove(partitionId);
-        stats.setNumNulls(stats.getNumNulls() - 1);
-        if (nullPartitionIds_.get(i).isEmpty()) {
-          stats.setNumDistinctValues(stats.getNumDistinctValues() - 1);
-        }
-        continue;
-      }
-      HashSet<Long> partitionIds = partitionValuesMap_.get(i).get(literal);
-      // If there are multiple partition ids corresponding to a literal, remove
-      // only this id. Otherwise, remove the <literal, id> pair.
-      if (partitionIds.size() > 1) partitionIds.remove(partitionId);
-      else {
-        partitionValuesMap_.get(i).remove(literal);
-        stats.setNumDistinctValues(stats.getNumDistinctValues() - 1);
-      }
-    }
-    return partition;
-  }
-
-  private void addDefaultPartition(StorageDescriptor storageDescriptor)
-      throws CatalogException {
-    // Default partition has no files and is not referred to by scan nodes. Data sinks
-    // refer to this to understand how to create new partitions.
-    HdfsStorageDescriptor hdfsStorageDescriptor =
-        HdfsStorageDescriptor.fromStorageDescriptor(this.name_, storageDescriptor);
-    HdfsPartition partition = HdfsPartition.defaultPartition(this,
-        hdfsStorageDescriptor);
-    partitionMap_.put(partition.getId(), partition);
-  }
-
-  @Override
-  public void load(boolean reuseMetadata, IMetaStoreClient client,
-      org.apache.hadoop.hive.metastore.api.Table msTbl) throws TableLoadingException {
-    load(reuseMetadata, client, msTbl, true, true, null);
-  }
-
-  /**
-   * Loads table metadata from the Hive Metastore.
-   *
-   * If 'reuseMetadata' is false, performs a full metadata load from the Hive Metastore,
-   * including partition and file metadata. Otherwise, loads metadata incrementally and
-   * updates this HdfsTable in place so that it is in sync with the Hive Metastore.
-   *
-   * Depending on the operation that triggered the table metadata load, not all the
-   * metadata may need to be updated. If 'partitionsToUpdate' is not null, it specifies a
-   * list of partitions for which metadata should be updated. Otherwise, all partition
-   * metadata will be updated from the Hive Metastore.
-   *
-   * If 'loadFileMetadata' is true, file metadata of the specified partitions are
-   * reloaded while reusing existing file descriptors to avoid loading metadata for files
-   * that haven't changed. If 'partitionsToUpdate' is not specified, file metadata of all
-   * the partitions are loaded.
-   *
-   * If 'loadTableSchema' is true, the table schema is loaded from the Hive Metastore.
-   *
-   * There are several cases where existing file descriptors might be reused incorrectly:
-   * 1. an ALTER TABLE ADD PARTITION or dynamic partition insert is executed through
-   *    Hive. This does not update the lastDdlTime.
-   * 2. Hdfs rebalancer is executed. This changes the block locations but doesn't update
-   *    the mtime (file modification time).
-   * If any of these occur, user has to execute "invalidate metadata" to invalidate the
-   * metadata cache of the table and trigger a fresh load.
-   */
-  public void load(boolean reuseMetadata, IMetaStoreClient client,
-      org.apache.hadoop.hive.metastore.api.Table msTbl, boolean loadFileMetadata,
-      boolean loadTableSchema, Set<String> partitionsToUpdate)
-      throws TableLoadingException {
-    // turn all exceptions into TableLoadingException
-    msTable_ = msTbl;
-    try {
-      if (loadTableSchema) loadSchema(client, msTbl);
-      if (reuseMetadata && getCatalogVersion() == Catalog.INITIAL_CATALOG_VERSION) {
-        // This is the special case of CTAS that creates a 'temp' table that does not
-        // actually exist in the Hive Metastore.
-        initializePartitionMetadata(msTbl);
-        updateStatsFromHmsTable(msTbl);
-        return;
-      }
-      // Load partition and file metadata
-      if (!reuseMetadata) {
-        // Load all partitions from Hive Metastore, including file metadata.
-        LOG.debug("load table from Hive Metastore: " + db_.getName() + "." + name_);
-        List<org.apache.hadoop.hive.metastore.api.Partition> msPartitions =
-            Lists.newArrayList();
-        msPartitions.addAll(MetaStoreUtil.fetchAllPartitions(
-            client, db_.getName(), name_, NUM_PARTITION_FETCH_RETRIES));
-        loadAllPartitions(msPartitions, msTbl);
-      } else {
-        // Incrementally update this table's partitions and file metadata
-        LOG.debug("incremental update for table: " + db_.getName() + "." + name_);
-        Preconditions.checkState(partitionsToUpdate == null || loadFileMetadata);
-        updateMdFromHmsTable(msTbl);
-        if (msTbl.getPartitionKeysSize() == 0) {
-          if (loadFileMetadata) updateUnpartitionedTableFileMd();
-        } else {
-          updatePartitionsFromHms(client, partitionsToUpdate, loadFileMetadata);
-        }
-      }
-      if (loadTableSchema) setAvroSchema(client, msTbl);
-      updateStatsFromHmsTable(msTbl);
-    } catch (TableLoadingException e) {
-      throw e;
-    } catch (Exception e) {
-      throw new TableLoadingException("Failed to load metadata for table: " + name_, e);
-    }
-  }
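  // Illustrative invocations of load() above ('client', 'msTbl' and 'partNames' are
  // assumed to be available at the call site):
  //   load(false, client, msTbl, true, true, null);       // full reload from the HMS
  //   load(true, client, msTbl, true, false, partNames);  // refresh file metadata only
  //                                                       // for the named partitions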
-
-  /**
-   * Updates the table metadata, including 'hdfsBaseDir_', 'isMarkedCached_',
-   * and 'accessLevel_' from 'msTbl'. Throws an IOException if there was an error
-   * accessing the table location path.
-   */
-  private void updateMdFromHmsTable(org.apache.hadoop.hive.metastore.api.Table msTbl)
-      throws IOException {
-    Preconditions.checkNotNull(msTbl);
-    hdfsBaseDir_ = msTbl.getSd().getLocation();
-    isMarkedCached_ = HdfsCachingUtil.validateCacheParams(msTbl.getParameters());
-    if (msTbl.getPartitionKeysSize() == 0) {
-      Path location = new Path(hdfsBaseDir_);
-      FileSystem fs = location.getFileSystem(CONF);
-      if (fs.exists(location)) {
-        accessLevel_ = getAvailableAccessLevel(fs, location);
-      }
-    }
-    setMetaStoreTable(msTbl);
-  }
-
-  /**
-   * Updates the file metadata of an unpartitioned HdfsTable.
-   */
-  private void updateUnpartitionedTableFileMd() throws CatalogException {
-    LOG.debug("update unpartitioned table: " + name_);
-    resetPartitions();
-    org.apache.hadoop.hive.metastore.api.Table msTbl = getMetaStoreTable();
-    Preconditions.checkNotNull(msTbl);
-    addDefaultPartition(msTbl.getSd());
-    Map<FsKey, FileBlocksInfo> fileBlocksToLoad = Maps.newHashMap();
-    HdfsPartition part = createPartition(msTbl.getSd(), null, fileBlocksToLoad);
-    addPartition(part);
-    loadDiskIds(fileBlocksToLoad);
-    if (isMarkedCached_) part.markCached();
-  }
-
-  /**
-   * Updates the partitions of an HdfsTable so that they are in sync with the Hive
-   * Metastore. It reloads partitions that were marked 'dirty' by doing a DROP + CREATE.
-   * It removes from this table partitions that no longer exist in the Hive Metastore and
-   * adds partitions that were added externally (e.g. using Hive) to the Hive Metastore
-   * but do not exist in this table. If 'loadFileMetadata' is true, it triggers
-   * file/block metadata reload for the partitions specified in 'partitionsToUpdate', if
-   * any, or for all the table partitions if 'partitionsToUpdate' is null.
-   */
-  private void updatePartitionsFromHms(IMetaStoreClient client,
-      Set<String> partitionsToUpdate, boolean loadFileMetadata) throws Exception {
-    LOG.debug("sync table partitions: " + name_);
-    org.apache.hadoop.hive.metastore.api.Table msTbl = getMetaStoreTable();
-    Preconditions.checkNotNull(msTbl);
-    Preconditions.checkState(msTbl.getPartitionKeysSize() != 0);
-    Preconditions.checkState(loadFileMetadata || partitionsToUpdate == null);
-
-    // Retrieve all the partition names from the Hive Metastore. We need this to
-    // identify the delta between partitions of the local HdfsTable and the table entry
-    // in the Hive Metastore. Note: This is a relatively "cheap" operation
-    // (~.3 secs for 30K partitions).
-    Set<String> msPartitionNames = Sets.newHashSet();
-    msPartitionNames.addAll(
-        client.listPartitionNames(db_.getName(), name_, (short) -1));
-    // Names of loaded partitions in this table
-    Set<String> partitionNames = Sets.newHashSet();
-    // Partitions for which file metadata must be loaded
-    List<HdfsPartition> partitionsToUpdateFileMd = Lists.newArrayList();
-    // Partitions that need to be dropped and recreated from scratch
-    List<HdfsPartition> dirtyPartitions = Lists.newArrayList();
-    // Partitions that need to be removed from this table. That includes dirty
-    // partitions as well as partitions that were removed from the Hive Metastore.
-    List<HdfsPartition> partitionsToRemove = Lists.newArrayList();
-    // Identify dirty partitions that need to be loaded from the Hive Metastore and
-    // partitions that no longer exist in the Hive Metastore.
-    for (HdfsPartition partition: partitionMap_.values()) {
-      // Ignore the default partition
-      if (partition.isDefaultPartition()) continue;
-      // Remove partitions that don't exist in the Hive Metastore. These are partitions
-      // that were removed from HMS using some external process, e.g. Hive.
-      if (!msPartitionNames.contains(partition.getPartitionName())) {
-        partitionsToRemove.add(partition);
-      }
-      if (partition.isDirty()) {
-        // Dirty partitions are updated by removing them from table's partition
-        // list and loading them from the Hive Metastore.
-        dirtyPartitions.add(partition);
-      } else {
-        if (partitionsToUpdate == null && loadFileMetadata) {
-          partitionsToUpdateFileMd.add(partition);
-        }
-      }
-      Preconditions.checkNotNull(partition.getCachedMsPartitionDescriptor());
-      partitionNames.add(partition.getPartitionName());
-    }
-    partitionsToRemove.addAll(dirtyPartitions);
-    for (HdfsPartition partition: partitionsToRemove) dropPartition(partition);
-    // Load dirty partitions from Hive Metastore
-    loadPartitionsFromMetastore(dirtyPartitions, client);
-
-    // Identify and load partitions that were added in the Hive Metastore but don't
-    // exist in this table.
-    Set<String> newPartitionsInHms = Sets.difference(msPartitionNames, partitionNames);
-    loadPartitionsFromMetastore(newPartitionsInHms, client);
-    // If a list of modified partitions (old and new) is specified, don't reload file
-    // metadata for the new ones as they have already been detected in HMS and have been
-    // reloaded by loadPartitionsFromMetastore().
-    if (partitionsToUpdate != null) {
-      partitionsToUpdate.removeAll(newPartitionsInHms);
-    }
-
-    // Load file metadata. Until we have a notification mechanism for when a
-    // file changes in hdfs, it is sometimes required to reload all the file
-    // descriptors and block metadata of a table (e.g. REFRESH statement).
-    if (loadFileMetadata) {
-      if (partitionsToUpdate != null) {
-        // Only reload file metadata of partitions specified in 'partitionsToUpdate'
-        Preconditions.checkState(partitionsToUpdateFileMd.isEmpty());
-        partitionsToUpdateFileMd = getPartitionsByName(partitionsToUpdate);
-      }
-      loadPartitionFileMetadata(partitionsToUpdateFileMd);
-    }
-  }
-
-  /**
-   * Returns the HdfsPartition objects associated with the specified list of partition
-   * names.
-   */
-  private List<HdfsPartition> getPartitionsByName(Collection<String> partitionNames) {
-    List<HdfsPartition> partitions = Lists.newArrayList();
-    for (String partitionName: partitionNames) {
-      String partName = DEFAULT_PARTITION_NAME;
-      if (partitionName.length() > 0) {
-        // Trim the last trailing char '/' from each partition name
-        partName = partitionName.substring(0, partitionName.length()-1);
-      }
-      Preconditions.checkState(nameToPartitionMap_.containsKey(partName),
-          "Invalid partition name: " + partName);
-      partitions.add(nameToPartitionMap_.get(partName));
-    }
-    return partitions;
-  }
-
-  /**
-   * Updates the cardinality of this table from an HMS table. Sets the cardinalities of
-   * dummy/default partitions for the case of unpartitioned tables.
-   */
-  private void updateStatsFromHmsTable(
-      org.apache.hadoop.hive.metastore.api.Table msTbl) {
-    numRows_ = getRowCount(msTbl.getParameters());
-    // For unpartitioned tables set the numRows in its partitions
-    // to the table's numRows.
-    if (numClusteringCols_ == 0 && !partitionMap_.isEmpty()) {
-      // Unpartitioned tables have a 'dummy' partition and a default partition.
-      // Temp tables used in CTAS statements have one partition.
-      Preconditions.checkState(partitionMap_.size() == 2 || partitionMap_.size() == 1);
-      for (HdfsPartition p: partitionMap_.values()) {
-        p.setNumRows(numRows_);
-      }
-    }
-  }
-
-  /**
-   * Returns whether the table has the 'skip.header.line.count' property set.
-   */
-  private boolean hasSkipHeaderLineCount() {
-    String key = TBL_PROP_SKIP_HEADER_LINE_COUNT;
-    org.apache.hadoop.hive.metastore.api.Table msTbl = getMetaStoreTable();
-    if (msTbl == null) return false;
-    return msTbl.getParameters().containsKey(key);
-  }
-
-  /**
-   * Parses and returns the value of the 'skip.header.line.count' table property. If the
-   * value is not set for the table, returns 0. If parsing fails or a value < 0 is found,
-   * the error parameter is updated to contain an error message.
-   */
-  public int parseSkipHeaderLineCount(StringBuilder error) {
-    if (!hasSkipHeaderLineCount()) return 0;
-    return parseSkipHeaderLineCount(getMetaStoreTable().getParameters(), error);
-  }
-
-  /**
-   * Parses and returns the value of the 'skip.header.line.count' table property. The
-   * caller must ensure that the property is contained in the 'tblProperties' map. If
-   * parsing fails or a value < 0 is found, the error parameter is updated to contain an
-   * error message.
-   */
-  public static int parseSkipHeaderLineCount(Map<String, String> tblProperties,
-      StringBuilder error) {
-    Preconditions.checkState(tblProperties != null);
-    String key = TBL_PROP_SKIP_HEADER_LINE_COUNT;
-    Preconditions.checkState(tblProperties.containsKey(key));
-    // Try to parse.
-    String string_value = tblProperties.get(key);
-    int skipHeaderLineCount = 0;
-    String error_msg = String.format("Invalid value for table property %s: %s (value " +
-        "must be an integer >= 0)", key, string_value);
-    try {
-      skipHeaderLineCount = Integer.parseInt(string_value);
-    } catch (NumberFormatException exc) {
-      error.append(error_msg);
-    }
-    if (skipHeaderLineCount < 0) error.append(error_msg);
-    return skipHeaderLineCount;
-  }
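  // A minimal sketch of using the parser above (property values are illustrative):
  //   StringBuilder err = new StringBuilder();
  //   Map<String, String> props = Maps.newHashMap();
  //   props.put(TBL_PROP_SKIP_HEADER_LINE_COUNT, "2");
  //   int n = parseSkipHeaderLineCount(props, err);  // n == 2, err stays empty
  //   // A non-numeric value returns 0 and appends an error; a negative value also
  //   // appends an error.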
-
-  /**
-   * Sets avroSchema_ if the table or any of the partitions in the table are stored
-   * as Avro. Additionally, this method also reconciles the schema if the column
-   * definitions from the metastore differ from the Avro schema.
-   */
-  private void setAvroSchema(IMetaStoreClient client,
-      org.apache.hadoop.hive.metastore.api.Table msTbl) throws Exception {
-    Preconditions.checkState(isSchemaLoaded_);
-    String inputFormat = msTbl.getSd().getInputFormat();
-    if (HdfsFileFormat.fromJavaClassName(inputFormat) == HdfsFileFormat.AVRO
-        || hasAvroData_) {
-      // Look for Avro schema in TBLPROPERTIES and in SERDEPROPERTIES, with the latter
-      // taking precedence.
-      List<Map<String, String>> schemaSearchLocations = Lists.newArrayList();
-      schemaSearchLocations.add(
-          getMetaStoreTable().getSd().getSerdeInfo().getParameters());
-      schemaSearchLocations.add(getMetaStoreTable().getParameters());
-
-      avroSchema_ = AvroSchemaUtils.getAvroSchema(schemaSearchLocations);
-
-      if (avroSchema_ == null) {
-        // No Avro schema was explicitly set in the table metadata, so infer the Avro
-        // schema from the column definitions.
-        Schema inferredSchema = AvroSchemaConverter.convertFieldSchemas(
-            msTbl.getSd().getCols(), getFullName());
-        avroSchema_ = inferredSchema.toString();
-      }
-      String serdeLib = msTbl.getSd().getSerdeInfo().getSerializationLib();
-      if (serdeLib == null ||
-          serdeLib.equals("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe")) {
-        // If the SerDe library is null or set to LazySimpleSerDe, it
-        // indicates there is an issue with the table metadata since Avro tables need a
-        // non-native SerDe. Instead of failing to load the table, fall back to
-        // using the fields from the storage descriptor (same as Hive).
-        return;
-      } else {
-        // Generate new FieldSchemas from the Avro schema. This step reconciles
-        // differences in the column definitions and the Avro schema. For
-        // Impala-created tables this step is not necessary because the same
-        // resolution is done during table creation. But Hive-created tables
-        // store the original column definitions, and not the reconciled ones.
-        List<ColumnDef> colDefs =
-            ColumnDef.createFromFieldSchemas(msTbl.getSd().getCols());
-        List<ColumnDef> avroCols = AvroSchemaParser.parse(avroSchema_);
-        StringBuilder warning = new StringBuilder();
-        List<ColumnDef> reconciledColDefs =
-            AvroSchemaUtils.reconcileSchemas(colDefs, avroCols, warning);
-        if (warning.length() != 0) {
-          LOG.warn(String.format("Warning while loading table %s:\n%s",
-              getFullName(), warning.toString()));
-        }
-        AvroSchemaUtils.setFromSerdeComment(reconciledColDefs);
-        // Reset and update nonPartFieldSchemas_ to the reconciled colDefs.
-        nonPartFieldSchemas_.clear();
-        nonPartFieldSchemas_.addAll(ColumnDef.toFieldSchemas(reconciledColDefs));
-        // Update the columns as per the reconciled colDefs and re-load stats.
-        clearColumns();
-        addColumnsFromFieldSchemas(msTbl.getPartitionKeys());
-        addColumnsFromFieldSchemas(nonPartFieldSchemas_);
-        loadAllColumnStats(client);
-      }
-    }
-  }
-
-  /**
-   * Loads table schema and column stats from Hive Metastore.
-   */
-  private void loadSchema(IMetaStoreClient client,
-      org.apache.hadoop.hive.metastore.api.Table msTbl) throws Exception {
-    nonPartFieldSchemas_.clear();
-    // set nullPartitionKeyValue from the hive conf.
-    nullPartitionKeyValue_ = client.getConfigValue(
-        "hive.exec.default.partition.name", "__HIVE_DEFAULT_PARTITION__");
-
-    // set NULL indicator string from table properties
-    nullColumnValue_ =
-        msTbl.getParameters().get(serdeConstants.SERIALIZATION_NULL_FORMAT);
-    if (nullColumnValue_ == null) nullColumnValue_ = DEFAULT_NULL_COLUMN_VALUE;
-
-    // Excludes partition columns.
-    nonPartFieldSchemas_.addAll(msTbl.getSd().getCols());
-
-    // The number of clustering columns is the number of partition keys.
-    numClusteringCols_ = msTbl.getPartitionKeys().size();
-    partitionLocationCompressor_.setClusteringColumns(numClusteringCols_);
-    clearColumns();
-    // Add all columns to the table. Ordering is important: partition columns first,
-    // then all other columns.
-    addColumnsFromFieldSchemas(msTbl.getPartitionKeys());
-    addColumnsFromFieldSchemas(nonPartFieldSchemas_);
-    loadAllColumnStats(client);
-    isSchemaLoaded_ = true;
-  }
-
-  /**
-   * Loads partitions from the Hive Metastore and adds them to the internal list of
-   * table partitions.
-   */
-  private void loadPartitionsFromMetastore(List<HdfsPartition> partitions,
-      IMetaStoreClient client) throws Exception {
-    Preconditions.checkNotNull(partitions);
-    if (partitions.isEmpty()) return;
-    LOG.info(String.format("Incrementally updating %d/%d partitions.",
-        partitions.size(), partitionMap_.size()));
-    Set<String> partitionNames = Sets.newHashSet();
-    for (HdfsPartition part: partitions) {
-      partitionNames.add(part.getPartitionName());
-    }
-    loadPartitionsFromMetastore(partitionNames, client);
-  }
-
-  /**
-   * Loads from the Hive Metastore the partitions that correspond to the specified
-   * 'partitionNames' and adds them to the internal list of table partitions.
-   */
-  private void loadPartitionsFromMetastore(Set<String> partitionNames,
-      IMetaStoreClient client) throws Exception {
-    Preconditions.checkNotNull(partitionNames);
-    if (partitionNames.isEmpty()) return;
-    // Load partition metadata from Hive Metastore.
-    List<org.apache.hadoop.hive.metastore.api.Partition> msPartitions =
-        Lists.newArrayList();
-    msPartitions.addAll(MetaStoreUtil.fetchPartitionsByName(client,
-        Lists.newArrayList(partitionNames), db_.getName(), name_));
-
-    Map<FsKey, FileBlocksInfo> fileBlocksToLoad = Maps.newHashMap();
-    for (org.apache.hadoop.hive.metastore.api.Partition msPartition: msPartitions) {
-      HdfsPartition partition =
-          createPartition(msPartition.getSd(), msPartition, fileBlocksToLoad);
-      // If the partition is null, its HDFS path does not exist, and it should not be
-      // added to this table's partition list. Skip the partition.
-      if (partition == null) continue;
-      addPartition(partition);
-      if (partition.getFileFormat() == HdfsFileFormat.AVRO) hasAvroData_ = true;
-      if (msPartition.getParameters() != null) {
-        partition.setNumRows(getRowCount(msPartition.getParameters()));
-      }
-      if (!TAccessLevelUtil.impliesWriteAccess(partition.getAccessLevel())) {
-        // TODO: READ_ONLY isn't exactly correct because it's possible the
-        // partition does not have READ permissions either. When we start checking
-        // whether we can READ from a table, this should be updated to set the
-        // table's access level to the "lowest" effective level across all
-        // partitions. That is, if one partition has READ_ONLY and another has
-        // WRITE_ONLY the table's access level should be NONE.
-        accessLevel_ = TAccessLevel.READ_ONLY;
-      }
-    }
-    loadDiskIds(fileBlocksToLoad);
-  }
-
-  /**
-   * Loads the file descriptors and block metadata of a list of partitions.
-   */
-  private void loadPartitionFileMetadata(List<HdfsPartition> partitions)
-      throws Exception {
-    Preconditions.checkNotNull(partitions);
-    LOG.info(String.format("loading file metadata for %d partitions",
-        partitions.size()));
-    org.apache.hadoop.hive.metastore.api.Table msTbl = getMetaStoreTable();
-    Preconditions.checkNotNull(msTbl);
-    HdfsStorageDescriptor fileFormatDescriptor =
-        HdfsStorageDescriptor.fromStorageDescriptor(this.name_, msTbl.getSd());
-    Map<FsKey, FileBlocksInfo> perFsFileBlocks = Maps.newHashMap();
-    for (HdfsPartition part: partitions) {
-      org.apache.hadoop.hive.metastore.api.Partition msPart =
-          part.toHmsPartition();
-      StorageDescriptor sd = null;
-      if (msPart == null) {
-        // If this partition is not stored in the Hive Metastore (e.g. default partition
-        // of an unpartitioned table), use the table's storage descriptor to load file
-        // metadata.
-        sd = msTbl.getSd();
-      } else {
-        sd = msPart.getSd();
-      }
-      loadPartitionFileMetadata(sd, part, fileFormatDescriptor.getFileFormat(),
-          perFsFileBlocks);
-    }
-    loadDiskIds(perFsFileBlocks);
-  }
-
-  /**
-   * Loads the file descriptors and block metadata of a partition from its
-   * StorageDescriptor. If 'partition' does not have an entry in the Hive Metastore,
-   * 'storageDescriptor' is the StorageDescriptor of the associated table. Populates
-   * 'perFsFileBlocks' with file block info and updates table metadata.
-   */
-  private void loadPartitionFileMetadata(StorageDescriptor storageDescriptor,
-      HdfsPartition partition, HdfsFileFormat fileFormat,
-      Map<FsKey, FileBlocksInfo> perFsFileBlocks) throws Exception {
-    Preconditions.checkNotNull(storageDescriptor);
-    Preconditions.checkNotNull(partition);
-    org.apache.hadoop.hive.metastore.api.Partition msPart =
-        partition.toHmsPartition();
-    boolean isMarkedCached = isMarkedCached_;
-    if (msPart != null) {
-      isMarkedCached = HdfsCachingUtil.validateCacheParams(msPart.getParameters());
-    }
-    Path partDirPath = new Path(storageDescriptor.getLocation());
-    FileSystem fs = partDirPath.getFileSystem(CONF);
-    if (!fs.exists(partDirPath)) return;
-
-    String partitionDir = partDirPath.toString();
-    numHdfsFiles_ -= partition.getNumFileDescriptors();
-    totalHdfsBytes_ -= partition.getSize();
-    Preconditions.checkState(numHdfsFiles_ >= 0 && totalHdfsBytes_ >= 0);
-    updatePartitionFds(partDirPath, isMarkedCached, fileFormat, perFsFileBlocks);
-    List<FileDescriptor> fileDescs = Lists.newArrayList(
-        perPartitionFileDescMap_.get(partDirPath.toString()).values());
-    partition.setFileDescriptors(fileDescs);
-    totalHdfsBytes_ += partition.getSize();
-    numHdfsFiles_ += fileDescs.size();
-  }
-
-  /**
-   * Updates the file descriptors of a partition directory specified by 'partitionPath'
-   * and loads block metadata of new/modified files. Reuses existing FileDescriptors for
-   * unchanged files (indicated by unchanged mtime). The one exception is if the
-   * partition is marked as cached (HDFS caching) in which case the block metadata
-   * cannot be reused. Otherwise, creates new FileDescriptors and adds them to
-   * perPartitionFileDescMap_. 'fileFormat' is the file format of the files in this
-   * partition directory. 'perFsFileBlocks' is populated with the loaded block metadata.
-   */
-  private void updatePartitionFds(Path partitionPath,
-      boolean isMarkedCached, HdfsFileFormat fileFormat,
-      Map<FsKey, FileBlocksInfo> perFsFileBlocks) throws CatalogException {
-    Preconditions.checkNotNull(partitionPath);
-    String partPathStr = partitionPath.toString();
-    try {
-      FileSystem fs = partitionPath.getFileSystem(CONF);
-      if (!fs.exists(partitionPath)) {
-        perPartitionFileDescMap_.put(
-            partPathStr, Maps.<String, FileDescriptor>newHashMap());
-        return;
-      }
-      Map<String, FileDescriptor> fileDescMap =
-          perPartitionFileDescMap_.get(partPathStr);
-      Map<String, FileDescriptor> newFileDescMap = Maps.newHashMap();
-      // Get all the files in the partition directory
-      for (FileStatus fileStatus: fs.listStatus(partitionPath)) {
-        String fileName = fileStatus.getPath().getName().toString();
-        if (fileStatus.isDirectory() || FileSystemUtil.isHiddenFile(fileName) ||
-          HdfsCompression.fromFileName(fileName) == HdfsCompression.LZO_INDEX) {
-          // Ignore directories, hidden files starting with . or _, and LZO index files.
-          // If a directory is erroneously created as a subdirectory of a partition dir,
-          // we should ignore it and move on. Hive will not recurse into directories.
-          // Skip index files; these are read by the LZO scanner directly.
-          continue;
-        }
-        FileDescriptor fd = fileDescMap != null ? fileDescMap.get(fileName) : null;
-        if (fd == null || isMarkedCached || fd.getFileLength() != fileStatus.getLen()
-          || fd.getModificationTime() != fileStatus.getModificationTime()) {
-          // Metadata of cached or modified files are not reused.
-          fd = new FileDescriptor(fileName, fileStatus.getLen(),
-              fileStatus.getModificationTime());
-          loadBlockMetadata(fs, fileStatus, fd, fileFormat, perFsFileBlocks);
-        }
-        newFileDescMap.put(fileName, fd);
-      }
-      perPartitionFileDescMap_.put(partPathStr, newFileDescMap);
-    } catch (Exception e) {
-      throw new CatalogException("Failed to retrieve file descriptors from path " +
-        partitionPath, e);
-    }
-  }
-
-  @Override
-  protected List<String> getColumnNamesWithHmsStats() {
-    List<String> ret = Lists.newArrayList();
-    // Only non-partition columns have column stats in the HMS.
-    for (Column column: getColumns().subList(numClusteringCols_, getColumns().size())) {
-      ret.add(column.getName().toLowerCase());
-    }
-    return ret;
-  }
-
-  @Override
-  protected synchronized void loadFromThrift(TTable thriftTable)
-      throws TableLoadingException {
-    super.loadFromThrift(thriftTable);
-    THdfsTable hdfsTable = thriftTable.getHdfs_table();
-    Preconditions.checkState(hdfsTable.getPartition_prefixes() instanceof ArrayList<?>);
-    partitionLocationCompressor_ = new HdfsPartitionLocationCompressor(
-        numClusteringCols_, (ArrayList<String>)hdfsTable.getPartition_prefixes());
-    hdfsBaseDir_ = hdfsTable.getHdfsBaseDir();
-    nullColumnValue_ = hdfsTable.nullColumnValue;
-    nullPartitionKeyValue_ = hdfsTable.nullPartitionKeyValue;
-    multipleFileSystems_ = hdfsTable.multiple_filesystems;
-    Preconditions.checkState(hdfsTable.getNetwork_addresses() instanceof ArrayList<?>);
-    hostIndex_.populate((ArrayList<TNetworkAddress>)hdfsTable.getNetwork_addresses());
-    resetPartitions();
-
-    try {
-      for (Map.Entry<Long, THdfsPartition> part: hdfsTable.getPartitions().entrySet()) {
-        HdfsPartition hdfsPart =
-            HdfsPartition.fromThrift(this, part.getKey(), part.getValue());
-        addPartition(hdfsPart);
-      }
-    } catch (CatalogException e) {
-      throw new TableLoadingException(e.getMessage());
-    }
-    avroSchema_ = hdfsTable.isSetAvroSchema() ? hdfsTable.getAvroSchema() : null;
-    isMarkedCached_ =
-      HdfsCachingUtil.validateCacheParams(getMetaStoreTable().getParameters());
-  }
-
-  @Override
-  public TTableDescriptor toThriftDescriptor(Set<Long> referencedPartitions) {
-    // Create thrift descriptors to send to the BE.  The BE does not
-    // need any information below the THdfsPartition level.
-    TTableDescriptor tableDesc = new TTableDescriptor(id_.asInt(), TTableType.HDFS_TABLE,
-        getTColumnDescriptors(), numClusteringCols_, name_, db_.getName());
-    tableDesc.setHdfsTable(getTHdfsTable(false, referencedPartitions));
-    return tableDesc;
-  }
-
-  @Override
-  public TTable toThrift() {
-    // Send all metadata between the catalog service and the FE.
-    TTable table = super.toThrift();
-    table.setTable_type(TTableType.HDFS_TABLE);
-    table.setHdfs_table(getTHdfsTable(true, null));
-    return table;
-  }
-
-  /**
-   * Create a THdfsTable corresponding to this HdfsTable. If includeFileDesc is true,
-   * then all partitions and THdfsFileDescs of each partition should be included.
-   * Otherwise, don't include any THdfsFileDescs, and include only those partitions in
-   * the refPartitions set (the backend doesn't need metadata for unreferenced
-   * partitions). To prevent the catalog from hitting an OOM error while trying to
-   * serialize large partition incremental stats, we estimate the stats size and filter
-   * the incremental stats data from partition objects if the estimate exceeds
-   * MAX_INCREMENTAL_STATS_SIZE_BYTES.
-   */
-  private THdfsTable getTHdfsTable(boolean includeFileDesc, Set<Long> refPartitions) {
-    // includeFileDesc implies all partitions should be included (refPartitions == null).
-    Preconditions.checkState(!includeFileDesc || refPartitions == null);
-    int numPartitions =
-        (refPartitions == null) ? partitionMap_.values().size() : refPartitions.size();
-    long statsSizeEstimate =
-        numPartitions * getColumns().size() * STATS_SIZE_PER_COLUMN_BYTES;
-    boolean includeIncrementalStats =
-        (statsSizeEstimate < MAX_INCREMENTAL_STATS_SIZE_BYTES);
-    Map<Long, THdfsPartition> idToPartition = Maps.newHashMap();
-    for (HdfsPartition partition: partitionMap_.values()) {
-      long id = partition.getId();
-      if (refPartitions == null || refPartitions.contains(id)) {
-        idToPartition.put(id,
-            partition.toThrift(includeFileDesc, includeIncrementalStats));
-      }
-    }
-    THdfsTable hdfsTable = new THdfsTable(hdfsBaseDir_, getColumnNames(),
-        nullPartitionKeyValue_, nullColumnValue_, idToPartition);
-    hdfsTable.setAvroSchema(avroSchema_);
-    hdfsTable.setMultiple_filesystems(multipleFileSystems_);
-    if (includeFileDesc) {
-      // Network addresses are used only by THdfsFileBlocks which are inside
-      // THdfsFileDesc, so include network addresses only when including THdfsFileDesc.
-      hdfsTable.setNetwork_addresses(hostIndex_.getList());
-    }
-    hdfsTable.setPartition_prefixes(partitionLocationCompressor_.getPrefixes());
-    return hdfsTable;
-  }
-
-  public long getNumHdfsFiles() { return numHdfsFiles_; }
-  public long getTotalHdfsBytes() { return totalHdfsBytes_; }
-  public String getHdfsBaseDir() { return hdfsBaseDir_; }
-  public boolean isAvroTable() { return avroSchema_ != null; }
-
-  /**
-   * Get the index of hosts that store replicas of blocks of this table.
-   */
-  public ListMap<TNetworkAddress> getHostIndex() { return hostIndex_; }
-
-  /**
-   * Returns the file format that the majority of partitions are stored in.
-   */
-  public HdfsFileFormat getMajorityFormat() {
-    Map<HdfsFileFormat, Integer> numPartitionsByFormat = Maps.newHashMap();
-    for (HdfsPartition partition: partitionMap_.values()) {
-      HdfsFileFormat format = partition.getInputFormatDescriptor().getFileFormat();
-      Integer numPartitions = numPartitionsByFormat.get(format);
-      if (numPartitions == null) {
-        numPartitions = Integer.valueOf(1);
-      } else {
-        numPartitions = Integer.valueOf(numPartitions.intValue() + 1);
-      }
-      numPartitionsByFormat.put(format, numPartitions);
-    }
-
-    int maxNumPartitions = Integer.MIN_VALUE;
-    HdfsFileFormat majorityFormat = null;
-    for (Map.Entry<HdfsFileFormat, Integer> entry: numPartitionsByFormat.entrySet()) {
-      if (entry.getValue().intValue() > maxNumPartitions) {
-        majorityFormat = entry.getKey();
-        maxNumPartitions = entry.getValue().intValue();
-      }
-    }
-    Preconditions.checkNotNull(majorityFormat);
-    return majorityFormat;
-  }
-
-  /**
-   * Returns the HDFS paths corresponding to HdfsTable partitions that don't exist in
-   * the Hive Metastore. An HDFS path is represented as a list of string values, one per
-   * partition key column.
-   */
-  public List<List<String>> getPathsWithoutPartitions() throws CatalogException {
-    List<List<LiteralExpr>> existingPartitions = new ArrayList<List<LiteralExpr>>();
-    // Get the list of partition values of existing partitions in Hive Metastore.
-    for (HdfsPartition partition: partitionMap_.values()) {
-      if (partition.isDefaultPartition()) continue;
-      existingPartitions.add(partition.getPartitionValues());
-    }
-
-    List<String> partitionKeys = Lists.newArrayList();
-    for (int i = 0; i < numClusteringCols_; ++i) {
-      partitionKeys.add(getColumns().get(i).getName());
-    }
-    Path basePath = new Path(hdfsBaseDir_);
-    List<List<String>> partitionsNotInHms = new ArrayList<List<String>>();
-    try {
-      getAllPartitionsNotInHms(basePath, partitionKeys, existingPartitions,
-          partitionsNotInHms);
-    } catch (Exception e) {
-      throw new CatalogException(String.format("Failed to recover partitions for %s " +
-          "with exception:%s.", getFullName(), e));
-    }
-    return partitionsNotInHms;
-  }
-
-  /**
-   * Returns all partitions which match the partition key directory structure and pass
-   * the type compatibility check, and which are not already part of the table.
-   */
-  private void getAllPartitionsNotInHms(Path path, List<String> partitionKeys,
-      List<List<LiteralExpr>> existingPartitions,
-      List<List<String>> partitionsNotInHms) throws IOException {
-    FileSystem fs = path.getFileSystem(CONF);
-    // Check whether the base directory exists.
-    if (!fs.exists(path)) return;
-
-    List<String> partitionValues = Lists.newArrayList();
-    List<LiteralExpr> partitionExprs = Lists.newArrayList();
-    getAllPartitionsNotInHms(path, partitionKeys, 0, fs, partitionValues,
-        partitionExprs, existingPartitions, partitionsNotInHms);
-  }
-
-  /**
-   * Returns all partitions which match the partition keys directory structure and pass
-   * the type compatibility check.
-   *
-   * path e.g. c1=1/c2=2/c3=3
-   * partitionKeys The ordered partition keys. e.g.("c1", "c2", "c3")
-   * depth The start position in partitionKeys to match the path name.
-   * partitionValues The partition values used to create a partition.
-   * partitionExprs The list of LiteralExprs which is used to avoid duplicate partitions.
-   * E.g. Having /c1=0001 and /c1=01, we should make sure only one partition
-   * will be added.
-   * existingPartitions All partitions which exist in Hive Metastore or newly added.
-   * partitionsNotInHms Contains all the recovered partitions.
-   */
-  private void getAllPartitionsNotInHms(Path path, List<String> partitionKeys,
-      int depth, FileSystem fs, List<String> partitionValues,
-      List<LiteralExpr> partitionExprs, List<List<LiteralExpr>> existingPartitions,
-      List<List<String>> partitionsNotInHms) throws IOException {
-    if (depth == partitionKeys.size()) {
-      if (existingPartitions.contains(partitionExprs)) {
-        LOG.trace(String.format("Skip recovery of path '%s' because it already exists " +
-            "in metastore", path.toString()));
-      } else {
-        partitionsNotInHms.add(partitionValues);
-        existingPartitions.add(partitionExprs);
-      }
-      return;
-    }
-
-    FileStatus[] statuses = fs.listStatus(path);
-    for (FileStatus status: statuses) {
-      if (!status.isDirectory()) continue;
-      Pair<String, LiteralExpr> keyValues =
-          getTypeCompatibleValue(status.getPath(), partitionKeys.get(depth));
-      if (keyValues == null) continue;
-
-      List<String> currentPartitionValues = Lists.newArrayList(partitionValues);
-      List<LiteralExpr> currentPartitionExprs = Lists.newArrayList(partitionExprs);
-      currentPartitionValues.add(keyValues.first);
-      currentPartitionExprs.add(keyValues.second);
-      getAllPartitionsNotInHms(status.getPath(), partitionKeys, depth + 1, fs,
-          currentPartitionValues, currentPartitionExprs,
-          existingPartitions, partitionsNotInHms);
-    }
-  }
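For illustration (hypothetical values, not from this patch): with partition columns year INT and month INT, the recursion above walks the directory tree roughly as follows.

    // Hypothetical walk for base/year=2016/month=9 with partitionKeys = ["year", "month"]:
    //   depth 0: base/            -> "year=2016" matches key "year"  -> partitionValues = ["2016"]
    //   depth 1: base/year=2016/  -> "month=9"   matches key "month" -> partitionValues = ["2016", "9"]
    //   depth 2: depth == partitionKeys.size(), so ["2016", "9"] is recorded in partitionsNotInHms,
    //            unless an equivalent partition (e.g. year=2016/month=09) is already in existingPartitions.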
-
-  /**
-   * Checks that the last component of 'path' is of the form "<partitionkey>=<v>"
-   * where 'v' is a type-compatible value from the domain of the 'partitionKey' column.
-   * If not, returns null, otherwise returns a Pair instance, the first element is the
-   * original value, the second element is the LiteralExpr created from the original
-   * value.
-   */
-  private Pair<String, LiteralExpr> getTypeCompatibleValue(Path path,
-      String partitionKey) {
-    String partName[] = path.getName().split("=");
-    if (partName.length != 2 || !partName[0].equals(partitionKey)) return null;
-
-    // Check Type compatibility for Partition value.
-    Column column = getColumn(partName[0]);
-    Preconditions.checkNotNull(column);
-    Type type = column.getType();
-    LiteralExpr expr = null;
-    if (!partName[1].equals(getNullPartitionKeyValue())) {
-      try {
-        expr = LiteralExpr.create(partName[1], type);
-        // Skip large values which exceed the MAX VALUE of the specified Type.
-        if (expr instanceof NumericLiteral) {
-          if (NumericLiteral.isOverflow(((NumericLiteral)expr).getValue(), type)) {
-            LOG.warn(String.format("Skip the overflow value (%s) for Type (%s).",
-                partName[1], type.toSql()));
-            return null;
-          }
-        }
-      } catch (Exception ex) {
-        LOG.debug(String.format("Invalid partition value (%s) for Type (%s).",
-            partName[1], type.toSql()));
-        return null;
-      }
-    } else {
-      expr = new NullLiteral();
-    }
-    return new Pair<String, LiteralExpr>(partName[1], expr);
-  }
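For illustration (hypothetical inputs, not from this patch): the expected behavior of getTypeCompatibleValue for an INT partition column named "year".

    // path ".../year=2016", key "year" -> Pair("2016", NumericLiteral 2016)
    // path ".../year=abc",  key "year" -> null (value cannot be parsed as INT)
    // path ".../month=9",   key "year" -> null (last path component does not match the key)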
-
-  /**
-   * Returns statistics on this table as a tabular result set. Used for the
-   * SHOW TABLE STATS statement. The schema of the returned TResultSet is set
-   * inside this method.
-   */
-  public TResultSet getTableStats() {
-    TResultSet result = new TResultSet();
-    TResultSetMetadata resultSchema = new TResultSetMetadata();
-    result.setSchema(resultSchema);
-
-    for (int i = 0; i < numClusteringCols_; ++i) {
-      // Add the partition-key values as strings for simplicity.
-      Column partCol = getColumns().get(i);
-      TColumn colDesc = new TColumn(partCol.getName(), Type.STRING.toThrift());
-      resultSchema.addToColumns(colDesc);
-    }
-
-    resultSchema.addToColumns(new TColumn("#Rows", Type.BIGINT.toThrift()));
-    resultSchema.addToColumns(new TColumn("#Files", Type.BIGINT.toThrift()));
-    resultSchema.addToColumns(new TColumn("Size", Type.STRING.toThrift()));
-    resultSchema.addToColumns(new TColumn("Bytes Cached", Type.STRING.toThrift()));
-    resultSchema.addToColumns(new TColumn("Cache Replication", Type.STRING.toThrift()));
-    resultSchema.addToColumns(new TColumn("Format", Type.STRING.toThrift()));
-    resultSchema.addToColumns(new TColumn("Incremental stats", Type.STRING.toThrift()));
-    resultSchema.addToColumns(new TColumn("Location", Type.STRING.toThrift()));
-
-    // Pretty print partitions and their stats.
-    ArrayList<HdfsPartition> orderedPartitions =
-        Lists.newArrayList(partitionMap_.values());
-    Collections.sort(orderedPartitions);
-
-    long totalCachedBytes = 0L;
-    for (HdfsPartition p: orderedPartitions) {
-      // Ignore dummy default partition.
-      if (p.isDefaultPartition()) continue;
-      TResultRowBuilder rowBuilder = new TResultRowBuilder();
-
-      // Add the partition-key values (as strings for simplicity).
-      for (LiteralExpr expr: p.getPartitionValues()) {
-        rowBuilder.add(expr.getStringValue());
-      }
-
-      // Add number of rows, files, bytes, cache stats, and file format.
-      rowBuilder.add(p.getNumRows()).add(p.getFileDescriptors().size())
-          .addBytes(p.getSize());
-      if (!p.isMarkedCached()) {
-        // Helps to differentiate partitions that have 0B cached versus partitions
-        // that are not marked as cached.
-        rowBuilder.add("NOT CACHED");
-        rowBuilder.add("NOT CACHED");
-      } else {
-        // Calculate the number of bytes that are cached.
-        long cachedBytes = 0L;
-        for (FileDescriptor fd: p.getFileDescriptors()) {
-          for (THdfsFileBlock fb: fd.getFileBlocks()) {
-            if (fb.getIs_replica_cached().contains(true)) {
-              cachedBytes += fb.getLength();
-            }
-          }
-        }
-        totalCachedBytes += cachedBytes;
-        rowBuilder.addBytes(cachedBytes);
-
-        // Extract the cache replication factor from the table's parameters if the
-        // table is not partitioned, or directly from the partition otherwise.
-        Short rep = HdfsCachingUtil.getCachedCacheReplication(
-            numClusteringCols_ == 0 ?
-            p.getTable().getMetaStoreTable().getParameters() :
-            p.getParameters());
-        rowBuilder.add(rep.toString());
-      }
-      rowBuilder.add(p.getInputFormatDescriptor().getFileFormat().toString());
-
-      rowBuilder.add(String.valueOf(p.hasIncrementalStats()));
-      rowBuilder.add(p.getLocation());
-      result.addToRows(rowBuilder.get());
-    }
-
-    // For partitioned tables add a summary row at the bottom.
-    if (numClusteringCols_ > 0) {
-      TResultRowBuilder rowBuilder = new TResultRowBuilder();
-      int numEmptyCells = numClusteringCols_ - 1;
-      rowBuilder.add("Total");
-      for (int i = 0; i < numEmptyCells; ++i) {
-        rowBuilder.add("");
-      }
-
-      // Total num rows, files, and bytes (leave format empty).
-      rowBuilder.add(numRows_).add(numHdfsFiles_).addBytes(totalHdfsBytes_)
-          .addBytes(totalCachedBytes).add("").add("").add("").add("");
-      result.addToRows(rowBuilder.get());
-    }
-    return result;
-  }
-
-  /**
-   * Returns file info for the given dbname/tableName and partition spec.
-   * If the partition spec is null, returns file info for all partitions,
-   * ordered by partition.
-   */
-  public TResultSet getFiles(List<TPartitionKeyValue> partitionSpec)
-      throws CatalogException {
-    TResultSet result = new TResultSet();
-    TResultSetMetadata resultSchema = new TResultSetMetadata();
-    result.setSchema(resultSchema);
-    resultSchema.addToColumns(new TColumn("Path", Type.STRING.toThrift()));
-    resultSchema.addToColumns(new TColumn("Size", Type.STRING.toThrift()));
-    resultSchema.addToColumns(new TColumn("Partition", Type.STRING.toThrift()));
-    result.setRows(Lists.<TResultRow>newArrayList());
-
-    List<HdfsPartition> orderedPartitions = null;
-    if (partitionSpec == null) {
-      orderedPartitions = Lists.newArrayList(partitionMap_.values());
-      Collections.sort(orderedPartitions);
-    } else {
-      // Get the HdfsPartition object for the given partition spec.
-      HdfsPartition partition = getPartitionFromThriftPartitionSpec(partitionSpec);
-      Preconditions.checkState(partition != null);
-      orderedPartitions = Lists.newArrayList(partition);
-    }
-
-    for (HdfsPartition p: orderedPartitions) {
-      List<FileDescriptor> orderedFds = Lists.newArrayList(p.getFileDescriptors());
-      Collections.sort(orderedFds);
-      for (FileDescriptor fd: orderedFds) {
-        TResultRowBuilder rowBuilder = new TResultRowBuilder();
-        rowBuilder.add(p.getLocation() + "/" + fd.getFileName());
-        rowBuilder.add(PrintUtils.printBytes(fd.getFileLength()));
-        rowBuilder.add(p.getPartitionName());
-        result.addToRows(rowBuilder.get());
-      }
-    }
-    return result;
-  }
-
-  /**
-   * Constructs a partition name from a list of TPartitionKeyValue objects.
-   */
-  public static String constructPartitionName(List<TPartitionKeyValue> partitionSpec) {
-    List<String> partitionCols = Lists.newArrayList();
-    List<String> partitionVals = Lists.newArrayList();
-    for (TPartitionKeyValue kv: partitionSpec) {
-      partitionCols.add(kv.getName());
-      partitionVals.add(kv.getValue());
-    }
-    return org.apache.hadoop.hive.common.FileUtils.makePartName(partitionCols,
-        partitionVals);
-  }
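For illustration (hypothetical, not from this patch): a minimal sketch of what constructPartitionName returns, assuming the standard Hive FileUtils.makePartName escaping and the generated two-argument TPartitionKeyValue(name, value) constructor.

    List<TPartitionKeyValue> spec = Lists.newArrayList(
        new TPartitionKeyValue("year", "2016"), new TPartitionKeyValue("month", "9"));
    String name = HdfsTable.constructPartitionName(spec);
    // name == "year=2016/month=9"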
-
-  /**
-   * Reloads the metadata of partition 'oldPartition' by removing
-   * it from the table and reconstructing it from the HMS partition object
-   * 'hmsPartition'. If 'oldPartition' is null, nothing is removed and the
-   * partition constructed from 'hmsPartition' is simply added.
-   */
-  public void reloadPartition(HdfsPartition oldPartition, Partition hmsPartition)
-      throws CatalogException {
-    HdfsPartition refreshedPartition = createPartition(
-        hmsPartition.getSd(), hmsPartition);
-    Preconditions.checkArgument(oldPartition == null
-        || oldPartition.compareTo(refreshedPart

<TRUNCATED>


[05/61] [partial] incubator-impala git commit: IMPALA-3786: Replace "cloudera" with "apache" (part 1)

Posted by kw...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/ColumnLineageGraph.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/ColumnLineageGraph.java b/fe/src/main/java/org/apache/impala/analysis/ColumnLineageGraph.java
new file mode 100644
index 0000000..a00bf53
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/ColumnLineageGraph.java
@@ -0,0 +1,680 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import java.text.SimpleDateFormat;
+import java.util.Collection;
+import java.util.Date;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeSet;
+
+import org.json.simple.JSONArray;
+import org.json.simple.JSONObject;
+import org.json.simple.JSONValue;
+import org.json.simple.parser.JSONParser;
+import org.json.simple.parser.ParseException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.cloudera.impala.catalog.Table;
+import com.cloudera.impala.common.Id;
+import com.cloudera.impala.common.IdGenerator;
+import com.cloudera.impala.thrift.TEdgeType;
+import com.cloudera.impala.thrift.TQueryCtx;
+import com.cloudera.impala.thrift.TLineageGraph;
+import com.cloudera.impala.thrift.TMultiEdge;
+import com.cloudera.impala.thrift.TVertex;
+import com.google.common.base.Joiner;
+import com.google.common.base.Preconditions;
+import com.google.common.base.Strings;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+import com.google.common.hash.Hasher;
+import com.google.common.hash.Hashing;
+
+/**
+ * Represents a vertex in the column lineage graph. A Vertex may correspond to a base
+ * table column, a column in the destination table (for the case of INSERT or CTAS
+ * queries) or a result expr (labeled column of a query result set).
+ */
+final class Vertex implements Comparable<Vertex> {
+  // Unique identifier of this vertex.
+  private final VertexId id_;
+
+  private final String type_ = "COLUMN";
+
+  // A fully-qualified column name or the label of a result expr
+  private final String label_;
+
+  public Vertex(VertexId id, String label) {
+    Preconditions.checkNotNull(id);
+    Preconditions.checkNotNull(label);
+    id_ = id;
+    label_ = label;
+  }
+  public VertexId getVertexId() { return id_; }
+  public String getLabel() { return label_; }
+  public String getType() { return type_; }
+
+  @Override
+  public String toString() { return "(" + id_ + ":" + type_ + ":" + label_ + ")"; }
+
+  /**
+   * Encodes this Vertex object into a JSON object represented by a Map.
+   */
+  public Map toJson() {
+    // Use a LinkedHashMap to generate a strict ordering of elements.
+    Map obj = new LinkedHashMap();
+    obj.put("id", id_.asInt());
+    obj.put("vertexType", type_);
+    obj.put("vertexId", label_);
+    return obj;
+  }
+
+  /**
+   * Constructs a Vertex object from a JSON object. The new object is returned.
+   */
+  public static Vertex fromJsonObj(JSONObject obj) {
+    int id = ((Long) obj.get("id")).intValue();
+    String label = (String) obj.get("vertexId");
+    return new Vertex(new VertexId(id), label);
+  }
+
+  /**
+   * Encodes this Vertex object into a thrift object
+   */
+  public TVertex toThrift() {
+    return new TVertex(id_.asInt(), label_);
+  }
+
+  /**
+   * Constructs a Vertex object from a thrift object.
+   */
+  public static Vertex fromThrift(TVertex vertex) {
+    int id = ((Long) vertex.id).intValue();
+    return new Vertex(new VertexId(id), vertex.label);
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (obj == null) return false;
+    if (obj.getClass() != this.getClass()) return false;
+    Vertex vertex = (Vertex) obj;
+    return this.id_.equals(vertex.id_) &&
+        this.label_.equals(vertex.label_);
+  }
+
+  public int compareTo(Vertex cmp) { return this.id_.compareTo(cmp.id_); }
+
+  @Override
+  public int hashCode() { return id_.hashCode(); }
+}
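For illustration (hypothetical column label, not from this patch): what a single vertex serializes to via toJson().

    Vertex v = new Vertex(new VertexId(0), "functional.alltypes.id");
    String json = JSONValue.toJSONString(v.toJson());
    // json == {"id":0,"vertexType":"COLUMN","vertexId":"functional.alltypes.id"}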
+
+/**
+ * Represents the unique identifier of a Vertex.
+ */
+class VertexId extends Id<VertexId> {
+  protected VertexId(int id) {
+    super(id);
+  }
+  public static IdGenerator<VertexId> createGenerator() {
+    return new IdGenerator<VertexId>() {
+      @Override
+      public VertexId getNextId() { return new VertexId(nextId_++); }
+      @Override
+      public VertexId getMaxId() { return new VertexId(nextId_ - 1); }
+    };
+  }
+}
+
+/**
+ * Represents a set of uni-directional edges in the column lineage graph, one edge from
+ * every source Vertex in 'sources_' to every target Vertex in 'targets_'. An edge
+ * indicates a dependency between a source and a target Vertex. There are two types of
+ * edges, PROJECTION and PREDICATE, that are described in the ColumnLineageGraph class.
+ */
+final class MultiEdge {
+  public static enum EdgeType {
+    PROJECTION, PREDICATE
+  }
+  private final Set<Vertex> sources_;
+  private final Set<Vertex> targets_;
+  private final EdgeType edgeType_;
+
+  public MultiEdge(Set<Vertex> sources, Set<Vertex> targets, EdgeType type) {
+    sources_ = sources;
+    targets_ = targets;
+    edgeType_ = type;
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder builder = new StringBuilder();
+    Joiner joiner = Joiner.on(",");
+    builder.append("Sources: [");
+    builder.append(joiner.join(sources_) + "]\n");
+    builder.append("Targets: [");
+    builder.append(joiner.join(targets_) + "]\n");
+    builder.append("Type: " + edgeType_);
+    return builder.toString();
+  }
+
+  /**
+   * Encodes this MultiEdge object to a JSON object represented by a Map.
+   */
+  public Map toJson() {
+    Map obj = new LinkedHashMap();
+    // Add sources
+    JSONArray sourceIds = new JSONArray();
+    for (Vertex vertex: sources_) {
+      sourceIds.add(vertex.getVertexId());
+    }
+    obj.put("sources", sourceIds);
+    // Add targets
+    JSONArray targetIds = new JSONArray();
+    for (Vertex vertex: targets_) {
+      targetIds.add(vertex.getVertexId());
+    }
+    obj.put("targets", targetIds);
+    obj.put("edgeType", edgeType_.toString());
+    return obj;
+  }
+
+  /**
+   * Encodes this MultiEdge object to a thrift object
+   */
+  public TMultiEdge toThrift() {
+    List<TVertex> sources = Lists.newArrayList();
+    for (Vertex vertex: sources_) {
+      sources.add(vertex.toThrift());
+    }
+    List<TVertex> targets = Lists.newArrayList();
+    for (Vertex vertex: targets_) {
+      targets.add(vertex.toThrift());
+    }
+    if (edgeType_ == EdgeType.PROJECTION) {
+      return new TMultiEdge(sources, targets, TEdgeType.PROJECTION);
+    }
+    return new TMultiEdge(sources, targets, TEdgeType.PREDICATE);
+  }
+
+  /**
+   * Constructs a MultiEdge object from a thrift object
+   */
+  public static MultiEdge fromThrift(TMultiEdge obj){
+    Set<Vertex> sources = Sets.newHashSet();
+    for (TVertex vertex: obj.sources) {
+      sources.add(Vertex.fromThrift(vertex));
+    }
+    Set<Vertex> targets = Sets.newHashSet();
+    for (TVertex vertex: obj.targets) {
+      targets.add(Vertex.fromThrift(vertex));
+    }
+    if (obj.edgetype == TEdgeType.PROJECTION) {
+      return new MultiEdge(sources, targets, EdgeType.PROJECTION);
+    }
+    return new MultiEdge(sources, targets, EdgeType.PREDICATE);
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (obj == null) return false;
+    if (obj.getClass() != this.getClass()) return false;
+    MultiEdge edge = (MultiEdge) obj;
+    return edge.sources_.equals(this.sources_) &&
+        edge.targets_.equals(this.targets_) &&
+        edge.edgeType_ == this.edgeType_;
+  }
+}
+
+/**
+ * Represents the column lineage graph of a query. This is a directed graph that is
+ * used to track dependencies among the table/column entities that participate in
+ * a query. There are two types of dependencies that are represented as edges in the
+ * column lineage graph:
+ * a) Projection dependency: This is a dependency between a set of source
+ * columns (base table columns) and a single target (result expr or table column).
+ * This dependency indicates that values of the target depend on the values of the source
+ * columns.
+ * b) Predicate dependency: This is a dependency between a set of target
+ * columns (or exprs) and a set of source columns (base table columns). It indicates that
+ * the source columns restrict the values of their targets (e.g. by participating in
+ * WHERE clause predicates).
+ *
+ * The following dependencies are generated for a query:
+ * - Exactly one projection dependency for every result expr / target column.
+ * - Exactly one predicate dependency that targets all result exprs / target cols and
+ *   depends on all columns participating in a conjunct in the query.
+ * - Special case of analytic fns: One predicate dependency per result expr / target col
+ *   whose value is directly or indirectly affected by an analytic function with a
+ *   partition by and/or order by clause.
+ */
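For illustration (hypothetical query, not from this patch): the dependencies the rules above would generate.

    // SELECT id FROM functional.t WHERE int_col > 10
    //   PROJECTION edge: sources {functional.t.id}      -> targets {id}
    //   PREDICATE  edge: sources {functional.t.int_col} -> targets {id}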
+public class ColumnLineageGraph {
+  private final static Logger LOG = LoggerFactory.getLogger(ColumnLineageGraph.class);
+  // Query statement
+  private String queryStr_;
+
+  // Name of the user that issued this query
+  private String user_;
+
+  private final List<Expr> resultDependencyPredicates_ = Lists.newArrayList();
+
+  private final List<MultiEdge> edges_ = Lists.newArrayList();
+
+  // Timestamp in seconds since epoch (GMT) this query was submitted for execution.
+  private long timestamp_;
+
+  // Map of Vertex labels to Vertex objects.
+  private final Map<String, Vertex> vertices_ = Maps.newHashMap();
+
+  // Map of Vertex ids to Vertex objects. Used primarily during the construction of the
+  // ColumnLineageGraph from a serialized JSON object.
+  private final Map<VertexId, Vertex> idToVertexMap_ = Maps.newHashMap();
+
+  // For an INSERT or a CTAS, these are the columns of the
+  // destination table plus any partitioning columns (when dynamic partitioning is used).
+  // For a SELECT stmt, they are the labels of the result exprs.
+  private final List<String> targetColumnLabels_ = Lists.newArrayList();
+
+  // Repository for tuple and slot descriptors for this query. Use it to construct the
+  // column lineage graph.
+  private DescriptorTable descTbl_;
+
+  private final IdGenerator<VertexId> vertexIdGenerator = VertexId.createGenerator();
+
+  public ColumnLineageGraph() { }
+
+  /**
+   * Private c'tor, used only for testing.
+   */
+  private ColumnLineageGraph(String stmt, String user, long timestamp) {
+    queryStr_ = stmt;
+    user_ = user;
+    timestamp_ = timestamp;
+  }
+
+  private void setVertices(Set<Vertex> vertices) {
+    for (Vertex vertex: vertices) {
+      vertices_.put(vertex.getLabel(), vertex);
+      idToVertexMap_.put(vertex.getVertexId(), vertex);
+    }
+  }
+
+  /**
+   * Creates a new MultiEdge in the column lineage graph from the sets of 'sources' and
+   * 'targets' labels (representing column names or result expr labels). The new
+   * MultiEdge object is returned.
+   */
+  private MultiEdge createMultiEdge(Set<String> targets, Set<String> sources,
+      MultiEdge.EdgeType type) {
+    Set<Vertex> targetVertices = Sets.newHashSet();
+    for (String target: targets) {
+      targetVertices.add(createVertex(target));
+    }
+    Set<Vertex> sourceVertices = Sets.newHashSet();
+    for (String source: sources) {
+      sourceVertices.add(createVertex(source));
+    }
+    MultiEdge edge = new MultiEdge(sourceVertices, targetVertices, type);
+    edges_.add(edge);
+    return edge;
+  }
+
+  /**
+   * Creates a new vertex in the column lineage graph. The new Vertex object is
+   * returned. If a Vertex with the same label already exists, reuse it.
+   */
+  private Vertex createVertex(String label) {
+    Vertex newVertex = vertices_.get(label);
+    if (newVertex != null) return newVertex;
+    newVertex = new Vertex(vertexIdGenerator.getNextId(), label);
+    vertices_.put(newVertex.getLabel(), newVertex);
+    idToVertexMap_.put(newVertex.getVertexId(), newVertex);
+    return newVertex;
+  }
+
+  /**
+   * Computes the column lineage graph of a query from the list of query result exprs.
+   * 'rootAnalyzer' is the Analyzer that was used for the analysis of the query.
+   */
+  public void computeLineageGraph(List<Expr> resultExprs, Analyzer rootAnalyzer) {
+    init(rootAnalyzer);
+    computeProjectionDependencies(resultExprs);
+    computeResultPredicateDependencies(rootAnalyzer);
+  }
+
+  /**
+   * Initialize the ColumnLineageGraph from the root analyzer of a query.
+   */
+  private void init(Analyzer analyzer) {
+    Preconditions.checkNotNull(analyzer);
+    Preconditions.checkState(analyzer.isRootAnalyzer());
+    TQueryCtx queryCtx = analyzer.getQueryCtx();
+    if (queryCtx.request.isSetRedacted_stmt()) {
+      queryStr_ = queryCtx.request.redacted_stmt;
+    } else {
+      queryStr_ = queryCtx.request.stmt;
+    }
+    Preconditions.checkNotNull(queryStr_);
+    SimpleDateFormat df = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
+    try {
+      timestamp_ = df.parse(queryCtx.now_string).getTime() / 1000;
+    } catch (java.text.ParseException e) {
+      LOG.error("Error parsing timestamp value: " + queryCtx.now_string +
+          " " + e.getMessage());
+      timestamp_ = new Date().getTime() / 1000;
+    }
+    descTbl_ = analyzer.getDescTbl();
+    user_ = analyzer.getUser().getName();
+  }
+
+  private void computeProjectionDependencies(List<Expr> resultExprs) {
+    Preconditions.checkNotNull(resultExprs);
+    Preconditions.checkState(!resultExprs.isEmpty());
+    Preconditions.checkState(resultExprs.size() == targetColumnLabels_.size());
+    for (int i = 0; i < resultExprs.size(); ++i) {
+      Expr expr = resultExprs.get(i);
+      Set<String> sourceBaseCols = Sets.newHashSet();
+      List<Expr> dependentExprs = Lists.newArrayList();
+      getSourceBaseCols(expr, sourceBaseCols, dependentExprs, false);
+      Set<String> targets = Sets.newHashSet(targetColumnLabels_.get(i));
+      createMultiEdge(targets, sourceBaseCols, MultiEdge.EdgeType.PROJECTION);
+      if (!dependentExprs.isEmpty()) {
+        // We have additional exprs that 'expr' has a predicate dependency on.
+        // Gather the transitive predicate dependencies of 'expr' based on its direct
+        // predicate dependencies. For each direct predicate dependency p, 'expr' is
+        // transitively predicate dependent on all exprs that p is projection and
+        // predicate dependent on.
+        Set<String> predicateBaseCols = Sets.newHashSet();
+        for (Expr dependentExpr: dependentExprs) {
+          getSourceBaseCols(dependentExpr, predicateBaseCols, null, true);
+        }
+        createMultiEdge(targets, predicateBaseCols, MultiEdge.EdgeType.PREDICATE);
+      }
+    }
+  }
+
+  /**
+   * Compute predicate dependencies for the query result, i.e. exprs that affect the
+   * possible values of the result exprs / target columns, such as predicates in a WHERE
+   * clause.
+   */
+  private void computeResultPredicateDependencies(Analyzer analyzer) {
+    List<Expr> conjuncts = analyzer.getConjuncts();
+    for (Expr expr: conjuncts) {
+      if (expr.isAuxExpr()) continue;
+      resultDependencyPredicates_.add(expr);
+    }
+    Set<String> predicateBaseCols = Sets.newHashSet();
+    for (Expr expr: resultDependencyPredicates_) {
+      getSourceBaseCols(expr, predicateBaseCols, null, true);
+    }
+    if (predicateBaseCols.isEmpty()) return;
+    Set<String> targets = Sets.newHashSet(targetColumnLabels_);
+    createMultiEdge(targets, predicateBaseCols, MultiEdge.EdgeType.PREDICATE);
+  }
+
+  /**
+   * Identify the base table columns that 'expr' is connected to by recursively resolving
+   * all associated slots through inline views and materialization points to base-table
+   * slots. If 'directPredDeps' is not null, it is populated with the exprs that
+   * have a predicate dependency with 'expr' (e.g. partitioning and order by exprs for
+   * the case of an analytic function). If 'traversePredDeps' is false, not all the
+   * children exprs of 'expr' are used to identify the base columns that 'expr' is
+   * connected to. Which children are filtered depends on the type of 'expr' (e.g. for
+   * AnalyticFunctionExpr, grouping and sorting exprs are filtered out).
+   */
+  private void getSourceBaseCols(Expr expr, Set<String> sourceBaseCols,
+      List<Expr> directPredDeps, boolean traversePredDeps) {
+    List<Expr> exprsToTraverse = getProjectionDeps(expr);
+    List<Expr> predicateDepExprs = getPredicateDeps(expr);
+    if (directPredDeps != null) directPredDeps.addAll(predicateDepExprs);
+    if (traversePredDeps) exprsToTraverse.addAll(predicateDepExprs);
+    List<SlotId> slotIds = Lists.newArrayList();
+    for (Expr e: exprsToTraverse) {
+      e.getIds(null, slotIds);
+    }
+    for (SlotId slotId: slotIds) {
+      SlotDescriptor slotDesc = descTbl_.getSlotDesc(slotId);
+      List<Expr> sourceExprs = slotDesc.getSourceExprs();
+      if (sourceExprs.isEmpty() && slotDesc.isScanSlot() &&
+          slotDesc.getPath().isRootedAtTuple()) {
+        // slot should correspond to a materialized tuple of a table
+        Preconditions.checkState(slotDesc.getParent().isMaterialized());
+        List<String> path = slotDesc.getPath().getCanonicalPath();
+        sourceBaseCols.add(Joiner.on(".").join(path));
+      } else {
+        for (Expr sourceExpr: sourceExprs) {
+          getSourceBaseCols(sourceExpr, sourceBaseCols, directPredDeps,
+              traversePredDeps);
+        }
+      }
+    }
+  }
+
+  /**
+   * Retrieve the exprs that 'e' is directly projection dependent on.
+   * TODO Handle conditional exprs (e.g. CASE, IF).
+   */
+  private List<Expr> getProjectionDeps(Expr e) {
+    Preconditions.checkNotNull(e);
+    List<Expr> outputExprs = Lists.newArrayList();
+    if (e instanceof AnalyticExpr) {
+      AnalyticExpr analytic = (AnalyticExpr) e;
+      outputExprs.addAll(analytic.getChildren().subList(0,
+          analytic.getFnCall().getParams().size()));
+    } else {
+      outputExprs.add(e);
+    }
+    return outputExprs;
+  }
+
+  /**
+   * Retrieve the exprs that 'e' is directly predicate dependent on.
+   * TODO Handle conditional exprs (e.g. CASE, IF).
+   */
+  private List<Expr> getPredicateDeps(Expr e) {
+    Preconditions.checkNotNull(e);
+    List<Expr> outputExprs = Lists.newArrayList();
+    if (e instanceof AnalyticExpr) {
+      AnalyticExpr analyticExpr = (AnalyticExpr) e;
+      outputExprs.addAll(analyticExpr.getPartitionExprs());
+      for (OrderByElement orderByElem: analyticExpr.getOrderByElements()) {
+        outputExprs.add(orderByElem.getExpr());
+      }
+    }
+    return outputExprs;
+  }
+
+  public void addDependencyPredicates(Collection<Expr> exprs) {
+    resultDependencyPredicates_.addAll(exprs);
+  }
+
+  /**
+   * Encodes the ColumnLineageGraph object to JSON.
+   */
+  public String toJson() {
+    if (Strings.isNullOrEmpty(queryStr_)) return "";
+    Map obj = new LinkedHashMap();
+    obj.put("queryText", queryStr_);
+    obj.put("hash", getQueryHash(queryStr_));
+    obj.put("user", user_);
+    obj.put("timestamp", timestamp_);
+    // Add edges
+    JSONArray edges = new JSONArray();
+    for (MultiEdge edge: edges_) {
+      edges.add(edge.toJson());
+    }
+    obj.put("edges", edges);
+    // Add vertices
+    TreeSet<Vertex> sortedVertices = Sets.newTreeSet(vertices_.values());
+    JSONArray vertices = new JSONArray();
+    for (Vertex vertex: sortedVertices) {
+      vertices.add(vertex.toJson());
+    }
+    obj.put("vertices", vertices);
+    return JSONValue.toJSONString(obj);
+  }
+
+  /**
+   * Serializes the ColumnLineageGraph to a thrift object
+   */
+  public TLineageGraph toThrift() {
+    TLineageGraph graph = new TLineageGraph();
+    if (Strings.isNullOrEmpty(queryStr_)) return graph;
+    graph.setQuery_text(queryStr_);
+    graph.setHash(getQueryHash(queryStr_));
+    graph.setUser(user_);
+    graph.setStarted(timestamp_);
+    // Add edges
+    List<TMultiEdge> edges = Lists.newArrayList();
+    for (MultiEdge edge: edges_) {
+      edges.add(edge.toThrift());
+    }
+    graph.setEdges(edges);
+    // Add vertices
+    TreeSet<Vertex> sortedVertices = Sets.newTreeSet(vertices_.values());
+    List<TVertex> vertices = Lists.newArrayList();
+    for (Vertex vertex: sortedVertices) {
+      vertices.add(vertex.toThrift());
+    }
+    graph.setVertices(vertices);
+    return graph;
+  }
+
+  /**
+   * Creates a ColumnLineageGraph object from a thrift object.
+   */
+  public static ColumnLineageGraph fromThrift(TLineageGraph obj) {
+    ColumnLineageGraph lineage =
+        new ColumnLineageGraph(obj.query_text, obj.user, obj.started);
+    TreeSet<Vertex> vertices = Sets.newTreeSet();
+    for (TVertex vertex: obj.vertices) {
+      vertices.add(Vertex.fromThrift(vertex));
+    }
+    lineage.setVertices(vertices);
+    for (TMultiEdge edge: obj.edges) {
+      MultiEdge e = MultiEdge.fromThrift(edge);
+      lineage.edges_.add(e);
+    }
+    return lineage;
+  }
+
+  private String getQueryHash(String queryStr) {
+    Hasher hasher = Hashing.md5().newHasher();
+    hasher.putString(queryStr);
+    return hasher.hash().toString();
+  }
+
+  /**
+   * Creates a ColumnLineageGraph object from a serialized JSON record. The new
+   * ColumnLineageGraph object is returned. Used only during testing.
+   */
+  public static ColumnLineageGraph createFromJSON(String json) {
+    if (json == null || json.isEmpty()) return null;
+    JSONParser parser = new JSONParser();
+    Object obj = null;
+    try {
+      obj = parser.parse(json);
+    } catch (ParseException e) {
+      LOG.error("Error parsing serialized column lineage graph: " + e.getMessage());
+      return null;
+    }
+    if (!(obj instanceof JSONObject)) return null;
+    JSONObject jsonObj = (JSONObject) obj;
+    String stmt = (String) jsonObj.get("queryText");
+    String hash = (String) jsonObj.get("hash");
+    String user = (String) jsonObj.get("user");
+    long timestamp = (Long) jsonObj.get("timestamp");
+    ColumnLineageGraph graph = new ColumnLineageGraph(stmt, user, timestamp);
+    JSONArray serializedVertices = (JSONArray) jsonObj.get("vertices");
+    Set<Vertex> vertices = Sets.newHashSet();
+    for (int i = 0; i < serializedVertices.size(); ++i) {
+      Vertex v = Vertex.fromJsonObj((JSONObject) serializedVertices.get(i));
+      vertices.add(v);
+    }
+    graph.setVertices(vertices);
+    JSONArray serializedEdges = (JSONArray) jsonObj.get("edges");
+    for (int i = 0; i < serializedEdges.size(); ++i) {
+      MultiEdge e =
+          graph.createMultiEdgeFromJSONObj((JSONObject) serializedEdges.get(i));
+      graph.edges_.add(e);
+    }
+    return graph;
+  }
+
+  private MultiEdge createMultiEdgeFromJSONObj(JSONObject jsonEdge) {
+    Preconditions.checkNotNull(jsonEdge);
+    JSONArray sources = (JSONArray) jsonEdge.get("sources");
+    Set<Vertex> sourceVertices = getVerticesFromJSONArray(sources);
+    JSONArray targets = (JSONArray) jsonEdge.get("targets");
+    Set<Vertex> targetVertices = getVerticesFromJSONArray(targets);
+    MultiEdge.EdgeType type =
+        MultiEdge.EdgeType.valueOf((String) jsonEdge.get("edgeType"));
+    return new MultiEdge(sourceVertices, targetVertices, type);
+  }
+
+  private Set<Vertex> getVerticesFromJSONArray(JSONArray vertexIdArray) {
+    Set<Vertex> vertices = Sets.newHashSet();
+    for (int i = 0; i < vertexIdArray.size(); ++i) {
+      int sourceId = ((Long) vertexIdArray.get(i)).intValue();
+      Vertex sourceVertex = idToVertexMap_.get(new VertexId(sourceId));
+      Preconditions.checkNotNull(sourceVertex);
+      vertices.add(sourceVertex);
+    }
+    return vertices;
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (obj == null) return false;
+    if (obj.getClass() != this.getClass()) return false;
+    ColumnLineageGraph g = (ColumnLineageGraph) obj;
+    if (!this.vertices_.equals(g.vertices_) ||
+        !this.edges_.equals(g.edges_)) {
+      return false;
+    }
+    return true;
+  }
+
+  public String debugString() {
+    StringBuilder builder = new StringBuilder();
+    for (MultiEdge edge: edges_) {
+      builder.append(edge.toString() + "\n");
+    }
+    builder.append(toJson());
+    return builder.toString();
+  }
+
+  public void addTargetColumnLabels(Collection<String> columnLabels) {
+    Preconditions.checkNotNull(columnLabels);
+    targetColumnLabels_.addAll(columnLabels);
+  }
+
+  public void addTargetColumnLabels(Table dstTable) {
+    Preconditions.checkNotNull(dstTable);
+    String tblFullName = dstTable.getFullName();
+    for (String columnName: dstTable.getColumnNames()) {
+      targetColumnLabels_.add(tblFullName + "." + columnName);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/CompoundPredicate.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/CompoundPredicate.java b/fe/src/main/java/org/apache/impala/analysis/CompoundPredicate.java
new file mode 100644
index 0000000..4869004
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/CompoundPredicate.java
@@ -0,0 +1,216 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import com.cloudera.impala.catalog.Db;
+import com.cloudera.impala.catalog.Function.CompareMode;
+import com.cloudera.impala.catalog.ScalarFunction;
+import com.cloudera.impala.catalog.Type;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.thrift.TExprNode;
+import com.cloudera.impala.thrift.TExprNodeType;
+import com.google.common.base.Objects;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+
+/**
+ * &&, ||, ! predicates.
+ *
+ */
+public class CompoundPredicate extends Predicate {
+  public enum Operator {
+    AND("AND"),
+    OR("OR"),
+    NOT("NOT");
+
+    private final String description;
+
+    private Operator(String description) {
+      this.description = description;
+    }
+
+    @Override
+    public String toString() {
+      return description;
+    }
+  }
+  private final Operator op_;
+
+  public static void initBuiltins(Db db) {
+    // AND and OR are implemented as custom exprs, so they do not have a function symbol.
+    db.addBuiltin(ScalarFunction.createBuiltinOperator(
+        Operator.AND.name(), "",
+        Lists.<Type>newArrayList(Type.BOOLEAN, Type.BOOLEAN), Type.BOOLEAN));
+    db.addBuiltin(ScalarFunction.createBuiltinOperator(
+        Operator.OR.name(), "",
+        Lists.<Type>newArrayList(Type.BOOLEAN, Type.BOOLEAN), Type.BOOLEAN));
+    db.addBuiltin(ScalarFunction.createBuiltinOperator(
+        Operator.NOT.name(), "impala::CompoundPredicate::Not",
+        Lists.<Type>newArrayList(Type.BOOLEAN), Type.BOOLEAN));
+  }
+
+  public CompoundPredicate(Operator op, Expr e1, Expr e2) {
+    super();
+    this.op_ = op;
+    Preconditions.checkNotNull(e1);
+    children_.add(e1);
+    Preconditions.checkArgument(op == Operator.NOT && e2 == null
+        || op != Operator.NOT && e2 != null);
+    if (e2 != null) children_.add(e2);
+  }
+
+  /**
+   * Copy c'tor used in clone().
+   */
+  protected CompoundPredicate(CompoundPredicate other) {
+    super(other);
+    op_ = other.op_;
+  }
+
+  public Operator getOp() { return op_; }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (!super.equals(obj)) return false;
+    return ((CompoundPredicate) obj).op_ == op_;
+  }
+
+  @Override
+  public String debugString() {
+    return Objects.toStringHelper(this)
+        .add("op", op_)
+        .addValue(super.debugString())
+        .toString();
+  }
+
+  @Override
+  public String toSqlImpl() {
+    if (children_.size() == 1) {
+      Preconditions.checkState(op_ == Operator.NOT);
+      return "NOT " + getChild(0).toSql();
+    } else {
+      return getChild(0).toSql() + " " + op_.toString() + " " + getChild(1).toSql();
+    }
+  }
+
+  @Override
+  protected void toThrift(TExprNode msg) {
+    msg.node_type = TExprNodeType.COMPOUND_PRED;
+  }
+
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    if (isAnalyzed_) return;
+    super.analyze(analyzer);
+
+    // Check that children are predicates.
+    for (Expr e: children_) {
+      if (!e.getType().isBoolean() && !e.getType().isNull()) {
+        throw new AnalysisException(String.format("Operand '%s' part of predicate " +
+            "'%s' should return type 'BOOLEAN' but returns type '%s'.",
+            e.toSql(), toSql(), e.getType().toSql()));
+      }
+    }
+
+    fn_ = getBuiltinFunction(analyzer, op_.toString(), collectChildReturnTypes(),
+        CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
+    Preconditions.checkState(fn_ != null);
+    Preconditions.checkState(fn_.getReturnType().isBoolean());
+    castForFunctionCall(false);
+    if (hasChildCosts()) evalCost_ = getChildCosts() + COMPOUND_PREDICATE_COST;
+
+    if (!getChild(0).hasSelectivity() ||
+        (children_.size() == 2 && !getChild(1).hasSelectivity())) {
+      // Give up if one of our children has an unknown selectivity.
+      selectivity_ = -1;
+      return;
+    }
+
+    switch (op_) {
+      case AND:
+        selectivity_ = getChild(0).selectivity_ * getChild(1).selectivity_;
+        break;
+      case OR:
+        selectivity_ = getChild(0).selectivity_ + getChild(1).selectivity_
+            - getChild(0).selectivity_ * getChild(1).selectivity_;
+        break;
+      case NOT:
+        selectivity_ = 1.0 - getChild(0).selectivity_;
+        break;
+    }
+    selectivity_ = Math.max(0.0, Math.min(1.0, selectivity_));
+  }
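For illustration (hypothetical child selectivities, not from this patch): a quick numeric check of the selectivity rules above.

    // With getChild(0).selectivity_ = 0.1 and getChild(1).selectivity_ = 0.2:
    //   AND: 0.1 * 0.2               = 0.02
    //   OR:  0.1 + 0.2 - (0.1 * 0.2) = 0.28
    //   NOT: 1.0 - 0.1               = 0.9   (single child)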
+
+  /**
+   * Retrieve the slots bound by BinaryPredicate, InPredicate and
+   * CompoundPredicates in the subtree rooted at 'this'.
+   */
+  public ArrayList<SlotRef> getBoundSlots() {
+    ArrayList<SlotRef> slots = Lists.newArrayList();
+    for (int i = 0; i < getChildren().size(); ++i) {
+      if (getChild(i) instanceof BinaryPredicate ||
+          getChild(i) instanceof InPredicate) {
+        slots.add(((Predicate)getChild(i)).getBoundSlot());
+      } else if (getChild(i) instanceof CompoundPredicate) {
+        slots.addAll(((CompoundPredicate)getChild(i)).getBoundSlots());
+      }
+    }
+    return slots;
+  }
+
+  /**
+   * Negates a CompoundPredicate.
+   */
+  @Override
+  public Expr negate() {
+    if (op_ == Operator.NOT) return getChild(0);
+    Expr negatedLeft = getChild(0).negate();
+    Expr negatedRight = getChild(1).negate();
+    Operator newOp = (op_ == Operator.OR) ? Operator.AND : Operator.OR;
+    return new CompoundPredicate(newOp, negatedLeft, negatedRight);
+  }
+
+  /**
+   * Creates a conjunctive predicate from a list of exprs.
+   */
+  public static Expr createConjunctivePredicate(List<Expr> conjuncts) {
+    Expr conjunctivePred = null;
+    for (Expr expr: conjuncts) {
+      if (conjunctivePred == null) {
+        conjunctivePred = expr;
+        continue;
+      }
+      conjunctivePred = new CompoundPredicate(CompoundPredicate.Operator.AND,
+          expr, conjunctivePred);
+    }
+    return conjunctivePred;
+  }
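For illustration (hypothetical exprs, not from this patch): the fold above nests later conjuncts on the left, leaving evaluation semantics unchanged.

    // createConjunctivePredicate(Lists.newArrayList(a, b, c))
    //   -> new CompoundPredicate(AND, c, new CompoundPredicate(AND, b, a))
    //   toSql() renders this as "c AND b AND a"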
+
+  @Override
+  public Expr clone() { return new CompoundPredicate(this); }
+
+  // Create an AND predicate between two exprs, 'lhs' and 'rhs'. If
+  // 'rhs' is null, simply return 'lhs'.
+  public static Expr createConjunction(Expr lhs, Expr rhs) {
+    if (rhs == null) return lhs;
+    return new CompoundPredicate(Operator.AND, rhs, lhs);
+  }
+}
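
Aside on the selectivity arithmetic in analyze() above: the AND/OR/NOT cases combine
the child selectivities under the usual independence assumption and clamp the result
to [0, 1]. A minimal standalone sketch (not part of the commit; the 0.2/0.5 inputs
are hypothetical):

    // Mirrors the AND/OR/NOT cases and the final clamp in CompoundPredicate.analyze().
    public class SelectivitySketch {
      public static void main(String[] args) {
        double s0 = 0.2, s1 = 0.5;
        double and = s0 * s1;             // 0.10
        double or = s0 + s1 - s0 * s1;    // 0.60
        double not = 1.0 - s0;            // 0.80
        and = Math.max(0.0, Math.min(1.0, and));  // clamp, as done for selectivity_
        System.out.println(and + " " + or + " " + not);
      }
    }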

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/ComputeStatsStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/ComputeStatsStmt.java b/fe/src/main/java/org/apache/impala/analysis/ComputeStatsStmt.java
new file mode 100644
index 0000000..cd01713
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/ComputeStatsStmt.java
@@ -0,0 +1,553 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.log4j.Logger;
+
+import com.cloudera.impala.authorization.Privilege;
+import com.cloudera.impala.catalog.Column;
+import com.cloudera.impala.catalog.HBaseTable;
+import com.cloudera.impala.catalog.HdfsPartition;
+import com.cloudera.impala.catalog.HdfsTable;
+import com.cloudera.impala.catalog.Table;
+import com.cloudera.impala.catalog.Type;
+import com.cloudera.impala.catalog.View;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.common.PrintUtils;
+import com.cloudera.impala.thrift.TComputeStatsParams;
+import com.cloudera.impala.thrift.TPartitionStats;
+import com.cloudera.impala.thrift.TTableName;
+import com.google.common.base.Joiner;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+
+/**
+ * Represents a COMPUTE STATS <table> and COMPUTE INCREMENTAL STATS <table> [PARTITION
+ * <part_spec>] statement for statistics collection. The former statement gathers all
+ * table and column stats for a given table and stores them in the Metastore via the
+ * CatalogService. All existing stats for that table are replaced and no existing stats
+ * are reused. The latter, incremental form, similarly computes stats for the whole table
+ * but does so by re-using stats from partitions which have 'valid' statistics. Statistics
+ * are currently 'valid' if they exist; in the future they may be expired based on
+ * recency, etc.
+ *
+ * TODO: Allow more coarse/fine grained (db, column)
+ * TODO: Compute stats on complex types.
+ */
+public class ComputeStatsStmt extends StatementBase {
+  private static final Logger LOG = Logger.getLogger(ComputeStatsStmt.class);
+
+  private static final String AVRO_SCHEMA_MSG_PREFIX = "Cannot COMPUTE STATS on Avro " +
+      "table '%s' because its column definitions do not match those in the Avro schema.";
+  private static final String AVRO_SCHEMA_MSG_SUFFIX = "Please re-create the table with " +
+      "column definitions, e.g., using the result of 'SHOW CREATE TABLE'";
+
+  protected final TableName tableName_;
+
+  // Set during analysis.
+  protected Table table_;
+
+  // The NULL count is not currently used in optimization or at run time,
+  // and compute stats runs 2x faster in many cases when not counting NULLs.
+  private static final boolean COUNT_NULLS = false;
+
+  // Query for getting the per-partition row count and the total row count.
+  // Set during analysis.
+  protected String tableStatsQueryStr_;
+
+  // Query for getting the per-column NDVs and number of NULLs.
+  // Set during analysis.
+  protected String columnStatsQueryStr_;
+
+  // If true, stats will be gathered incrementally per-partition.
+  private boolean isIncremental_ = false;
+
+  // If true, expect the compute stats process to produce output for all partitions in the
+  // target table (only meaningful, therefore, if partitioned). This is always true for
+  // non-incremental computations. If set, expectedPartitions_ will be empty - the point
+  // of this flag is to optimise the case where all partitions are targeted.
+  private boolean expectAllPartitions_ = false;
+
+  // The list of valid partition statistics that can be used in an incremental computation
+  // without themselves being recomputed. Populated in analyze().
+  private final List<TPartitionStats> validPartStats_ = Lists.newArrayList();
+
+  // For incremental computations, the list of partitions (identified by list of partition
+  // column values) that we expect to receive results for. Used to ensure that even empty
+  // partitions emit results.
+  // TODO: Consider using partition IDs (and adding them to the child queries with a
+  // PARTITION_ID() builtin)
+  private final List<List<String>> expectedPartitions_ = Lists.newArrayList();
+
+  // If non-null, the partition that an incremental computation might apply to. Must be
+  // null if this is a non-incremental computation.
+  private PartitionSpec partitionSpec_ = null;
+
+  // The maximum number of partitions that may be explicitly selected by filter
+  // predicates. Any query that selects more than this automatically drops back to a full
+  // incremental stats recomputation.
+  // TODO: We can probably do better than this, e.g. running several queries, each of
+  // which selects up to MAX_INCREMENTAL_PARTITIONS partitions.
+  private static final int MAX_INCREMENTAL_PARTITIONS = 1000;
+
+  /**
+   * Constructor for the non-incremental form of COMPUTE STATS.
+   */
+  protected ComputeStatsStmt(TableName tableName) {
+    this(tableName, false, null);
+  }
+
+  /**
+   * Constructor for the incremental form of COMPUTE STATS. If isIncremental is true,
+   * statistics will be recomputed incrementally; if false they will be recomputed for the
+   * whole table. The partition spec partSpec can specify a single partition whose stats
+   * should be recomputed.
+   */
+  protected ComputeStatsStmt(TableName tableName, boolean isIncremental,
+      PartitionSpec partSpec) {
+    Preconditions.checkState(tableName != null && !tableName.isEmpty());
+    Preconditions.checkState(isIncremental || partSpec == null);
+    this.tableName_ = tableName;
+    this.table_ = null;
+    this.isIncremental_ = isIncremental;
+    this.partitionSpec_ = partSpec;
+    if (partitionSpec_ != null) {
+      partitionSpec_.setTableName(tableName);
+      partitionSpec_.setPrivilegeRequirement(Privilege.ALTER);
+    }
+  }
+
+  /**
+   * Utility method used when constructing the child queries: adds the partition
+   * columns to both a select list and a group-by list.
+   */
+  private void addPartitionCols(HdfsTable table, List<String> selectList,
+      List<String> groupByCols) {
+    for (int i = 0; i < table.getNumClusteringCols(); ++i) {
+      String colRefSql = ToSqlUtils.getIdentSql(table.getColumns().get(i).getName());
+      groupByCols.add(colRefSql);
+      // Also add the group by columns to the select list; the Metastore stores
+      // partition values as strings.
+      selectList.add(colRefSql);
+    }
+  }
+
+  private List<String> getBaseColumnStatsQuerySelectList(Analyzer analyzer) {
+    List<String> columnStatsSelectList = Lists.newArrayList();
+    // For Hdfs tables, exclude partition columns from stats gathering because Hive
+    // cannot store them as part of the non-partition column stats. For HBase tables,
+    // include the single clustering column (the row key).
+    int startColIdx = (table_ instanceof HBaseTable) ? 0 : table_.getNumClusteringCols();
+    final String ndvUda = isIncremental_ ? "NDV_NO_FINALIZE" : "NDV";
+
+    for (int i = startColIdx; i < table_.getColumns().size(); ++i) {
+      Column c = table_.getColumns().get(i);
+      Type type = c.getType();
+
+      // Ignore columns with an invalid/unsupported type. For example, complex types in
+      // an HBase-backed table will appear as invalid types.
+      if (!type.isValid() || !type.isSupported()
+          || c.getType().isComplexType()) {
+        continue;
+      }
+      // NDV approximation function. Add explicit alias for later identification when
+      // updating the Metastore.
+      String colRefSql = ToSqlUtils.getIdentSql(c.getName());
+      columnStatsSelectList.add(ndvUda + "(" + colRefSql + ") AS " + colRefSql);
+
+      if (COUNT_NULLS) {
+        // Count the number of NULL values.
+        columnStatsSelectList.add("COUNT(IF(" + colRefSql + " IS NULL, 1, NULL))");
+      } else {
+        // Using -1 to indicate "unknown". We need cast to BIGINT because backend expects
+        // an i64Val as the number of NULLs returned by the COMPUTE STATS column stats
+        // child query. See CatalogOpExecutor::SetColumnStats(). If we do not cast, then
+        // the -1 will be treated as TINYINT resulting a 0 to be placed in the #NULLs
+        // column (see IMPALA-1068).
+        columnStatsSelectList.add("CAST(-1 as BIGINT)");
+      }
+
+      // For STRING columns also compute the max and avg string length.
+      if (type.isStringType()) {
+        columnStatsSelectList.add("MAX(length(" + colRefSql + "))");
+        columnStatsSelectList.add("AVG(length(" + colRefSql + "))");
+      } else {
+        // For non-STRING columns we use the fixed size of the type.
+        // We store the same information for all types to avoid having to
+        // treat STRING columns specially in the BE CatalogOpExecutor.
+        Integer typeSize = type.getPrimitiveType().getSlotSize();
+        columnStatsSelectList.add(typeSize.toString());
+        columnStatsSelectList.add("CAST(" + typeSize.toString() + " as DOUBLE)");
+      }
+
+      if (isIncremental_) {
+        // Need the count in order to properly combine per-partition column stats
+        columnStatsSelectList.add("COUNT(" + colRefSql + ")");
+      }
+    }
+    return columnStatsSelectList;
+  }
+
+  /**
+   * Constructs two queries to compute statistics for 'tableName_', if that table exists
+   * (although if we can detect that no work needs to be done for either query, that query
+   * will be 'null' and not executed).
+   *
+   * The first query computes the number of rows (on a per-partition basis if the table is
+   * partitioned) and has the form "SELECT COUNT(*) FROM tbl GROUP BY part_col1,
+   * part_col2...", with an optional WHERE clause for incremental computation (see below).
+   *
+   * The second query computes the NDV estimate, the average width, the maximum width and,
+   * optionally, the number of nulls for each column. For non-partitioned tables (or
+   * non-incremental computations), the query is simple:
+   *
+   * SELECT NDV(col), COUNT(<nulls>), MAX(length(col)), AVG(length(col)) FROM tbl
+   *
+   * (For non-string columns, the widths are hard-coded as they are known at query
+   * construction time).
+   *
+   * If computation is incremental (i.e. the original statement was COMPUTE INCREMENTAL
+   * STATS.., and the underlying table is a partitioned HdfsTable), some modifications are
+   * made to the non-incremental per-column query. First, a different UDA,
+   * NDV_NO_FINALIZE() is used to retrieve and serialise the intermediate state from each
+   * column. Second, the results are grouped by partition, as with the row count query, so
+   * that the intermediate NDV computation state can be stored per-partition. The number
+   * of rows per partition is also recorded.
+   *
+   * For both the row count query and the column stats query, the query's WHERE clause is
+   * used to restrict execution only to partitions that actually require new statistics to
+   * be computed.
+   *
+   * SELECT NDV_NO_FINALIZE(col), <nulls, max, avg>, COUNT(col) FROM tbl
+   * GROUP BY part_col1, part_col2, ...
+   * WHERE ((part_col1 = p1_val1) AND (part_col2 = p1_val2)) OR
+   *       ((part_col1 = p2_val1) AND (part_col2 = p2_val2)) OR ...
+   */
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    table_ = analyzer.getTable(tableName_, Privilege.ALTER);
+    String sqlTableName = table_.getTableName().toSql();
+    if (table_ instanceof View) {
+      throw new AnalysisException(String.format(
+          "COMPUTE STATS not supported for view %s", sqlTableName));
+    }
+
+    if (!(table_ instanceof HdfsTable)) {
+      if (partitionSpec_ != null) {
+        throw new AnalysisException("COMPUTE INCREMENTAL ... PARTITION not supported " +
+            "for non-HDFS table " + table_.getTableName());
+      }
+      isIncremental_ = false;
+    }
+
+    // Ensure that we write an entry for every partition if this isn't incremental
+    if (!isIncremental_) expectAllPartitions_ = true;
+
+    HdfsTable hdfsTable = null;
+    if (table_ instanceof HdfsTable) {
+      hdfsTable = (HdfsTable)table_;
+      if (isIncremental_ && hdfsTable.getNumClusteringCols() == 0 &&
+          partitionSpec_ != null) {
+          throw new AnalysisException(String.format(
+              "Can't compute PARTITION stats on an unpartitioned table: %s",
+              sqlTableName));
+      } else if (partitionSpec_ != null) {
+          partitionSpec_.setPartitionShouldExist();
+          partitionSpec_.analyze(analyzer);
+          for (PartitionKeyValue kv: partitionSpec_.getPartitionSpecKeyValues()) {
+            // TODO: We could match the dynamic keys (i.e. as wildcards) as well, but that
+            // would involve looping over all partitions and seeing which match the
+            // partition spec.
+            if (!kv.isStatic()) {
+              throw new AnalysisException("All partition keys must have values: " +
+                  kv.toString());
+            }
+          }
+      }
+      // For incremental stats, estimate the size of intermediate stats and report an
+      // error if the estimate is greater than MAX_INCREMENTAL_STATS_SIZE_BYTES.
+      if (isIncremental_) {
+        long statsSizeEstimate = hdfsTable.getColumns().size() *
+            hdfsTable.getPartitions().size() * HdfsTable.STATS_SIZE_PER_COLUMN_BYTES;
+        if (statsSizeEstimate > HdfsTable.MAX_INCREMENTAL_STATS_SIZE_BYTES) {
+          LOG.error("Incremental stats size estimate for table " + hdfsTable.getName() +
+              " exceeded " + HdfsTable.MAX_INCREMENTAL_STATS_SIZE_BYTES + ", estimate = "
+              + statsSizeEstimate);
+          throw new AnalysisException("Incremental stats size estimate exceeds "
+              + PrintUtils.printBytes(HdfsTable.MAX_INCREMENTAL_STATS_SIZE_BYTES)
+              + ". Please try COMPUTE STATS instead.");
+        }
+      }
+    }
+
+    // Build partition filters that only select partitions without valid statistics for
+    // incremental computation.
+    List<String> filterPreds = Lists.newArrayList();
+    if (isIncremental_) {
+      if (partitionSpec_ == null) {
+        // If any column does not have stats, we recompute statistics for all partitions
+        // TODO: need a better way to invalidate stats for all partitions, so that we can
+        // use this logic to only recompute new / changed columns.
+        boolean tableIsMissingColStats = false;
+
+        // We'll warn the user if a column is missing stats (and therefore we rescan the
+        // whole table), but if all columns are missing stats, the table just doesn't have
+        // any stats and there's no need to warn.
+        boolean allColumnsMissingStats = true;
+        String exampleColumnMissingStats = null;
+        // Partition columns always have stats, so exclude them from this search
+        for (Column col: table_.getNonClusteringColumns()) {
+          if (!col.getStats().hasStats()) {
+            if (!tableIsMissingColStats) {
+              tableIsMissingColStats = true;
+              exampleColumnMissingStats = col.getName();
+            }
+          } else {
+            allColumnsMissingStats = false;
+          }
+        }
+
+        if (tableIsMissingColStats && !allColumnsMissingStats) {
+          analyzer.addWarning("Column " + exampleColumnMissingStats +
+              " does not have statistics, recomputing stats for the whole table");
+        }
+
+        for (HdfsPartition p: hdfsTable.getPartitions()) {
+          if (p.isDefaultPartition()) continue;
+          TPartitionStats partStats = p.getPartitionStats();
+          if (!p.hasIncrementalStats() || tableIsMissingColStats) {
+            if (partStats == null) LOG.trace(p.toString() + " does not have stats");
+            if (!tableIsMissingColStats) filterPreds.add(p.getConjunctSql());
+            List<String> partValues = Lists.newArrayList();
+            for (LiteralExpr partValue: p.getPartitionValues()) {
+              partValues.add(PartitionKeyValue.getPartitionKeyValueString(partValue,
+                  "NULL"));
+            }
+            expectedPartitions_.add(partValues);
+          } else {
+            LOG.trace(p.toString() + " does have statistics");
+            validPartStats_.add(partStats);
+          }
+        }
+        if (expectedPartitions_.size() == hdfsTable.getPartitions().size() - 1) {
+          expectedPartitions_.clear();
+          expectAllPartitions_ = true;
+        }
+      } else {
+        // Always compute stats on a particular partition when told to.
+        List<String> partitionConjuncts = Lists.newArrayList();
+        for (PartitionKeyValue kv: partitionSpec_.getPartitionSpecKeyValues()) {
+          partitionConjuncts.add(kv.toPredicateSql());
+        }
+        filterPreds.add("(" + Joiner.on(" AND ").join(partitionConjuncts) + ")");
+        HdfsPartition targetPartition =
+            hdfsTable.getPartition(partitionSpec_.getPartitionSpecKeyValues());
+        List<String> partValues = Lists.newArrayList();
+        for (LiteralExpr partValue: targetPartition.getPartitionValues()) {
+          partValues.add(PartitionKeyValue.getPartitionKeyValueString(partValue,
+              "NULL"));
+        }
+        expectedPartitions_.add(partValues);
+        for (HdfsPartition p: hdfsTable.getPartitions()) {
+          if (p.isDefaultPartition()) continue;
+          if (p == targetPartition) continue;
+          TPartitionStats partStats = p.getPartitionStats();
+          if (partStats != null) validPartStats_.add(partStats);
+        }
+      }
+
+      if (filterPreds.size() == 0 && validPartStats_.size() != 0) {
+        LOG.info("No partitions selected for incremental stats update");
+        analyzer.addWarning("No partitions selected for incremental stats update");
+        return;
+      }
+    }
+
+    if (filterPreds.size() > MAX_INCREMENTAL_PARTITIONS) {
+      // TODO: Consider simply running for MAX_INCREMENTAL_PARTITIONS partitions, and then
+      // advising the user to iterate.
+      analyzer.addWarning(
+          "Too many partitions selected, doing full recomputation of incremental stats");
+      filterPreds.clear();
+      validPartStats_.clear();
+    }
+
+    List<String> groupByCols = Lists.newArrayList();
+    List<String> partitionColsSelectList = Lists.newArrayList();
+    // Only add group by clause for HdfsTables.
+    if (hdfsTable != null) {
+      if (hdfsTable.isAvroTable()) checkIncompleteAvroSchema(hdfsTable);
+      addPartitionCols(hdfsTable, partitionColsSelectList, groupByCols);
+    }
+
+    // Query for getting the per-partition row count and the total row count.
+    StringBuilder tableStatsQueryBuilder = new StringBuilder("SELECT ");
+    List<String> tableStatsSelectList = Lists.newArrayList();
+    tableStatsSelectList.add("COUNT(*)");
+
+    tableStatsSelectList.addAll(partitionColsSelectList);
+    tableStatsQueryBuilder.append(Joiner.on(", ").join(tableStatsSelectList));
+    tableStatsQueryBuilder.append(" FROM " + sqlTableName);
+
+    // Query for getting the per-column NDVs and number of NULLs.
+    List<String> columnStatsSelectList = getBaseColumnStatsQuerySelectList(analyzer);
+
+    if (isIncremental_) columnStatsSelectList.addAll(partitionColsSelectList);
+
+    StringBuilder columnStatsQueryBuilder = new StringBuilder("SELECT ");
+    columnStatsQueryBuilder.append(Joiner.on(", ").join(columnStatsSelectList));
+    columnStatsQueryBuilder.append(" FROM " + sqlTableName);
+
+    // Add the WHERE clause to filter out partitions that we don't want to compute
+    // incremental stats for. While this is a win in most situations, we would like to
+    // avoid this where it does no useful work (i.e. it selects all rows). This happens
+    // when there are no existing valid partitions (so all partitions will have been
+    // selected in) and there is no partition spec (so no single partition was explicitly
+    // selected in).
+    if (filterPreds.size() > 0 &&
+        (validPartStats_.size() > 0 || partitionSpec_ != null)) {
+      String filterClause = " WHERE " + Joiner.on(" OR ").join(filterPreds);
+      columnStatsQueryBuilder.append(filterClause);
+      tableStatsQueryBuilder.append(filterClause);
+    }
+
+    if (groupByCols.size() > 0) {
+      String groupBy = " GROUP BY " + Joiner.on(", ").join(groupByCols);
+      if (isIncremental_) columnStatsQueryBuilder.append(groupBy);
+      tableStatsQueryBuilder.append(groupBy);
+    }
+
+    tableStatsQueryStr_ = tableStatsQueryBuilder.toString();
+    LOG.debug("Table stats query: " + tableStatsQueryStr_);
+
+    if (columnStatsSelectList.isEmpty()) {
+      // Table doesn't have any columns that we can compute stats for.
+      LOG.info("No supported column types in table " + table_.getTableName() +
+          ", no column statistics will be gathered.");
+      columnStatsQueryStr_ = null;
+      return;
+    }
+
+    columnStatsQueryStr_ = columnStatsQueryBuilder.toString();
+    LOG.debug("Column stats query: " + columnStatsQueryStr_);
+  }
+
+  /**
+   * Checks whether the column definitions from the CREATE TABLE stmt match the columns
+   * in the Avro schema. If there is a mismatch, then COMPUTE STATS cannot update the
+   * statistics in the Metastore's backend DB due to HIVE-6308. Throws an
+   * AnalysisException for such ill-created Avro tables. Does nothing if
+   * the column definitions match the Avro schema exactly.
+   */
+  private void checkIncompleteAvroSchema(HdfsTable table) throws AnalysisException {
+    Preconditions.checkState(table.isAvroTable());
+    org.apache.hadoop.hive.metastore.api.Table msTable = table.getMetaStoreTable();
+    // The column definitions from 'CREATE TABLE (column definitions) ...'
+    Iterator<FieldSchema> colDefs = msTable.getSd().getCols().iterator();
+    // The columns derived from the Avro schema file or literal schema.
+    // Inconsistencies between the Avro-schema columns and the column definitions
+    // are sometimes resolved in the CREATE TABLE, and sometimes not (see below).
+    Iterator<Column> avroSchemaCols = table.getColumns().iterator();
+    // Skip partition columns from 'table' since those are not present in
+    // the msTable field schemas.
+    for (int i = 0; i < table.getNumClusteringCols(); ++i) {
+      if (avroSchemaCols.hasNext()) avroSchemaCols.next();
+    }
+    int pos = 0;
+    while (colDefs.hasNext() || avroSchemaCols.hasNext()) {
+      if (colDefs.hasNext() && avroSchemaCols.hasNext()) {
+        FieldSchema colDef = colDefs.next();
+        Column avroSchemaCol = avroSchemaCols.next();
+        // Check that the column names are identical. Ignore mismatched types
+        // as those will either fail in the scan or succeed.
+        if (!colDef.getName().equalsIgnoreCase(avroSchemaCol.getName())) {
+          throw new AnalysisException(
+              String.format(AVRO_SCHEMA_MSG_PREFIX +
+                  "\nDefinition of column '%s' of type '%s' does not match " +
+                  "the Avro-schema column '%s' of type '%s' at position '%s'.\n" +
+                  AVRO_SCHEMA_MSG_SUFFIX,
+                  table.getName(), colDef.getName(), colDef.getType(),
+                  avroSchemaCol.getName(), avroSchemaCol.getType(), pos));
+        }
+      }
+      // The following two cases are typically not possible because Hive resolves
+      // inconsistencies between the column-definition list and the Avro schema if a
+      // column-definition list was given in the CREATE TABLE (having no column
+      // definitions at all results in HIVE-6308). Even so, we check these cases for
+      // extra safety. COMPUTE STATS could be made to succeed in special instances of
+      // the cases below but we chose to throw an AnalysisException to avoid confusion
+      // because this scenario "should" never arise as mentioned above.
+      if (colDefs.hasNext() && !avroSchemaCols.hasNext()) {
+        FieldSchema colDef = colDefs.next();
+        throw new AnalysisException(
+            String.format(AVRO_SCHEMA_MSG_PREFIX +
+                "\nMissing Avro-schema column corresponding to column " +
+                "definition '%s' of type '%s' at position '%s'.\n" +
+                AVRO_SCHEMA_MSG_SUFFIX,
+                table.getName(), colDef.getName(), colDef.getType(), pos));
+      }
+      if (!colDefs.hasNext() && avroSchemaCols.hasNext()) {
+        Column avroSchemaCol = avroSchemaCols.next();
+        throw new AnalysisException(
+            String.format(AVRO_SCHEMA_MSG_PREFIX +
+                "\nMissing column definition corresponding to Avro-schema " +
+                "column '%s' of type '%s' at position '%s'.\n" +
+                AVRO_SCHEMA_MSG_SUFFIX,
+                table.getName(), avroSchemaCol.getName(), avroSchemaCol.getType(), pos));
+      }
+      ++pos;
+    }
+  }
+
+  public String getTblStatsQuery() { return tableStatsQueryStr_; }
+  public String getColStatsQuery() { return columnStatsQueryStr_; }
+
+  @Override
+  public String toSql() {
+    if (!isIncremental_) {
+      return "COMPUTE STATS " + tableName_.toSql();
+    } else {
+      return "COMPUTE INCREMENTAL STATS " + tableName_.toSql() +
+          (partitionSpec_ == null ? "" : partitionSpec_.toSql());
+    }
+  }
+
+  public TComputeStatsParams toThrift() {
+    TComputeStatsParams params = new TComputeStatsParams();
+    params.setTable_name(new TTableName(table_.getDb().getName(), table_.getName()));
+    params.setTbl_stats_query(tableStatsQueryStr_);
+    if (columnStatsQueryStr_ != null) {
+      params.setCol_stats_query(columnStatsQueryStr_);
+    } else {
+      params.setCol_stats_queryIsSet(false);
+    }
+
+    params.setIs_incremental(isIncremental_);
+    params.setExisting_part_stats(validPartStats_);
+    params.setExpect_all_partitions(expectAllPartitions_);
+    if (!expectAllPartitions_) params.setExpected_partitions(expectedPartitions_);
+    if (isIncremental_) {
+      params.setNum_partition_cols(((HdfsTable)table_).getNumClusteringCols());
+    }
+    return params;
+  }
+}
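
To make the class comment above concrete, a rough sketch (not part of the commit) of
the two query strings analyze() assembles for a hypothetical table sales(id INT,
name STRING) PARTITIONED BY (part_col STRING), in the non-incremental case; exact
quoting and name qualification will differ:

    // Per-partition row counts; the GROUP BY lists the clustering columns.
    String tableStatsQuery =
        "SELECT COUNT(*), part_col FROM sales GROUP BY part_col";
    // Per-column NDV, a -1 placeholder for #NULLs (cast to BIGINT), and width stats:
    // fixed-width columns use the type's slot size, STRING columns use MAX/AVG length.
    String columnStatsQuery =
        "SELECT NDV(id) AS id, CAST(-1 as BIGINT), 4, CAST(4 as DOUBLE), "
        + "NDV(name) AS name, CAST(-1 as BIGINT), MAX(length(name)), AVG(length(name)) "
        + "FROM sales";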

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/CreateDataSrcStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/CreateDataSrcStmt.java b/fe/src/main/java/org/apache/impala/analysis/CreateDataSrcStmt.java
new file mode 100644
index 0000000..1ee6fd4
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/CreateDataSrcStmt.java
@@ -0,0 +1,97 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+
+import com.cloudera.impala.authorization.Privilege;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.extdatasource.ApiVersion;
+import com.cloudera.impala.thrift.TCreateDataSourceParams;
+import com.cloudera.impala.thrift.TDataSource;
+import com.google.common.base.Joiner;
+import com.google.common.base.Preconditions;
+
+/**
+ * Represents a CREATE DATA SOURCE statement.
+ */
+public class CreateDataSrcStmt extends StatementBase {
+  private final String dataSrcName_;
+  private final String className_;
+  private final String apiVersionString_;
+  private final HdfsUri location_;
+  private final boolean ifNotExists_;
+  private ApiVersion apiVersion_;
+
+  public CreateDataSrcStmt(String dataSrcName, HdfsUri location, String className,
+      String apiVersionString, boolean ifNotExists) {
+    Preconditions.checkNotNull(dataSrcName);
+    Preconditions.checkNotNull(className);
+    Preconditions.checkNotNull(apiVersionString);
+    Preconditions.checkNotNull(location);
+    dataSrcName_ = dataSrcName.toLowerCase();
+    location_ = location;
+    className_ = className;
+    apiVersionString_ = apiVersionString;
+    ifNotExists_ = ifNotExists;
+  }
+
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    if (!MetaStoreUtils.validateName(dataSrcName_)) {
+      throw new AnalysisException("Invalid data source name: " + dataSrcName_);
+    }
+    if (!ifNotExists_ && analyzer.getCatalog().getDataSource(dataSrcName_) != null) {
+      throw new AnalysisException(Analyzer.DATA_SRC_ALREADY_EXISTS_ERROR_MSG +
+          dataSrcName_);
+    }
+
+    apiVersion_ = ApiVersion.parseApiVersion(apiVersionString_);
+    if (apiVersion_ == null) {
+      throw new AnalysisException("Invalid API version: '" + apiVersionString_ +
+          "'. Valid API versions: " + Joiner.on(", ").join(ApiVersion.values()));
+    }
+
+    location_.analyze(analyzer, Privilege.ALL, FsAction.READ);
+    // TODO: Check class exists and implements API version
+    // TODO: authorization check
+  }
+
+  @Override
+  public String toSql() {
+    StringBuilder sb = new StringBuilder();
+    sb.append("CREATE DATA SOURCE ");
+    if (ifNotExists_) sb.append("IF NOT EXISTS ");
+    sb.append(dataSrcName_);
+    sb.append(" LOCATION '");
+    sb.append(location_.getLocation());
+    sb.append("' CLASS '");
+    sb.append(className_);
+    sb.append("' API_VERSION '");
+    sb.append(apiVersion_.name());
+    sb.append("'");
+    return sb.toString();
+  }
+
+  public TCreateDataSourceParams toThrift() {
+    return new TCreateDataSourceParams(
+        new TDataSource(dataSrcName_, location_.toString(), className_,
+            apiVersion_.name())).setIf_not_exists(ifNotExists_);
+  }
+}
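
For illustration only (the data source name, location, class, and API version below
are made up), toSql() above renders a statement of this shape:

    // Hypothetical rendering of CreateDataSrcStmt.toSql() after analyze() succeeds.
    String sql = "CREATE DATA SOURCE my_src LOCATION '/user/test/datasrc.jar' "
        + "CLASS 'com.example.MyDataSource' API_VERSION 'V1'";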

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/CreateDbStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/CreateDbStmt.java b/fe/src/main/java/org/apache/impala/analysis/CreateDbStmt.java
new file mode 100644
index 0000000..3dedd8b
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/CreateDbStmt.java
@@ -0,0 +1,102 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+
+import com.cloudera.impala.authorization.Privilege;
+import com.cloudera.impala.catalog.Db;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.thrift.TCreateDbParams;
+
+/**
+ * Represents a CREATE DATABASE statement
+ */
+public class CreateDbStmt extends StatementBase {
+  private final String dbName_;
+  private final HdfsUri location_;
+  private final String comment_;
+  private final boolean ifNotExists_;
+
+  /**
+   * Creates a database with the given name.
+   */
+  public CreateDbStmt(String dbName) {
+    this(dbName, null, null, false);
+  }
+
+  /**
+   * Creates a database with the given name, comment, and HDFS table storage location.
+   * New tables created in the database inherit the location property for their default
+   * storage location. The statement throws an error if the database already exists,
+   * unless ifNotExists is true.
+   */
+  public CreateDbStmt(String dbName, String comment, HdfsUri location,
+      boolean ifNotExists) {
+    this.dbName_ = dbName;
+    this.comment_ = comment;
+    this.location_ = location;
+    this.ifNotExists_ = ifNotExists;
+  }
+
+  public String getComment() { return comment_; }
+  public String getDb() { return dbName_; }
+  public boolean getIfNotExists() { return ifNotExists_; }
+  public HdfsUri getLocation() { return location_; }
+
+  @Override
+  public String toSql() {
+    StringBuilder sb = new StringBuilder("CREATE DATABASE");
+    if (ifNotExists_) sb.append(" IF NOT EXISTS");
+    sb.append(" " + dbName_);
+    if (comment_ != null) sb.append(" COMMENT '" + comment_ + "'");
+    if (location_ != null) sb.append(" LOCATION '" + location_ + "'");
+    return sb.toString();
+  }
+
+  public TCreateDbParams toThrift() {
+    TCreateDbParams params = new TCreateDbParams();
+    params.setDb(getDb());
+    params.setComment(getComment());
+    params.setLocation(location_ == null ? null : location_.toString());
+    params.setIf_not_exists(getIfNotExists());
+    return params;
+  }
+
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    // Check whether the db name meets the Metastore's requirements.
+    if (!MetaStoreUtils.validateName(dbName_)) {
+      throw new AnalysisException("Invalid database name: " + dbName_);
+    }
+
+    // Note: It is possible that a database with the same name was created external to
+    // this Impala instance. If that happens, the caller will not get an
+    // AnalysisException when creating the database, they will get a Hive
+    // AlreadyExistsException once the request has been sent to the metastore.
+    Db db = analyzer.getDb(getDb(), Privilege.CREATE, false);
+    if (db != null && !ifNotExists_) {
+      throw new AnalysisException(Analyzer.DB_ALREADY_EXISTS_ERROR_MSG + getDb());
+    }
+
+    if (location_ != null) {
+      location_.analyze(analyzer, Privilege.ALL, FsAction.READ_WRITE);
+    }
+  }
+}
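
A small usage sketch (not part of the commit; the database name and comment are
hypothetical) showing the SQL produced by toSql() above:

    // Constructor order as declared above: (dbName, comment, location, ifNotExists).
    CreateDbStmt stmt = new CreateDbStmt("analytics", "reporting db", null, true);
    String sql = stmt.toSql();
    // -> "CREATE DATABASE IF NOT EXISTS analytics COMMENT 'reporting db'"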

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/CreateDropRoleStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/CreateDropRoleStmt.java b/fe/src/main/java/org/apache/impala/analysis/CreateDropRoleStmt.java
new file mode 100644
index 0000000..ef90b8a
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/CreateDropRoleStmt.java
@@ -0,0 +1,63 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import com.cloudera.impala.catalog.Role;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.thrift.TCreateDropRoleParams;
+import com.google.common.base.Preconditions;
+
+/**
+ * Represents a "CREATE ROLE" or "DROP ROLE" statement.
+ */
+public class CreateDropRoleStmt extends AuthorizationStmt {
+  private final String roleName_;
+  private final boolean isDropRole_;
+
+  // Set in analysis
+  private String user_;
+
+  public CreateDropRoleStmt(String roleName, boolean isDropRole) {
+    Preconditions.checkNotNull(roleName);
+    roleName_ = roleName;
+    isDropRole_ = isDropRole;
+  }
+
+  @Override
+  public String toSql() {
+    return String.format("%s ROLE %s", isDropRole_ ? "DROP" : "CREATE", roleName_);
+  }
+
+  public TCreateDropRoleParams toThrift() {
+    TCreateDropRoleParams params = new TCreateDropRoleParams();
+    params.setRole_name(roleName_);
+    params.setIs_drop(isDropRole_);
+    return params;
+  }
+
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    super.analyze(analyzer);
+    Role existingRole = analyzer.getCatalog().getAuthPolicy().getRole(roleName_);
+    if (isDropRole_ && existingRole == null) {
+      throw new AnalysisException(String.format("Role '%s' does not exist.", roleName_));
+    } else if (!isDropRole_ && existingRole != null) {
+      throw new AnalysisException(String.format("Role '%s' already exists.", roleName_));
+    }
+  }
+}
\ No newline at end of file
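
Usage sketch for toSql() above (not part of the commit; the role name is
hypothetical):

    String dropSql = new CreateDropRoleStmt("analyst", true).toSql();    // "DROP ROLE analyst"
    String createSql = new CreateDropRoleStmt("analyst", false).toSql(); // "CREATE ROLE analyst"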

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/CreateFunctionStmtBase.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/CreateFunctionStmtBase.java b/fe/src/main/java/org/apache/impala/analysis/CreateFunctionStmtBase.java
new file mode 100644
index 0000000..ebfd7b6
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/CreateFunctionStmtBase.java
@@ -0,0 +1,206 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+
+import org.apache.hadoop.fs.permission.FsAction;
+
+import com.cloudera.impala.authorization.AuthorizeableFn;
+import com.cloudera.impala.authorization.Privilege;
+import com.cloudera.impala.authorization.PrivilegeRequest;
+import com.cloudera.impala.catalog.Catalog;
+import com.cloudera.impala.catalog.Db;
+import com.cloudera.impala.catalog.Function;
+import com.cloudera.impala.catalog.Type;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.thrift.TCreateFunctionParams;
+import com.cloudera.impala.thrift.TFunctionBinaryType;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+
+/**
+ * Base class for CREATE [] FUNCTION.
+ */
+public abstract class CreateFunctionStmtBase extends StatementBase {
+
+  // Enums for valid keys for optional arguments.
+  public enum OptArg {
+    COMMENT,
+    SYMBOL,           // Only used for Udfs
+    PREPARE_FN,       // Only used for Udfs
+    CLOSE_FN,         // Only used for Udfs
+    UPDATE_FN,        // Only used for Udas
+    INIT_FN,          // Only used for Udas
+    SERIALIZE_FN,     // Only used for Udas
+    MERGE_FN,         // Only used for Udas
+    FINALIZE_FN       // Only used for Udas
+  };
+
+  protected final FunctionName fnName_;
+  protected final FunctionArgs args_;
+  protected final TypeDef retTypeDef_;
+  protected final HdfsUri location_;
+  protected final HashMap<CreateFunctionStmtBase.OptArg, String> optArgs_;
+  protected final boolean ifNotExists_;
+
+  // Result of analysis.
+  protected Function fn_;
+
+  // Db object for function fn_. Set in analyze().
+  protected Db db_;
+
+  // Set in analyze()
+  protected String sqlString_;
+
+  protected CreateFunctionStmtBase(FunctionName fnName, FunctionArgs args,
+      TypeDef retTypeDef, HdfsUri location, boolean ifNotExists,
+      HashMap<CreateFunctionStmtBase.OptArg, String> optArgs) {
+    // The return and arg types must either be both null or non-null.
+    Preconditions.checkState(!(args == null ^ retTypeDef == null));
+    fnName_ = fnName;
+    args_ = args;
+    retTypeDef_ = retTypeDef;
+    location_ = location;
+    ifNotExists_ = ifNotExists;
+    optArgs_ = optArgs;
+  }
+
+  public String getComment() { return optArgs_.get(OptArg.COMMENT); }
+  public boolean getIfNotExists() { return ifNotExists_; }
+  public boolean hasSignature() { return args_ != null; }
+
+  public TCreateFunctionParams toThrift() {
+    TCreateFunctionParams params = new TCreateFunctionParams(fn_.toThrift());
+    params.setIf_not_exists(getIfNotExists());
+    params.setFn(fn_.toThrift());
+    return params;
+  }
+
+  // Returns optArg[key], first validating that it is set.
+  protected String checkAndGetOptArg(OptArg key)
+      throws AnalysisException {
+    if (!optArgs_.containsKey(key)) {
+      throw new AnalysisException("Argument '" + key + "' must be set.");
+    }
+    return optArgs_.get(key);
+  }
+
+  protected void checkOptArgNotSet(OptArg key)
+      throws AnalysisException {
+    if (optArgs_.containsKey(key)) {
+      throw new AnalysisException("Optional argument '" + key + "' should not be set.");
+    }
+  }
+
+  // Returns the function's binary type based on the path extension.
+  private TFunctionBinaryType getBinaryType() throws AnalysisException {
+    TFunctionBinaryType binaryType = null;
+    String binaryPath = fn_.getLocation().getLocation();
+    int suffixIndex = binaryPath.lastIndexOf(".");
+    if (suffixIndex != -1) {
+      String suffix = binaryPath.substring(suffixIndex + 1);
+      if (suffix.equalsIgnoreCase("jar")) {
+        binaryType = TFunctionBinaryType.JAVA;
+      } else if (suffix.equalsIgnoreCase("so")) {
+        binaryType = TFunctionBinaryType.NATIVE;
+      } else if (suffix.equalsIgnoreCase("ll")) {
+        binaryType = TFunctionBinaryType.IR;
+      }
+    }
+    if (binaryType == null) {
+      throw new AnalysisException("Unknown binary type: '" + binaryPath +
+          "'. Binary must end in .jar, .so or .ll");
+    }
+    return binaryType;
+  }
+
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    // Validate function name is legal
+    fnName_.analyze(analyzer);
+
+    if (hasSignature()) {
+      // Validate function arguments and return type.
+      args_.analyze(analyzer);
+      retTypeDef_.analyze(analyzer);
+      fn_ = createFunction(fnName_, args_.getArgTypes(), retTypeDef_.getType(),
+          args_.hasVarArgs());
+    } else {
+      fn_ = createFunction(fnName_, null, null, false);
+    }
+
+    // For now, if authorization is enabled, the user needs ALL on the server
+    // to create functions.
+    // TODO: this is not the right granularity but acceptable for now.
+    analyzer.registerPrivReq(new PrivilegeRequest(
+        new AuthorizeableFn(fn_.signatureString()), Privilege.ALL));
+
+    Db builtinsDb = analyzer.getCatalog().getDb(Catalog.BUILTINS_DB);
+    if (builtinsDb.containsFunction(fn_.getName())) {
+      throw new AnalysisException("Function cannot have the same name as a builtin: " +
+          fn_.getFunctionName().getFunction());
+    }
+
+    db_ = analyzer.getDb(fn_.dbName(), Privilege.CREATE);
+    Function existingFn = db_.getFunction(fn_, Function.CompareMode.IS_INDISTINGUISHABLE);
+    if (existingFn != null && !ifNotExists_) {
+      throw new AnalysisException(Analyzer.FN_ALREADY_EXISTS_ERROR_MSG +
+          existingFn.signatureString());
+    }
+
+    location_.analyze(analyzer, Privilege.CREATE, FsAction.READ);
+    fn_.setLocation(location_);
+
+    // Infer the binary type (JAVA, NATIVE or IR) from the binary's file extension.
+    fn_.setBinaryType(getBinaryType());
+
+    // Forbid unsupported and complex types.
+    if (hasSignature()) {
+      List<Type> refdTypes = Lists.newArrayList(fn_.getReturnType());
+      refdTypes.addAll(Lists.newArrayList(fn_.getArgs()));
+      for (Type t: refdTypes) {
+        if (!t.isSupported() || t.isComplexType()) {
+          throw new AnalysisException(
+              String.format("Type '%s' is not supported in UDFs/UDAs.", t.toSql()));
+        }
+      }
+    } else if (fn_.getBinaryType() != TFunctionBinaryType.JAVA) {
+      throw new AnalysisException(
+          String.format("Native functions require a return type and/or " +
+              "argument types: %s", fn_.getFunctionName()));
+    }
+
+    // Check if the function can be persisted. We persist all native/IR functions
+    // and also JAVA functions added without signature. Only JAVA functions added
+    // with signatures aren't persisted.
+    if (getBinaryType() == TFunctionBinaryType.JAVA && hasSignature()) {
+      fn_.setIsPersistent(false);
+    } else {
+      fn_.setIsPersistent(true);
+    }
+  }
+
+  /**
+   * Creates a concrete function.
+   */
+  protected abstract Function createFunction(FunctionName fnName,
+      ArrayList<Type> argTypes, Type retType, boolean hasVarArgs);
+}
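
Sketch (not part of the commit) of the suffix-to-binary-type mapping that
getBinaryType() implements; the standalone helper below uses string names instead of
the TFunctionBinaryType enum:

    // .jar -> JAVA, .so -> NATIVE, .ll -> IR; anything else is rejected, mirroring
    // the AnalysisException thrown above.
    static String binaryTypeForPath(String path) {
      int idx = path.lastIndexOf('.');
      String suffix = (idx == -1) ? "" : path.substring(idx + 1);
      if (suffix.equalsIgnoreCase("jar")) return "JAVA";
      if (suffix.equalsIgnoreCase("so")) return "NATIVE";
      if (suffix.equalsIgnoreCase("ll")) return "IR";
      throw new IllegalArgumentException(
          "Unknown binary type: '" + path + "'. Binary must end in .jar, .so or .ll");
    }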

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/CreateOrAlterViewStmtBase.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/CreateOrAlterViewStmtBase.java b/fe/src/main/java/org/apache/impala/analysis/CreateOrAlterViewStmtBase.java
new file mode 100644
index 0000000..cc04b04
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/CreateOrAlterViewStmtBase.java
@@ -0,0 +1,209 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.thrift.TCreateOrAlterViewParams;
+import com.cloudera.impala.thrift.TTableName;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
+
+/**
+ * Base class for CREATE VIEW and ALTER VIEW AS SELECT statements.
+ */
+public abstract class CreateOrAlterViewStmtBase extends StatementBase {
+  private final static Logger LOG =
+      LoggerFactory.getLogger(CreateOrAlterViewStmtBase.class);
+
+  protected final boolean ifNotExists_;
+  protected final TableName tableName_;
+  protected final ArrayList<ColumnDef> columnDefs_;
+  protected final String comment_;
+  protected final QueryStmt viewDefStmt_;
+
+  // Set during analysis
+  protected String dbName_;
+  protected String owner_;
+
+  // The original SQL-string given as view definition. Set during analysis.
+  // Corresponds to Hive's viewOriginalText.
+  protected String originalViewDef_;
+
+  // Query statement (as SQL string) that defines the View for view substitution.
+  // It is a transformation of the original view definition, e.g., to enforce the
+  // columnDefs even if the original view definition has explicit column aliases.
+  // If column definitions were given, then this "expanded" view definition
+  // wraps the original view definition in a select stmt as follows.
+  //
+  // SELECT viewName.origCol1 AS colDesc1, viewName.origCol2 AS colDesc2, ...
+  // FROM (originalViewDef) AS viewName
+  //
+  // Corresponds to Hive's viewExpandedText, but is not identical to the SQL
+  // Hive would produce in view creation.
+  protected String inlineViewDef_;
+
+  // Columns to use in the select list of the expanded SQL string and when registering
+  // this view in the metastore. Set in analysis.
+  protected ArrayList<ColumnDef> finalColDefs_;
+
+  public CreateOrAlterViewStmtBase(boolean ifNotExists, TableName tableName,
+      ArrayList<ColumnDef> columnDefs, String comment, QueryStmt viewDefStmt) {
+    Preconditions.checkNotNull(tableName);
+    Preconditions.checkNotNull(viewDefStmt);
+    this.ifNotExists_ = ifNotExists;
+    this.tableName_ = tableName;
+    this.columnDefs_ = columnDefs;
+    this.comment_ = comment;
+    this.viewDefStmt_ = viewDefStmt;
+  }
+
+  /**
+   * Sets the originalViewDef and the expanded inlineViewDef based on viewDefStmt.
+   * If columnDefs were given, checks that they do not contain duplicate column names
+   * and throws an exception if they do.
+   */
+  protected void createColumnAndViewDefs(Analyzer analyzer) throws AnalysisException {
+    Preconditions.checkNotNull(dbName_);
+    Preconditions.checkNotNull(owner_);
+
+    // Set the finalColDefs to reflect the given column definitions.
+    if (columnDefs_ != null) {
+      Preconditions.checkState(!columnDefs_.isEmpty());
+      if (columnDefs_.size() != viewDefStmt_.getColLabels().size()) {
+        String cmp =
+            (columnDefs_.size() > viewDefStmt_.getColLabels().size()) ? "more" : "fewer";
+        throw new AnalysisException(String.format("Column-definition list has " +
+            "%s columns (%s) than the view-definition query statement returns (%s).",
+            cmp, columnDefs_.size(), viewDefStmt_.getColLabels().size()));
+      }
+
+      finalColDefs_ = columnDefs_;
+      Preconditions.checkState(
+          columnDefs_.size() == viewDefStmt_.getBaseTblResultExprs().size());
+      for (int i = 0; i < columnDefs_.size(); ++i) {
+        // Set type in the column definition from the view-definition statement.
+        columnDefs_.get(i).setType(viewDefStmt_.getBaseTblResultExprs().get(i).getType());
+      }
+    } else {
+      // Create list of column definitions from the view-definition statement.
+      finalColDefs_ = Lists.newArrayList();
+      List<Expr> exprs = viewDefStmt_.getBaseTblResultExprs();
+      List<String> labels = viewDefStmt_.getColLabels();
+      Preconditions.checkState(exprs.size() == labels.size());
+      for (int i = 0; i < viewDefStmt_.getColLabels().size(); ++i) {
+        ColumnDef colDef = new ColumnDef(labels.get(i), null, null);
+        colDef.setType(exprs.get(i).getType());
+        finalColDefs_.add(colDef);
+      }
+    }
+
+    // Check that the column definitions have valid names, and that there are no
+    // duplicate column names.
+    Set<String> distinctColNames = Sets.newHashSet();
+    for (ColumnDef colDesc: finalColDefs_) {
+      colDesc.analyze();
+      if (!distinctColNames.add(colDesc.getColName().toLowerCase())) {
+        throw new AnalysisException("Duplicate column name: " + colDesc.getColName());
+      }
+    }
+
+    // Set original and expanded view-definition SQL strings.
+    originalViewDef_ = viewDefStmt_.toSql();
+
+    // If no column definitions were given, then the expanded view SQL is the same
+    // as the original one.
+    if (columnDefs_ == null) {
+      inlineViewDef_ = originalViewDef_;
+      return;
+    }
+
+    // Wrap the original view-definition statement into a SELECT to enforce the
+    // given column definitions.
+    StringBuilder sb = new StringBuilder();
+    sb.append("SELECT ");
+    for (int i = 0; i < finalColDefs_.size(); ++i) {
+      String colRef = ToSqlUtils.getIdentSql(viewDefStmt_.getColLabels().get(i));
+      String colAlias = ToSqlUtils.getIdentSql(finalColDefs_.get(i).getColName());
+      sb.append(String.format("%s.%s AS %s", tableName_.getTbl(), colRef, colAlias));
+      sb.append((i+1 != finalColDefs_.size()) ? ", " : "");
+    }
+    // Do not use 'AS' for table aliases because Hive only accepts them without 'AS'.
+    sb.append(String.format(" FROM (%s) %s", originalViewDef_, tableName_.getTbl()));
+    inlineViewDef_ = sb.toString();
+  }
+
+  /**
+   * Computes the column lineage graph for a create/alter view statement.
+   */
+  protected void computeLineageGraph(Analyzer analyzer) {
+    ColumnLineageGraph graph = analyzer.getColumnLineageGraph();
+    List<String> colDefs = Lists.newArrayList();
+    for (ColumnDef colDef: finalColDefs_) {
+      colDefs.add(dbName_ + "." + getTbl() + "." + colDef.getColName());
+    }
+    graph.addTargetColumnLabels(colDefs);
+    graph.computeLineageGraph(viewDefStmt_.getResultExprs(), analyzer);
+    LOG.trace("lineage: " + graph.debugString());
+  }
+
+  public TCreateOrAlterViewParams toThrift() {
+    TCreateOrAlterViewParams params = new TCreateOrAlterViewParams();
+    params.setView_name(new TTableName(getDb(), getTbl()));
+    for (ColumnDef col: finalColDefs_) {
+      params.addToColumns(col.toThrift());
+    }
+    params.setOwner(getOwner());
+    params.setIf_not_exists(getIfNotExists());
+    params.setOriginal_view_def(originalViewDef_);
+    params.setExpanded_view_def(inlineViewDef_);
+    if (comment_ != null) params.setComment(comment_);
+    return params;
+  }
+
+  /**
+   * Can only be called after analysis, returns the name of the database the table will
+   * be created within.
+   */
+  public String getDb() {
+    Preconditions.checkNotNull(dbName_);
+    return dbName_;
+  }
+
+  /**
+   * Can only be called after analysis, returns the owner of the view to be created.
+   */
+  public String getOwner() {
+    Preconditions.checkNotNull(owner_);
+    return owner_;
+  }
+
+  public List<ColumnDef> getColumnDescs() { return columnDefs_; }
+  public String getComment() { return comment_; }
+  public boolean getIfNotExists() { return ifNotExists_; }
+  public String getOriginalViewDef() { return originalViewDef_; }
+  public String getInlineViewDef() { return inlineViewDef_; }
+  public String getTbl() { return tableName_.getTbl(); }
+}
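
To illustrate the expanded view definition built by createColumnAndViewDefs() above
(not part of the commit; the view name "v", labels "a"/"b", and aliases "x"/"y" are
hypothetical):

    String originalViewDef = "SELECT a, b FROM t";
    // With explicit column definitions (x, y), the original definition is wrapped so
    // the aliases are enforced; no 'AS' before the table alias, for Hive compatibility.
    String inlineViewDef =
        "SELECT v.a AS x, v.b AS y FROM (" + originalViewDef + ") v";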



[44/61] [partial] incubator-impala git commit: IMPALA-3786: Replace "cloudera" with "apache" (part 1)

Posted by kw...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/CreateTableAsSelectStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/CreateTableAsSelectStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/CreateTableAsSelectStmt.java
deleted file mode 100644
index 7b59625..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/CreateTableAsSelectStmt.java
+++ /dev/null
@@ -1,212 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.List;
-import java.util.EnumSet;
-
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.catalog.Db;
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.catalog.KuduTable;
-import com.cloudera.impala.catalog.MetaStoreClientPool.MetaStoreClient;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.catalog.TableId;
-import com.cloudera.impala.catalog.TableLoadingException;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.service.CatalogOpExecutor;
-import com.cloudera.impala.thrift.THdfsFileFormat;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-/**
- * Represents a CREATE TABLE AS SELECT (CTAS) statement
- *
- * The statement supports an optional PARTITIONED BY clause. Its syntax and semantics
- * follow the PARTITION feature of INSERT FROM SELECT statements: inside the PARTITIONED
- * BY (...) column list the user must specify names of the columns to partition by. These
- * column names must appear in the specified order at the end of the select statement. A
- * remapping between columns of the source and destination tables is not possible, because
- * the destination table does not yet exist. Specifying static values for the partition
- * columns is also not possible, as their type needs to be deduced from columns in the
- * select statement.
- */
-public class CreateTableAsSelectStmt extends StatementBase {
-  private final CreateTableStmt createStmt_;
-
-  // List of partition columns from the PARTITIONED BY (...) clause. Set to null if no
-  // partition was given.
-  private final List<String> partitionKeys_;
-
-  /////////////////////////////////////////
-  // BEGIN: Members that need to be reset()
-
-  private final InsertStmt insertStmt_;
-
-  // END: Members that need to be reset()
-  /////////////////////////////////////////
-
-  private final static EnumSet<THdfsFileFormat> SUPPORTED_INSERT_FORMATS =
-      EnumSet.of(THdfsFileFormat.PARQUET, THdfsFileFormat.TEXT);
-
-  /**
-   * Builds a CREATE TABLE AS SELECT statement
-   */
-  public CreateTableAsSelectStmt(CreateTableStmt createStmt, QueryStmt queryStmt,
-      List<String> partitionKeys) {
-    Preconditions.checkNotNull(queryStmt);
-    Preconditions.checkNotNull(createStmt);
-    createStmt_ = createStmt;
-    partitionKeys_ = partitionKeys;
-    List<PartitionKeyValue> pkvs = null;
-    if (partitionKeys != null) {
-      pkvs = Lists.newArrayList();
-      for (String key: partitionKeys) {
-        pkvs.add(new PartitionKeyValue(key, null));
-      }
-    }
-    insertStmt_ = new InsertStmt(null, createStmt.getTblName(), false, pkvs,
-        null, queryStmt, null, false);
-  }
-
-  public QueryStmt getQueryStmt() { return insertStmt_.getQueryStmt(); }
-  public InsertStmt getInsertStmt() { return insertStmt_; }
-  public CreateTableStmt getCreateStmt() { return createStmt_; }
-  @Override
-  public String toSql() { return ToSqlUtils.getCreateTableSql(this); }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    if (isAnalyzed()) return;
-    super.analyze(analyzer);
-
-    // The analysis for CTAS happens in two phases - the first phase happens before
-    // the target table exists and we want to validate the CREATE statement and the
-    // query portion of the insert statement. If this passes, analysis will be run
-    // over the full INSERT statement. To avoid duplicate registrations of table/colRefs,
-    // create a new root analyzer and clone the query statement for this initial pass.
-    Analyzer dummyRootAnalyzer = new Analyzer(analyzer.getCatalog(),
-        analyzer.getQueryCtx(), analyzer.getAuthzConfig());
-    QueryStmt tmpQueryStmt = insertStmt_.getQueryStmt().clone();
-    try {
-      Analyzer tmpAnalyzer = new Analyzer(dummyRootAnalyzer);
-      tmpAnalyzer.setUseHiveColLabels(true);
-      tmpQueryStmt.analyze(tmpAnalyzer);
-      // Subqueries need to be rewritten by the StmtRewriter first.
-      if (analyzer.containsSubquery()) return;
-    } finally {
-      // Record missing tables in the original analyzer.
-      analyzer.getMissingTbls().addAll(dummyRootAnalyzer.getMissingTbls());
-    }
-
-    // Add the columns from the partition clause to the create statement.
-    if (partitionKeys_ != null) {
-      int colCnt = tmpQueryStmt.getColLabels().size();
-      int partColCnt = partitionKeys_.size();
-      if (partColCnt >= colCnt) {
-        throw new AnalysisException(String.format("Number of partition columns (%s) " +
-            "must be smaller than the number of columns in the select statement (%s).",
-            partColCnt, colCnt));
-      }
-      int firstCol = colCnt - partColCnt;
-      for (int i = firstCol, j = 0; i < colCnt; ++i, ++j) {
-        String partitionLabel = partitionKeys_.get(j);
-        String colLabel = tmpQueryStmt.getColLabels().get(i);
-
-        // Ensure that partition columns are named and positioned at end of
-        // input column list.
-        if (!partitionLabel.equals(colLabel)) {
-          throw new AnalysisException(String.format("Partition column name " +
-              "mismatch: %s != %s", partitionLabel, colLabel));
-        }
-
-        ColumnDef colDef = new ColumnDef(colLabel, null, null);
-        colDef.setType(tmpQueryStmt.getBaseTblResultExprs().get(i).getType());
-        createStmt_.getPartitionColumnDefs().add(colDef);
-      }
-      // Remove partition columns from table column list.
-      tmpQueryStmt.getColLabels().subList(firstCol, colCnt).clear();
-    }
-
-    // Add the columns from the select statement to the create statement.
-    int colCnt = tmpQueryStmt.getColLabels().size();
-    createStmt_.getColumnDefs().clear();
-    for (int i = 0; i < colCnt; ++i) {
-      ColumnDef colDef = new ColumnDef(
-          tmpQueryStmt.getColLabels().get(i), null, null);
-      colDef.setType(tmpQueryStmt.getBaseTblResultExprs().get(i).getType());
-      createStmt_.getColumnDefs().add(colDef);
-    }
-    createStmt_.analyze(analyzer);
-
-    if (!SUPPORTED_INSERT_FORMATS.contains(createStmt_.getFileFormat())) {
-      throw new AnalysisException(String.format("CREATE TABLE AS SELECT " +
-          "does not support (%s) file format. Supported formats are: (%s)",
-          createStmt_.getFileFormat().toString().replace("_", ""),
-          "PARQUET, TEXTFILE"));
-    }
-
-    // The full privilege check for the database will be done as part of the INSERT
-    // analysis.
-    Db db = analyzer.getDb(createStmt_.getDb(), Privilege.ANY);
-    if (db == null) {
-      throw new AnalysisException(
-          Analyzer.DB_DOES_NOT_EXIST_ERROR_MSG + createStmt_.getDb());
-    }
-
-    // Running analysis on the INSERT portion of the CTAS requires the target INSERT
-    // table to "exist". For CTAS the table does not exist yet, so create a "temp"
-    // table to run analysis against. The schema of this temp table should exactly
-    // match the schema of the table that will be created by running the CREATE
-    // statement.
-    org.apache.hadoop.hive.metastore.api.Table msTbl =
-        CatalogOpExecutor.createMetaStoreTable(createStmt_.toThrift());
-
-    try (MetaStoreClient client = analyzer.getCatalog().getMetaStoreClient()) {
-      // Set a valid location of this table using the same rules as the metastore. If the
-      // user specified a location for the table this will be a no-op.
-      msTbl.getSd().setLocation(analyzer.getCatalog().getTablePath(msTbl).toString());
-
-      // Create a "temp" table based off the given metastore.api.Table object. Normally,
-      // the CatalogService assigns all table IDs, but in this case we need to assign the
-      // "temp" table an ID locally. This table ID cannot conflict with any table in the
-      // SelectStmt (or the BE will be very confused). To ensure the ID is unique within
-      // this query, just assign it the invalid table ID. The CatalogServer will assign
-      // this table a proper ID once it is created there as part of the CTAS execution.
-      Table table = Table.fromMetastoreTable(TableId.createInvalidId(), db, msTbl);
-      Preconditions.checkState(table != null &&
-          (table instanceof HdfsTable || table instanceof KuduTable));
-
-      table.load(true, client.getHiveClient(), msTbl);
-      insertStmt_.setTargetTable(table);
-    } catch (TableLoadingException e) {
-      throw new AnalysisException(e.getMessage(), e);
-    } catch (Exception e) {
-      throw new AnalysisException(e.getMessage(), e);
-    }
-
-    // Finally, run analysis on the insert statement.
-    insertStmt_.analyze(analyzer);
-  }
-
-  @Override
-  public void reset() {
-    super.reset();
-    insertStmt_.reset();
-  }
-}
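
The PARTITIONED BY rule described in the class comment above (partition column names must match, in order, the trailing column labels of the select statement, and at least one non-partition column must remain) can be sketched in isolation. The class name and sample column names below are hypothetical and only illustrate the check.

import java.util.Arrays;
import java.util.List;

public class CtasPartitionCheckSketch {
  // Mirrors the partition-column validation performed in analyze() above.
  static void check(List<String> colLabels, List<String> partitionKeys) {
    int colCnt = colLabels.size();
    int partColCnt = partitionKeys.size();
    if (partColCnt >= colCnt) {
      throw new IllegalArgumentException(String.format(
          "Number of partition columns (%s) must be smaller than the number of " +
          "columns in the select statement (%s).", partColCnt, colCnt));
    }
    int firstCol = colCnt - partColCnt;
    for (int i = firstCol, j = 0; i < colCnt; ++i, ++j) {
      if (!partitionKeys.get(j).equals(colLabels.get(i))) {
        throw new IllegalArgumentException(String.format(
            "Partition column name mismatch: %s != %s",
            partitionKeys.get(j), colLabels.get(i)));
      }
    }
  }

  public static void main(String[] args) {
    check(Arrays.asList("id", "name", "year", "month"), Arrays.asList("year", "month")); // passes
    check(Arrays.asList("id", "year"), Arrays.asList("month")); // throws: "month" != "year"
  }
}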

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/CreateTableDataSrcStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/CreateTableDataSrcStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/CreateTableDataSrcStmt.java
deleted file mode 100644
index 0faf881..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/CreateTableDataSrcStmt.java
+++ /dev/null
@@ -1,95 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import static com.cloudera.impala.catalog.DataSourceTable.TBL_PROP_API_VER;
-import static com.cloudera.impala.catalog.DataSourceTable.TBL_PROP_CLASS;
-import static com.cloudera.impala.catalog.DataSourceTable.TBL_PROP_DATA_SRC_NAME;
-import static com.cloudera.impala.catalog.DataSourceTable.TBL_PROP_INIT_STRING;
-import static com.cloudera.impala.catalog.DataSourceTable.TBL_PROP_LOCATION;
-
-import java.util.List;
-import java.util.Map;
-
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.catalog.DataSource;
-import com.cloudera.impala.catalog.DataSourceTable;
-import com.cloudera.impala.catalog.RowFormat;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.THdfsFileFormat;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Strings;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import org.apache.hadoop.fs.permission.FsAction;
-
-/**
- * Represents a CREATE TABLE statement for external data sources. Such tables
- * reference an external data source (created with a CREATE DATA SOURCE statement)
- * and the properties of that source are stored in the table properties because
- * the metastore does not store the data sources themselves.
- */
-public class CreateTableDataSrcStmt extends CreateTableStmt {
-
-  public CreateTableDataSrcStmt(TableName tableName, List<ColumnDef> columnDefs,
-      String dataSourceName, String initString, String comment, boolean ifNotExists) {
-    super(tableName, columnDefs, Lists.<ColumnDef>newArrayList(), false, comment,
-        RowFormat.DEFAULT_ROW_FORMAT, THdfsFileFormat.TEXT, null, null, ifNotExists,
-        createInitialTableProperties(dataSourceName, initString),
-        Maps.<String, String>newHashMap(), null);
-  }
-
-  /**
-   * Creates the initial map of table properties containing the name of the data
-   * source and the table init string.
-   */
-  private static Map<String, String> createInitialTableProperties(
-      String dataSourceName, String initString) {
-    Preconditions.checkNotNull(dataSourceName);
-    Map<String, String> tableProperties = Maps.newHashMap();
-    tableProperties.put(TBL_PROP_DATA_SRC_NAME, dataSourceName.toLowerCase());
-    tableProperties.put(TBL_PROP_INIT_STRING, Strings.nullToEmpty(initString));
-    return tableProperties;
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    super.analyze(analyzer);
-    String dataSourceName = getTblProperties().get(TBL_PROP_DATA_SRC_NAME);
-    DataSource dataSource = analyzer.getCatalog().getDataSource(dataSourceName);
-    if (dataSource == null) {
-      throw new AnalysisException("Data source does not exist: " + dataSourceName);
-    }
-
-    for (ColumnDef col: getColumnDefs()) {
-      if (!DataSourceTable.isSupportedColumnType(col.getType())) {
-        throw new AnalysisException("Tables produced by an external data source do " +
-            "not support the column type: " + col.getType());
-      }
-    }
-    // Add table properties from the DataSource catalog object now that we have access
-    // to the catalog. These are stored in the table metadata because DataSource catalog
-    // objects are not currently persisted.
-    String location = dataSource.getLocation();
-    getTblProperties().put(TBL_PROP_LOCATION, location);
-    getTblProperties().put(TBL_PROP_CLASS, dataSource.getClassName());
-    getTblProperties().put(TBL_PROP_API_VER, dataSource.getApiVersion());
-    new HdfsUri(location).analyze(analyzer, Privilege.ALL, FsAction.READ);
-    // TODO: check class exists and implements API version
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/CreateTableLikeFileStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/CreateTableLikeFileStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/CreateTableLikeFileStmt.java
deleted file mode 100644
index 6695cac..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/CreateTableLikeFileStmt.java
+++ /dev/null
@@ -1,375 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsAction;
-
-import parquet.hadoop.ParquetFileReader;
-import parquet.hadoop.metadata.ParquetMetadata;
-import parquet.schema.OriginalType;
-import parquet.schema.PrimitiveType;
-
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.catalog.ArrayType;
-import com.cloudera.impala.catalog.HdfsCompression;
-import com.cloudera.impala.catalog.HdfsFileFormat;
-import com.cloudera.impala.catalog.MapType;
-import com.cloudera.impala.catalog.RowFormat;
-import com.cloudera.impala.catalog.ScalarType;
-import com.cloudera.impala.catalog.StructField;
-import com.cloudera.impala.catalog.StructType;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.FileSystemUtil;
-import com.cloudera.impala.thrift.THdfsFileFormat;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-
-/**
- * Represents a CREATE TABLE tablename LIKE fileformat '/path/to/file' statement
- * where the schema is inferred from the given file. Does not partition the table by
- * default.
- */
-public class CreateTableLikeFileStmt extends CreateTableStmt {
-  private final HdfsUri schemaLocation_;
-  private final THdfsFileFormat schemaFileFormat_;
-  private final static String ERROR_MSG =
-      "Failed to convert Parquet type\n%s\nto an Impala %s type:\n%s\n";
-
-  public CreateTableLikeFileStmt(TableName tableName, THdfsFileFormat schemaFileFormat,
-      HdfsUri schemaLocation, List<ColumnDef> partitionColumnDescs,
-      boolean isExternal, String comment, RowFormat rowFormat,
-      THdfsFileFormat fileFormat, HdfsUri location, HdfsCachingOp cachingOp,
-      boolean ifNotExists, Map<String, String> tblProperties,
-      Map<String, String> serdeProperties) {
-    super(tableName, new ArrayList<ColumnDef>(), partitionColumnDescs,
-        isExternal, comment, rowFormat,
-        fileFormat, location, cachingOp, ifNotExists, tblProperties, serdeProperties,
-        null);
-    schemaLocation_ = schemaLocation;
-    schemaFileFormat_ = schemaFileFormat;
-  }
-
-  /**
-   * Reads the first block from the given HDFS file and returns the Parquet schema.
-   * Throws an AnalysisException for any failure, such as failing to read the file
-   * or failing to parse the contents.
-   */
-  private static parquet.schema.MessageType loadParquetSchema(Path pathToFile)
-      throws AnalysisException {
-    try {
-      FileSystem fs = pathToFile.getFileSystem(FileSystemUtil.getConfiguration());
-      if (!fs.isFile(pathToFile)) {
-        throw new AnalysisException("Cannot infer schema, path is not a file: " +
-                                    pathToFile);
-      }
-    } catch (IOException e) {
-      throw new AnalysisException("Failed to connect to filesystem:" + e);
-    } catch (IllegalArgumentException e) {
-      throw new AnalysisException(e.getMessage());
-    }
-    ParquetMetadata readFooter = null;
-    try {
-      readFooter = ParquetFileReader.readFooter(FileSystemUtil.getConfiguration(),
-          pathToFile);
-    } catch (FileNotFoundException e) {
-      throw new AnalysisException("File not found: " + e);
-    } catch (IOException e) {
-      throw new AnalysisException("Failed to open file as a parquet file: " + e);
-    } catch (RuntimeException e) {
-      // Parquet throws a generic RuntimeException when reading a non-parquet file
-      if (e.toString().contains("is not a Parquet file")) {
-        throw new AnalysisException("File is not a parquet file: " + pathToFile);
-      }
-      // otherwise, who knows what we caught, throw it back up
-      throw e;
-    }
-    return readFooter.getFileMetaData().getSchema();
-  }
-
-  /**
-   * Converts a "primitive" Parquet type to an Impala type.
-   * A primitive type is a non-nested type with no annotations.
-   */
-  private static Type convertPrimitiveParquetType(parquet.schema.Type parquetType)
-      throws AnalysisException {
-    Preconditions.checkState(parquetType.isPrimitive());
-    PrimitiveType prim = parquetType.asPrimitiveType();
-    switch (prim.getPrimitiveTypeName()) {
-      case BINARY: return Type.STRING;
-      case BOOLEAN: return Type.BOOLEAN;
-      case DOUBLE: return Type.DOUBLE;
-      case FIXED_LEN_BYTE_ARRAY:
-        throw new AnalysisException(
-            "Unsupported parquet type FIXED_LEN_BYTE_ARRAY for field " +
-                parquetType.getName());
-      case FLOAT: return Type.FLOAT;
-      case INT32: return Type.INT;
-      case INT64: return Type.BIGINT;
-      case INT96: return Type.TIMESTAMP;
-      default:
-        Preconditions.checkState(false, "Unexpected parquet primitive type: " +
-               prim.getPrimitiveTypeName());
-        return null;
-    }
-  }
-
-  /**
-   * Converts a Parquet group type to an Impala map Type. We support both the standard
-   * Parquet map representation and legacy ones. Legacy representations are handled
-   * according to this specification:
-   * https://github.com/apache/parquet-format/blob/master/LogicalTypes.md#backward-compatibility-rules-1
-   *
-   * Standard representation of a map in Parquet:
-   * <optional | required> group <name> (MAP) { <-- outerGroup is pointing at this
-   *   repeated group key_value {
-   *     required <key-type> key;
-   *     <optional | required> <value-type> value;
-   *   }
-   * }
-   */
-  private static MapType convertMap(parquet.schema.GroupType outerGroup)
-      throws AnalysisException {
-    if (outerGroup.getFieldCount() != 1) {
-      throw new AnalysisException(String.format(ERROR_MSG, outerGroup.toString(),
-          "MAP", "The logical MAP type must have exactly 1 inner field."));
-    }
-
-    parquet.schema.Type innerField = outerGroup.getType(0);
-    if (!innerField.isRepetition(parquet.schema.Type.Repetition.REPEATED)) {
-      throw new AnalysisException(String.format(ERROR_MSG, outerGroup.toString(),
-          "MAP", "The logical MAP type must have a repeated inner field."));
-    }
-    if (innerField.isPrimitive()) {
-      throw new AnalysisException(String.format(ERROR_MSG, outerGroup.toString(),
-          "MAP", "The inner field of the logical MAP type must be a group."));
-    }
-
-    parquet.schema.GroupType innerGroup = innerField.asGroupType();
-    // It does not matter whether innerGroup has an annotation or not (for example it may
-    // be annotated with MAP_KEY_VALUE). We treat the case that innerGroup has an
-    // annotation and the case the innerGroup does not have an annotation the same.
-    if (innerGroup.getFieldCount() != 2) {
-      throw new AnalysisException(String.format(ERROR_MSG, outerGroup.toString(),
-          "MAP", "The inner field of the logical MAP type must have exactly 2 fields."));
-    }
-
-    parquet.schema.Type key = innerGroup.getType(0);
-    if (!key.getName().equals("key")) {
-      throw new AnalysisException(String.format(ERROR_MSG, outerGroup.toString(),
-          "MAP", "The name of the first field of the inner field of the logical MAP " +
-          "type must be 'key'"));
-    }
-    if (!key.isPrimitive()) {
-      throw new AnalysisException(String.format(ERROR_MSG, outerGroup.toString(),
-          "MAP", "The key type of the logical MAP type must be primitive."));
-    }
-    parquet.schema.Type value = innerGroup.getType(1);
-    if (!value.getName().equals("value")) {
-      throw new AnalysisException(String.format(ERROR_MSG, outerGroup.toString(),
-          "MAP", "The name of the second field of the inner field of the logical MAP " +
-          "type must be 'value'"));
-    }
-
-    return new MapType(convertParquetType(key), convertParquetType(value));
-  }
-
-  /**
-   * Converts a Parquet group type to an Impala struct Type.
-   */
-  private static StructType convertStruct(parquet.schema.GroupType outerGroup)
-      throws AnalysisException {
-    ArrayList<StructField> structFields = new ArrayList<StructField>();
-    for (parquet.schema.Type field: outerGroup.getFields()) {
-      StructField f = new StructField(field.getName(), convertParquetType(field));
-      structFields.add(f);
-    }
-    return new StructType(structFields);
-  }
-
-  /**
-   * Converts a Parquet group type to an Impala array Type. We can handle the standard
-   * representation, but also legacy representations for backwards compatibility.
-   * Legacy representations are handled according to this specification:
-   * https://github.com/apache/parquet-format/blob/master/LogicalTypes.md#backward-compatibility-rules
-   *
-   * Standard representation of an array in Parquet:
-   * <optional | required> group <name> (LIST) { <-- outerGroup is pointing at this
-   *   repeated group list {
-   *     <optional | required> <element-type> element;
-   *   }
-   * }
-   */
-  private static ArrayType convertArray(parquet.schema.GroupType outerGroup)
-      throws AnalysisException {
-    if (outerGroup.getFieldCount() != 1) {
-      throw new AnalysisException(String.format(ERROR_MSG, outerGroup.toString(),
-          "LIST", "The logical LIST type must have exactly 1 inner field."));
-    }
-
-    parquet.schema.Type innerField = outerGroup.getType(0);
-    if (!innerField.isRepetition(parquet.schema.Type.Repetition.REPEATED)) {
-      throw new AnalysisException(String.format(ERROR_MSG, outerGroup.toString(),
-          "LIST", "The inner field of the logical LIST type must be repeated."));
-    }
-    if (innerField.isPrimitive() || innerField.getOriginalType() != null) {
-      // From the Parquet Spec:
-      // 1. If the repeated field is not a group, then its type is the element type.
-      //
-      // If innerField is a group, but originalType is not null, the element type is
-      // based on the logical type.
-      return new ArrayType(convertParquetType(innerField));
-    }
-
-    parquet.schema.GroupType innerGroup = innerField.asGroupType();
-    if (innerGroup.getFieldCount() != 1) {
-      // From the Parquet Spec:
-      // 2. If the repeated field is a group with multiple fields, then its type is a
-      //    struct.
-      return new ArrayType(convertStruct(innerGroup));
-    }
-
-    return new ArrayType(convertParquetType(innerGroup.getType(0)));
-  }
-
-  /**
-   * Converts a "logical" Parquet type to an Impala column type.
-   * A Parquet type is considered logical when it has an annotation. The annotation is
-   * stored as an "OriginalType". The Parquet documentation refers to these as logical
-   * types, so we use that terminology here.
-   */
-  private static Type convertLogicalParquetType(parquet.schema.Type parquetType)
-      throws AnalysisException {
-    OriginalType orig = parquetType.getOriginalType();
-    if (orig == OriginalType.LIST) {
-      return convertArray(parquetType.asGroupType());
-    }
-    if (orig == OriginalType.MAP || orig == OriginalType.MAP_KEY_VALUE) {
-      // MAP_KEY_VALUE annotation should not be used any more. However, according to the
-      // Parquet spec, some existing data incorrectly uses MAP_KEY_VALUE in place of MAP.
-      // For backward-compatibility, a group annotated with MAP_KEY_VALUE that is not
-      // contained by a MAP-annotated group should be handled as a MAP-annotated group.
-      return convertMap(parquetType.asGroupType());
-    }
-
-    PrimitiveType prim = parquetType.asPrimitiveType();
-    if (prim.getPrimitiveTypeName() == PrimitiveType.PrimitiveTypeName.BINARY &&
-        orig == OriginalType.UTF8) {
-      // UTF8 is the type annotation Parquet uses for strings
-      // We check to make sure it applies to BINARY to avoid errors if there is a bad
-      // annotation.
-      return Type.STRING;
-    }
-
-    if (orig == OriginalType.DECIMAL) {
-      return ScalarType.createDecimalType(prim.getDecimalMetadata().getPrecision(),
-                                           prim.getDecimalMetadata().getScale());
-    }
-
-    throw new AnalysisException(
-        "Unsupported logical parquet type " + orig + " (primitive type is " +
-            prim.getPrimitiveTypeName().name() + ") for field " +
-            parquetType.getName());
-  }
-
-  /**
-   * Converts a Parquet type into an Impala type.
-   */
-  private static Type convertParquetType(parquet.schema.Type field)
-      throws AnalysisException {
-    Type type = null;
-    // TODO for 2.3: If a field is not annotated with LIST, it can sometimes still be
-    // interpreted as an array. The following 2 examples should be interpreted as an array
-    // of integers, but this is currently not done.
-    // 1. repeated int int_col;
-    // 2. required group int_arr {
-    //      repeated group list {
-    //        required int element;
-    //      }
-    //    }
-    if (field.getOriginalType() != null) {
-      type = convertLogicalParquetType(field);
-    } else if (field.isPrimitive()) {
-      type = convertPrimitiveParquetType(field);
-    } else {
-      // If field is not primitive, it must be a struct.
-      type = convertStruct(field.asGroupType());
-    }
-    return type;
-  }
-
-  /**
-   * Parses a Parquet file stored in HDFS and returns the corresponding Impala schema.
-   * This fails with an analysis exception if any errors occur reading the file,
-   * parsing the Parquet schema, or if the Parquet types cannot be represented in Impala.
-   */
-  private static List<ColumnDef> extractParquetSchema(HdfsUri location)
-      throws AnalysisException {
-    parquet.schema.MessageType parquetSchema = loadParquetSchema(location.getPath());
-    List<parquet.schema.Type> fields = parquetSchema.getFields();
-    List<ColumnDef> schema = new ArrayList<ColumnDef>();
-
-    for (parquet.schema.Type field: fields) {
-      Type type = convertParquetType(field);
-      Preconditions.checkNotNull(type);
-      String colName = field.getName();
-      schema.add(new ColumnDef(colName, new TypeDef(type),
-          "Inferred from Parquet file."));
-    }
-    return schema;
-  }
-
-  @Override
-  public String toSql() {
-    ArrayList<String> colsSql = Lists.newArrayList();
-    ArrayList<String> partitionColsSql = Lists.newArrayList();
-    HdfsCompression compression = HdfsCompression.fromFileName(
-        schemaLocation_.toString());
-    String s = ToSqlUtils.getCreateTableSql(getDb(),
-        getTbl() + " __LIKE_FILEFORMAT__ ",  getComment(), colsSql, partitionColsSql,
-        getTblProperties(), getSerdeProperties(), isExternal(), getIfNotExists(),
-        getRowFormat(), HdfsFileFormat.fromThrift(getFileFormat()),
-        compression, null, getLocation());
-    s = s.replace("__LIKE_FILEFORMAT__", "LIKE " + schemaFileFormat_ + " " +
-        schemaLocation_.toString());
-    return s;
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    schemaLocation_.analyze(analyzer, Privilege.ALL, FsAction.READ_WRITE);
-    switch (schemaFileFormat_) {
-      case PARQUET:
-        getColumnDefs().addAll(extractParquetSchema(schemaLocation_));
-        break;
-      default:
-        throw new AnalysisException("Unsupported file type for schema inference: "
-            + schemaFileFormat_);
-    }
-    super.analyze(analyzer);
-  }
-}
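
As a hypothetical worked example of the conversion rules above: for a Parquet file whose schema is message m { required int32 id; optional binary name (UTF8); optional group tags (LIST) { repeated group list { optional binary element (UTF8); } } }, a CREATE TABLE t LIKE PARQUET '/path/to/file' statement would infer the columns id INT, name STRING, and tags ARRAY<STRING>. INT32 maps to INT, BINARY annotated with UTF8 maps to STRING, and the standard LIST layout maps to an ARRAY of its element type.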

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/CreateTableLikeStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/CreateTableLikeStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/CreateTableLikeStmt.java
deleted file mode 100644
index a7e2038..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/CreateTableLikeStmt.java
+++ /dev/null
@@ -1,157 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import org.apache.hadoop.fs.permission.FsAction;
-
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TAccessEvent;
-import com.cloudera.impala.thrift.TCatalogObjectType;
-import com.cloudera.impala.thrift.TCreateTableLikeParams;
-import com.cloudera.impala.thrift.THdfsFileFormat;
-import com.cloudera.impala.thrift.TTableName;
-import com.google.common.base.Preconditions;
-
-/**
- * Represents a CREATE TABLE LIKE statement which creates a new table based on
- * a copy of an existing table definition.
- */
-public class CreateTableLikeStmt extends StatementBase {
-  private final TableName tableName_;
-  private final TableName srcTableName_;
-  private final boolean isExternal_;
-  private final String comment_;
-  private final THdfsFileFormat fileFormat_;
-  private final HdfsUri location_;
-  private final boolean ifNotExists_;
-
-  // Set during analysis
-  private String dbName_;
-  private String srcDbName_;
-  private String owner_;
-
-  /**
-   * Builds a CREATE TABLE LIKE statement
-   * @param tableName - Name of the new table
-   * @param srcTableName - Name of the source table (table to copy)
-   * @param isExternal - If true, the table's data will be preserved if dropped.
-   * @param comment - Comment to attach to the table
-   * @param fileFormat - File format of the table
-   * @param location - The HDFS location where the table data will be stored.
-   * @param ifNotExists - If true, no errors are thrown if the table already exists
-   */
-  public CreateTableLikeStmt(TableName tableName, TableName srcTableName,
-      boolean isExternal, String comment, THdfsFileFormat fileFormat, HdfsUri location,
-      boolean ifNotExists) {
-    Preconditions.checkNotNull(tableName);
-    Preconditions.checkNotNull(srcTableName);
-    this.tableName_ = tableName;
-    this.srcTableName_ = srcTableName;
-    this.isExternal_ = isExternal;
-    this.comment_ = comment;
-    this.fileFormat_ = fileFormat;
-    this.location_ = location;
-    this.ifNotExists_ = ifNotExists;
-  }
-
-  public String getTbl() { return tableName_.getTbl(); }
-  public String getSrcTbl() { return srcTableName_.getTbl(); }
-  public boolean isExternal() { return isExternal_; }
-  public boolean getIfNotExists() { return ifNotExists_; }
-  public String getComment() { return comment_; }
-  public THdfsFileFormat getFileFormat() { return fileFormat_; }
-  public HdfsUri getLocation() { return location_; }
-
-  /**
-   * Can only be called after analysis, returns the name of the database the table will
-   * be created within.
-   */
-  public String getDb() {
-    Preconditions.checkNotNull(dbName_);
-    return dbName_;
-  }
-
-  /**
-   * Can only be called after analysis, returns the name of the database of the source
-   * table.
-   */
-  public String getSrcDb() {
-    Preconditions.checkNotNull(srcDbName_);
-    return srcDbName_;
-  }
-
-  public String getOwner() {
-    Preconditions.checkNotNull(owner_);
-    return owner_;
-  }
-
-  @Override
-  public String toSql() {
-    StringBuilder sb = new StringBuilder("CREATE ");
-    if (isExternal_) sb.append("EXTERNAL ");
-    sb.append("TABLE ");
-    if (ifNotExists_) sb.append("IF NOT EXISTS ");
-    if (tableName_.getDb() != null) sb.append(tableName_.getDb() + ".");
-    sb.append(tableName_.getTbl() + " LIKE ");
-    if (srcTableName_.getDb() != null) sb.append(srcTableName_.getDb() + ".");
-    sb.append(srcTableName_.getTbl());
-    if (comment_ != null) sb.append(" COMMENT '" + comment_ + "'");
-    if (fileFormat_ != null) sb.append(" STORED AS " + fileFormat_);
-    if (location_ != null) sb.append(" LOCATION '" + location_ + "'");
-    return sb.toString();
-  }
-
-  public TCreateTableLikeParams toThrift() {
-    TCreateTableLikeParams params = new TCreateTableLikeParams();
-    params.setTable_name(new TTableName(getDb(), getTbl()));
-    params.setSrc_table_name(new TTableName(getSrcDb(), getSrcTbl()));
-    params.setOwner(getOwner());
-    params.setIs_external(isExternal());
-    params.setComment(comment_);
-    if (fileFormat_ != null) params.setFile_format(fileFormat_);
-    params.setLocation(location_ == null ? null : location_.toString());
-    params.setIf_not_exists(getIfNotExists());
-    return params;
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    Preconditions.checkState(tableName_ != null && !tableName_.isEmpty());
-    Preconditions.checkState(srcTableName_ != null && !srcTableName_.isEmpty());
-    // Make sure the source table exists and the user has permission to access it.
-    srcDbName_ = analyzer
-        .getTable(srcTableName_, Privilege.VIEW_METADATA)
-        .getDb().getName();
-    tableName_.analyze();
-    dbName_ = analyzer.getTargetDbName(tableName_);
-    owner_ = analyzer.getUser().getName();
-
-    if (analyzer.dbContainsTable(dbName_, tableName_.getTbl(), Privilege.CREATE) &&
-        !ifNotExists_) {
-      throw new AnalysisException(Analyzer.TBL_ALREADY_EXISTS_ERROR_MSG +
-          String.format("%s.%s", dbName_, getTbl()));
-    }
-    analyzer.addAccessEvent(new TAccessEvent(dbName_ + "." + tableName_.getTbl(),
-        TCatalogObjectType.TABLE, Privilege.CREATE.toString()));
-
-    if (location_ != null) {
-      location_.analyze(analyzer, Privilege.ALL, FsAction.READ_WRITE);
-    }
-  }
-}
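
As a hypothetical example (table and path names invented), a CreateTableLikeStmt built with isExternal=true, ifNotExists=true, fileFormat=PARQUET, no comment, and a location would render via toSql() as: CREATE EXTERNAL TABLE IF NOT EXISTS db1.new_tbl LIKE db2.src_tbl STORED AS PARQUET LOCATION '/warehouse/new_tbl'.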

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/CreateTableStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/CreateTableStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/CreateTableStmt.java
deleted file mode 100644
index f7b683f..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/CreateTableStmt.java
+++ /dev/null
@@ -1,416 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.avro.Schema;
-import org.apache.avro.SchemaParseException;
-import org.apache.hadoop.fs.permission.FsAction;
-
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.catalog.HdfsStorageDescriptor;
-import com.cloudera.impala.catalog.KuduTable;
-import com.cloudera.impala.catalog.RowFormat;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.FileSystemUtil;
-import com.cloudera.impala.thrift.TAccessEvent;
-import com.cloudera.impala.thrift.TCatalogObjectType;
-import com.cloudera.impala.thrift.TCreateTableParams;
-import com.cloudera.impala.thrift.THdfsFileFormat;
-import com.cloudera.impala.thrift.TTableName;
-import com.cloudera.impala.util.AvroSchemaConverter;
-import com.cloudera.impala.util.AvroSchemaParser;
-import com.cloudera.impala.util.AvroSchemaUtils;
-import com.cloudera.impala.util.KuduUtil;
-import com.cloudera.impala.util.MetaStoreUtil;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Strings;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Sets;
-
-/**
- * Represents a CREATE TABLE statement.
- */
-public class CreateTableStmt extends StatementBase {
-  private List<ColumnDef> columnDefs_;
-  private final String comment_;
-  private final boolean isExternal_;
-  private final boolean ifNotExists_;
-  private final THdfsFileFormat fileFormat_;
-  private final ArrayList<ColumnDef> partitionColDefs_;
-  private final RowFormat rowFormat_;
-  private TableName tableName_;
-  private final Map<String, String> tblProperties_;
-  private final Map<String, String> serdeProperties_;
-  private final HdfsCachingOp cachingOp_;
-  private HdfsUri location_;
-  private final List<DistributeParam> distributeParams_;
-
-  // Set during analysis
-  private String owner_;
-
-  /**
-   * Builds a CREATE TABLE statement
-   * @param tableName - Name of the new table
-   * @param columnDefs - List of column definitions for the table
-   * @param partitionColumnDefs - List of partition column definitions for the table
-   * @param isExternal - If true, the table's data will be preserved if dropped.
-   * @param comment - Comment to attach to the table
-   * @param rowFormat - Custom row format of the table. Use RowFormat.DEFAULT_ROW_FORMAT
-   *          to specify default row format.
-   * @param fileFormat - File format of the table
-   * @param location - The HDFS location where the table data will be stored.
-   * @param cachingOp - The HDFS caching op that should be applied to this table.
-   * @param ifNotExists - If true, no errors are thrown if the table already exists.
-   * @param tblProperties - Optional map of key/values to persist with table metadata.
-   * @param serdeProperties - Optional map of key/values to persist with table serde
-   *                          metadata.
-   */
-  public CreateTableStmt(TableName tableName, List<ColumnDef> columnDefs,
-      List<ColumnDef> partitionColumnDefs, boolean isExternal, String comment,
-      RowFormat rowFormat, THdfsFileFormat fileFormat, HdfsUri location,
-      HdfsCachingOp cachingOp, boolean ifNotExists, Map<String, String> tblProperties,
-      Map<String, String> serdeProperties, List<DistributeParam> distributeParams) {
-    Preconditions.checkNotNull(columnDefs);
-    Preconditions.checkNotNull(partitionColumnDefs);
-    Preconditions.checkNotNull(fileFormat);
-    Preconditions.checkNotNull(rowFormat);
-    Preconditions.checkNotNull(tableName);
-
-    columnDefs_ = Lists.newArrayList(columnDefs);
-    comment_ = comment;
-    isExternal_ = isExternal;
-    ifNotExists_ = ifNotExists;
-    fileFormat_ = fileFormat;
-    location_ = location;
-    cachingOp_ = cachingOp;
-    partitionColDefs_ = Lists.newArrayList(partitionColumnDefs);
-    rowFormat_ = rowFormat;
-    tableName_ = tableName;
-    tblProperties_ = tblProperties;
-    serdeProperties_ = serdeProperties;
-    unescapeProperties(tblProperties_);
-    unescapeProperties(serdeProperties_);
-    distributeParams_ = distributeParams;
-  }
-
-  /**
-   * Copy c'tor.
-   */
-  public CreateTableStmt(CreateTableStmt other) {
-    columnDefs_ = Lists.newArrayList(other.columnDefs_);
-    comment_ = other.comment_;
-    isExternal_ = other.isExternal_;
-    ifNotExists_ = other.ifNotExists_;
-    fileFormat_ = other.fileFormat_;
-    location_ = other.location_;
-    cachingOp_ = other.cachingOp_;
-    partitionColDefs_ = Lists.newArrayList(other.partitionColDefs_);
-    rowFormat_ = other.rowFormat_;
-    tableName_ = other.tableName_;
-    tblProperties_ = other.tblProperties_;
-    serdeProperties_ = other.serdeProperties_;
-    distributeParams_ = other.distributeParams_;
-  }
-
-  @Override
-  public CreateTableStmt clone() { return new CreateTableStmt(this); }
-
-  public String getTbl() { return tableName_.getTbl(); }
-  public TableName getTblName() { return tableName_; }
-  public List<ColumnDef> getColumnDefs() { return columnDefs_; }
-  public List<ColumnDef> getPartitionColumnDefs() { return partitionColDefs_; }
-  public String getComment() { return comment_; }
-  public boolean isExternal() { return isExternal_; }
-  public boolean getIfNotExists() { return ifNotExists_; }
-  public HdfsUri getLocation() { return location_; }
-  public void setLocation(HdfsUri location) { this.location_ = location; }
-  public THdfsFileFormat getFileFormat() { return fileFormat_; }
-  public RowFormat getRowFormat() { return rowFormat_; }
-  public Map<String, String> getTblProperties() { return tblProperties_; }
-  public Map<String, String> getSerdeProperties() { return serdeProperties_; }
-
-  /**
-   * Can only be called after analysis, returns the owner of this table (the user from
-   * the current session).
-   */
-  public String getOwner() {
-    Preconditions.checkNotNull(owner_);
-    return owner_;
-  }
-
-  /**
-   * Can only be called after analysis, returns the name of the database the table will
-   * be created within.
-   */
-  public String getDb() {
-    Preconditions.checkState(isAnalyzed());
-    return tableName_.getDb();
-  }
-
-  @Override
-  public String toSql() { return ToSqlUtils.getCreateTableSql(this); }
-
-  public TCreateTableParams toThrift() {
-    TCreateTableParams params = new TCreateTableParams();
-    params.setTable_name(new TTableName(getDb(), getTbl()));
-    for (ColumnDef col: getColumnDefs()) {
-      params.addToColumns(col.toThrift());
-    }
-    for (ColumnDef col: getPartitionColumnDefs()) {
-      params.addToPartition_columns(col.toThrift());
-    }
-    params.setOwner(getOwner());
-    params.setIs_external(isExternal());
-    params.setComment(comment_);
-    params.setLocation(location_ == null ? null : location_.toString());
-    if (cachingOp_ != null) params.setCache_op(cachingOp_.toThrift());
-    params.setRow_format(rowFormat_.toThrift());
-    params.setFile_format(fileFormat_);
-    params.setIf_not_exists(getIfNotExists());
-    if (tblProperties_ != null) params.setTable_properties(tblProperties_);
-    if (serdeProperties_ != null) params.setSerde_properties(serdeProperties_);
-    if (distributeParams_ != null) {
-      for (DistributeParam d : distributeParams_) {
-        params.addToDistribute_by(d.toThrift());
-      }
-    }
-    return params;
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    super.analyze(analyzer);
-    Preconditions.checkState(tableName_ != null && !tableName_.isEmpty());
-    tableName_ = analyzer.getFqTableName(tableName_);
-    tableName_.analyze();
-    owner_ = analyzer.getUser().getName();
-
-    MetaStoreUtil.checkShortPropertyMap("Property", tblProperties_);
-    MetaStoreUtil.checkShortPropertyMap("Serde property", serdeProperties_);
-
-    if (analyzer.dbContainsTable(tableName_.getDb(), tableName_.getTbl(),
-        Privilege.CREATE) && !ifNotExists_) {
-      throw new AnalysisException(Analyzer.TBL_ALREADY_EXISTS_ERROR_MSG + tableName_);
-    }
-
-    analyzer.addAccessEvent(new TAccessEvent(tableName_.toString(),
-        TCatalogObjectType.TABLE, Privilege.CREATE.toString()));
-
-    // Only Avro tables can have empty column defs because they can infer them from
-    // the Avro schema.
-    if (columnDefs_.isEmpty() && fileFormat_ != THdfsFileFormat.AVRO) {
-      throw new AnalysisException("Table requires at least 1 column");
-    }
-
-    if (location_ != null) {
-      location_.analyze(analyzer, Privilege.ALL, FsAction.READ_WRITE);
-    }
-
-    analyzeRowFormat(analyzer);
-
-    // Check that all the column names are valid and unique.
-    analyzeColumnDefs(analyzer);
-
-    if (getTblProperties() != null && KuduTable.KUDU_STORAGE_HANDLER.equals(
-        getTblProperties().get(KuduTable.KEY_STORAGE_HANDLER))) {
-      analyzeKuduTable(analyzer);
-    } else if (distributeParams_ != null) {
-      throw new AnalysisException("Only Kudu tables can use DISTRIBUTE BY clause.");
-    }
-
-    if (fileFormat_ == THdfsFileFormat.AVRO) {
-      columnDefs_ = analyzeAvroSchema(analyzer);
-      if (columnDefs_.isEmpty()) {
-        throw new AnalysisException(
-            "An Avro table requires column definitions or an Avro schema.");
-      }
-      AvroSchemaUtils.setFromSerdeComment(columnDefs_);
-      analyzeColumnDefs(analyzer);
-    }
-
-    if (cachingOp_ != null) {
-      cachingOp_.analyze(analyzer);
-      if (cachingOp_.shouldCache() && location_ != null &&
-          !FileSystemUtil.isPathCacheable(location_.getPath())) {
-        throw new AnalysisException(String.format("Location '%s' cannot be cached. " +
-            "Please retry without caching: CREATE TABLE %s ... UNCACHED",
-            location_.toString(), tableName_));
-      }
-    }
-
-    // Analyze the 'skip.header.line.count' property.
-    if (tblProperties_ != null) {
-      AlterTableSetTblProperties.analyzeSkipHeaderLineCount(tblProperties_);
-    }
-  }
-
-  private void analyzeRowFormat(Analyzer analyzer) throws AnalysisException {
-    Byte fieldDelim = analyzeRowFormatValue(rowFormat_.getFieldDelimiter());
-    Byte lineDelim = analyzeRowFormatValue(rowFormat_.getLineDelimiter());
-    Byte escapeChar = analyzeRowFormatValue(rowFormat_.getEscapeChar());
-    if (fileFormat_ == THdfsFileFormat.TEXT) {
-      if (fieldDelim == null) fieldDelim = HdfsStorageDescriptor.DEFAULT_FIELD_DELIM;
-      if (lineDelim == null) lineDelim = HdfsStorageDescriptor.DEFAULT_LINE_DELIM;
-      if (escapeChar == null) escapeChar = HdfsStorageDescriptor.DEFAULT_ESCAPE_CHAR;
-      if (fieldDelim != null && lineDelim != null && fieldDelim.equals(lineDelim)) {
-        throw new AnalysisException("Field delimiter and line delimiter have same " +
-            "value: byte " + fieldDelim);
-      }
-      if (fieldDelim != null && escapeChar != null && fieldDelim.equals(escapeChar)) {
-        analyzer.addWarning("Field delimiter and escape character have same value: " +
-            "byte " + fieldDelim + ". Escape character will be ignored");
-      }
-      if (lineDelim != null && escapeChar != null && lineDelim.equals(escapeChar)) {
-        analyzer.addWarning("Line delimiter and escape character have same value: " +
-            "byte " + lineDelim + ". Escape character will be ignored");
-      }
-    }
-  }
-
-  /**
-   * Analyzes columnDefs_ and partitionColDefs_ checking whether all column
-   * names are unique.
-   */
-  private void analyzeColumnDefs(Analyzer analyzer) throws AnalysisException {
-    Set<String> colNames = Sets.newHashSet();
-    for (ColumnDef colDef: columnDefs_) {
-      colDef.analyze();
-      if (!colNames.add(colDef.getColName().toLowerCase())) {
-        throw new AnalysisException("Duplicate column name: " + colDef.getColName());
-      }
-    }
-    for (ColumnDef colDef: partitionColDefs_) {
-      colDef.analyze();
-      if (!colDef.getType().supportsTablePartitioning()) {
-        throw new AnalysisException(
-            String.format("Type '%s' is not supported as partition-column type " +
-                "in column: %s", colDef.getType().toSql(), colDef.getColName()));
-      }
-      if (!colNames.add(colDef.getColName().toLowerCase())) {
-        throw new AnalysisException("Duplicate column name: " + colDef.getColName());
-      }
-    }
-  }
-
-  /**
-   * Analyzes the Avro schema and compares it with the columnDefs_ to detect
-   * inconsistencies. Returns a list of column descriptors that should be
-   * used for creating the table (possibly identical to columnDefs_).
-   */
-  private List<ColumnDef> analyzeAvroSchema(Analyzer analyzer)
-      throws AnalysisException {
-    Preconditions.checkState(fileFormat_ == THdfsFileFormat.AVRO);
-    // Look for the schema in TBLPROPERTIES and in SERDEPROPERTIES, with the latter
-    // taking precedence.
-    List<Map<String, String>> schemaSearchLocations = Lists.newArrayList();
-    schemaSearchLocations.add(serdeProperties_);
-    schemaSearchLocations.add(tblProperties_);
-    String avroSchema = null;
-    List<ColumnDef> avroCols = null; // parsed from avroSchema
-    try {
-      avroSchema = AvroSchemaUtils.getAvroSchema(schemaSearchLocations);
-      if (avroSchema == null) {
-        // No Avro schema was explicitly set in the serde or table properties, so infer
-        // the Avro schema from the column definitions.
-        Schema inferredSchema = AvroSchemaConverter.convertColumnDefs(
-            columnDefs_, tableName_.toString());
-        avroSchema = inferredSchema.toString();
-      }
-      if (Strings.isNullOrEmpty(avroSchema)) {
-        throw new AnalysisException("Avro schema is null or empty: " +
-            tableName_.toString());
-      }
-      avroCols = AvroSchemaParser.parse(avroSchema);
-    } catch (SchemaParseException e) {
-      throw new AnalysisException(String.format(
-          "Error parsing Avro schema for table '%s': %s", tableName_.toString(),
-          e.getMessage()));
-    }
-    Preconditions.checkNotNull(avroCols);
-
-    // Analyze the Avro schema to detect inconsistencies with the columnDefs_.
-    // In case of inconsistencies, the column defs are ignored in favor of the Avro
-    // schema for simplicity and, in particular, to enable COMPUTE STATS (IMPALA-1104).
-    StringBuilder warning = new StringBuilder();
-    List<ColumnDef> reconciledColDefs =
-        AvroSchemaUtils.reconcileSchemas(columnDefs_, avroCols, warning);
-    if (warning.length() > 0) analyzer.addWarning(warning.toString());
-    return reconciledColDefs;
-  }
-
-  private void analyzeKuduTable(Analyzer analyzer) throws AnalysisException {
-    // Validate that Kudu table is correctly specified.
-    if (!KuduTable.tableParamsAreValid(getTblProperties())) {
-      throw new AnalysisException("Kudu table is missing parameters " +
-          String.format("in table properties. Please verify if %s, %s, and %s are "
-                  + "present and have valid values.",
-              KuduTable.KEY_TABLE_NAME, KuduTable.KEY_MASTER_ADDRESSES,
-              KuduTable.KEY_KEY_COLUMNS));
-    }
-
-    // Kudu table cannot be a cached table
-    if (cachingOp_ != null) {
-      throw new AnalysisException("A Kudu table cannot be cached in HDFS.");
-    }
-
-    if (distributeParams_ != null) {
-      if (isExternal_) {
-        throw new AnalysisException(
-            "The DISTRIBUTE BY clause may not be specified for external tables.");
-      }
-
-      List<String> keyColumns = KuduUtil.parseKeyColumnsAsList(
-          getTblProperties().get(KuduTable.KEY_KEY_COLUMNS));
-      for (DistributeParam d : distributeParams_) {
-        // If the columns are not set, default to all key columns
-        if (d.getColumns() == null) d.setColumns(keyColumns);
-        d.analyze(analyzer);
-      }
-    } else if (!isExternal_) {
-      throw new AnalysisException(
-          "A data distribution must be specified using the DISTRIBUTE BY clause.");
-    }
-  }
-
-  private Byte analyzeRowFormatValue(String value) throws AnalysisException {
-    if (value == null) return null;
-    Byte byteVal = HdfsStorageDescriptor.parseDelim(value);
-    if (byteVal == null) {
-      throw new AnalysisException("ESCAPED BY values and LINE/FIELD " +
-          "terminators must be specified as a single character or as a decimal " +
-          "value in the range [-128:127]: " + value);
-    }
-    return byteVal;
-  }
-
-  /**
-   * Unescapes all values in the property map.
-   */
-  public static void unescapeProperties(Map<String, String> propertyMap) {
-    if (propertyMap == null) return;
-    for (Map.Entry<String, String> kv : propertyMap.entrySet()) {
-      propertyMap.put(kv.getKey(),
-          new StringLiteral(kv.getValue()).getUnescapedValue());
-    }
-  }
-}
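
The text-format delimiter checks in analyzeRowFormat() above can be sketched in isolation. The sketch below is illustrative only, assumes the delimiters have already been parsed to single bytes, and uses invented class and method names.

public class RowFormatCheckSketch {
  // Mirrors the collision rules: identical field and line delimiters are an error,
  // while a field/escape or line/escape collision only produces a warning.
  static String check(byte fieldDelim, byte lineDelim, byte escapeChar) {
    if (fieldDelim == lineDelim) {
      return "ERROR: field delimiter and line delimiter have same value: byte " + fieldDelim;
    }
    if (fieldDelim == escapeChar || lineDelim == escapeChar) {
      return "WARNING: escape character collides with a delimiter and will be ignored";
    }
    return "OK";
  }

  public static void main(String[] args) {
    System.out.println(check((byte) ',', (byte) '\n', (byte) '\\'));  // OK
    System.out.println(check((byte) ',', (byte) ',', (byte) '\\'));   // ERROR
    System.out.println(check((byte) ',', (byte) '\n', (byte) ','));   // WARNING
  }
}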

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/CreateUdaStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/CreateUdaStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/CreateUdaStmt.java
deleted file mode 100644
index 46b0003..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/CreateUdaStmt.java
+++ /dev/null
@@ -1,209 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-
-import com.cloudera.impala.catalog.AggregateFunction;
-import com.cloudera.impala.catalog.Function;
-import com.cloudera.impala.catalog.PrimitiveType;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TFunctionBinaryType;
-import com.cloudera.impala.thrift.TSymbolType;
-import com.google.common.base.Preconditions;
-
-/**
- * Represents a CREATE AGGREGATE FUNCTION statement.
- */
-public class CreateUdaStmt extends CreateFunctionStmtBase {
-  private final TypeDef intermediateTypeDef_;
-
-  /**
-   * Builds a CREATE AGGREGATE FUNCTION statement
-   * @param fnSymbol - Name of the function
-   * @param args - List of types for the arguments to this function
-   * @param retTypeDef - The type this function returns.
-   * @param intermediateTypeDef - The type used for the intermediate data.
-   * @param location - Path in HDFS containing the UDA.
-   * @param ifNotExists - If true, no errors are thrown if the function already exists
-   * @param optArgs - Key/Value pairs for additional arguments. The keys are
-   *        validated in analyze()
-   */
-  public CreateUdaStmt(FunctionName fnSymbol, FunctionArgs args,
-      TypeDef retTypeDef, TypeDef intermediateTypeDef,
-      HdfsUri location, boolean ifNotExists,
-      HashMap<CreateFunctionStmtBase.OptArg, String> optArgs) {
-    super(fnSymbol, args, retTypeDef, location, ifNotExists, optArgs);
-    intermediateTypeDef_ = intermediateTypeDef;
-  }
-
-  private void reportCouldNotInferSymbol(String function) throws AnalysisException {
-    throw new AnalysisException("Could not infer symbol for "
-        + function + "() function.");
-  }
-
-  // Gets the symbol for 'arg'. If the user explicitly set it in the DDL, return that.
-  // Otherwise try to infer the symbol from the update function: its symbol must
-  // contain "update" or "Update", which is swapped out for 'defaultSymbol'.
-  // Returns null if no symbol was found.
-  private String getSymbolSymbol(OptArg arg, String defaultSymbol) {
-    // First lookup if the user explicitly set it.
-    if (optArgs_.get(arg) != null) return optArgs_.get(arg);
-    // Try to match it from Update
-    String updateFn = optArgs_.get(OptArg.UPDATE_FN);
-    // Mangled symbols start with _Z. We can't substitute symbols within mangled
-    // strings.
-    // TODO: this is doable in the BE with more symbol parsing.
-    if (updateFn.startsWith("_Z")) return null;
-
-    if (updateFn.contains("update")) return updateFn.replace("update", defaultSymbol);
-    if (updateFn.contains("Update")) {
-      char[] array = defaultSymbol.toCharArray();
-      array[0] = Character.toUpperCase(array[0]);
-      String s = new String(array);
-      return updateFn.replace("Update", s);
-    }
-    return null;
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    super.analyze(analyzer);
-    Preconditions.checkNotNull(fn_);
-    Preconditions.checkState(fn_ instanceof AggregateFunction);
-    AggregateFunction uda = (AggregateFunction) fn_;
-
-    if (uda.getNumArgs() == 0) {
-      throw new AnalysisException("UDAs must take at least one argument.");
-    }
-
-    if (uda.getBinaryType() == TFunctionBinaryType.JAVA) {
-      throw new AnalysisException("Java UDAs are not supported.");
-    }
-
-    // TODO: these are temporary restrictions since the BE cannot yet
-    // execute them.
-    if (uda.getBinaryType() == TFunctionBinaryType.IR) {
-      throw new AnalysisException("IR UDAs are not yet supported.");
-    }
-    if (fn_.hasVarArgs()) {
-      throw new AnalysisException("UDAs with varargs are not yet supported.");
-    }
-    if (fn_.getNumArgs() > 8) {
-      throw new AnalysisException(
-          "UDAs with more than 8 arguments are not yet supported.");
-    }
-
-    if (uda.getReturnType().getPrimitiveType() == PrimitiveType.CHAR) {
-      throw new AnalysisException("UDAs with CHAR return type are not yet supported.");
-    }
-    if (uda.getReturnType().getPrimitiveType() == PrimitiveType.VARCHAR) {
-      throw new AnalysisException("UDAs with VARCHAR return type are not yet supported.");
-    }
-    for (int i = 0; i < uda.getNumArgs(); ++i) {
-      if (uda.getArgs()[i].getPrimitiveType() == PrimitiveType.CHAR) {
-        throw new AnalysisException("UDAs with CHAR arguments are not yet supported.");
-      }
-      if (uda.getArgs()[i].getPrimitiveType() == PrimitiveType.VARCHAR) {
-        throw new AnalysisException("UDAs with VARCHAR arguments are not yet supported.");
-      }
-    }
-
-    Type intermediateType = null;
-    if (intermediateTypeDef_ == null) {
-      intermediateType = uda.getReturnType();
-    } else {
-      intermediateTypeDef_.analyze(analyzer);
-      intermediateType = intermediateTypeDef_.getType();
-    }
-    uda.setIntermediateType(intermediateType);
-
-    // Check arguments that are only valid in UDFs are not set.
-    checkOptArgNotSet(OptArg.SYMBOL);
-    checkOptArgNotSet(OptArg.PREPARE_FN);
-    checkOptArgNotSet(OptArg.CLOSE_FN);
-
-    // The user must provide the symbol for Update.
-    uda.setUpdateFnSymbol(uda.lookupSymbol(
-        checkAndGetOptArg(OptArg.UPDATE_FN), TSymbolType.UDF_EVALUATE, intermediateType,
-        uda.hasVarArgs(), uda.getArgs()));
-
-    // If the DDL did not specify the init/serialize/merge/finalize function
-    // symbols, guess them based on the update fn symbol.
-    Preconditions.checkNotNull(uda.getUpdateFnSymbol());
-    uda.setInitFnSymbol(getSymbolSymbol(OptArg.INIT_FN, "init"));
-    uda.setSerializeFnSymbol(getSymbolSymbol(OptArg.SERIALIZE_FN, "serialize"));
-    uda.setMergeFnSymbol(getSymbolSymbol(OptArg.MERGE_FN, "merge"));
-    uda.setFinalizeFnSymbol(getSymbolSymbol(OptArg.FINALIZE_FN, "finalize"));
-
-    // Init and merge are required.
-    if (uda.getInitFnSymbol() == null) reportCouldNotInferSymbol("init");
-    if (uda.getMergeFnSymbol() == null) reportCouldNotInferSymbol("merge");
-
-    // Validate that all set symbols exist.
-    uda.setInitFnSymbol(uda.lookupSymbol(uda.getInitFnSymbol(),
-        TSymbolType.UDF_EVALUATE, intermediateType, false));
-    uda.setMergeFnSymbol(uda.lookupSymbol(uda.getMergeFnSymbol(),
-        TSymbolType.UDF_EVALUATE, intermediateType, false, intermediateType));
-    if (uda.getSerializeFnSymbol() != null) {
-      try {
-        uda.setSerializeFnSymbol(uda.lookupSymbol(uda.getSerializeFnSymbol(),
-            TSymbolType.UDF_EVALUATE, null, false, intermediateType));
-      } catch (AnalysisException e) {
-        if (optArgs_.get(OptArg.SERIALIZE_FN) != null) {
-          throw e;
-        } else {
-          // Ignore, these symbols are optional.
-          uda.setSerializeFnSymbol(null);
-        }
-      }
-    }
-    if (uda.getFinalizeFnSymbol() != null) {
-      try {
-        uda.setFinalizeFnSymbol(uda.lookupSymbol(
-            uda.getFinalizeFnSymbol(), TSymbolType.UDF_EVALUATE, null, false,
-            intermediateType));
-      } catch (AnalysisException e) {
-        if (optArgs_.get(OptArg.FINALIZE_FN) != null) {
-          throw e;
-        } else {
-          // Ignore, these symbols are optional.
-          uda.setFinalizeFnSymbol(null);
-        }
-      }
-    }
-
-    // If the intermediate type is not the return type, then finalize is
-    // required.
-    if (!intermediateType.equals(fn_.getReturnType()) &&
-        uda.getFinalizeFnSymbol() == null) {
-      throw new AnalysisException("Finalize() is required for this UDA.");
-    }
-
-    sqlString_ = uda.toSql(ifNotExists_);
-  }
-
-  @Override
-  protected Function createFunction(FunctionName fnName, ArrayList<Type> argTypes,
-      Type retType, boolean hasVarArgs) {
-    return new AggregateFunction(fnName_, args_.getArgTypes(), retTypeDef_.getType(),
-        args_.hasVarArgs());
-  }
-}
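
The symbol-inference rule used by getSymbolSymbol() above (derive the init/serialize/merge/finalize symbols from the update function's symbol) can be sketched in isolation as follows. This is a hedged approximation rather than the Impala helper itself, and the example symbol names are invented.

public class UdaSymbolInferenceSketch {
  // Infers a UDA helper symbol (e.g. "init" or "merge") from the update function
  // symbol by swapping "update"/"Update" for the default name. Returns null for
  // mangled symbols (starting with "_Z") or when no match is found.
  static String inferSymbol(String updateFn, String defaultSymbol) {
    if (updateFn == null || updateFn.startsWith("_Z")) return null;
    if (updateFn.contains("update")) return updateFn.replace("update", defaultSymbol);
    if (updateFn.contains("Update")) {
      String capitalized =
          Character.toUpperCase(defaultSymbol.charAt(0)) + defaultSymbol.substring(1);
      return updateFn.replace("Update", capitalized);
    }
    return null;
  }

  public static void main(String[] args) {
    System.out.println(inferSymbol("my_agg_update", "merge"));   // my_agg_merge
    System.out.println(inferSymbol("MyAggUpdate", "finalize"));  // MyAggFinalize
    System.out.println(inferSymbol("_ZN5MyAggE", "init"));       // null (mangled)
  }
}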

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/CreateUdfStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/CreateUdfStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/CreateUdfStmt.java
deleted file mode 100644
index 550d26f..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/CreateUdfStmt.java
+++ /dev/null
@@ -1,140 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-
-import com.cloudera.impala.catalog.Db;
-import com.cloudera.impala.catalog.Function;
-import com.cloudera.impala.catalog.PrimitiveType;
-import com.cloudera.impala.catalog.ScalarFunction;
-import com.cloudera.impala.catalog.ScalarType;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.hive.executor.UdfExecutor.JavaUdfDataType;
-import com.cloudera.impala.thrift.TFunctionBinaryType;
-import com.cloudera.impala.thrift.TFunctionCategory;
-import com.cloudera.impala.thrift.TSymbolType;
-import com.google.common.base.Preconditions;
-
-/**
- * Represents a CREATE FUNCTION statement.
- */
-public class CreateUdfStmt extends CreateFunctionStmtBase {
-  /**
-   * Builds a CREATE FUNCTION statement
-   * @param fnName - Name of the function
-   * @param args - List of types for the arguments to this function
-   * @param retTypeDef - The type this function returns.
-   * @param location - Path in HDFS containing the UDF.
-   * @param ifNotExists - If true, no errors are thrown if the function already exists
-   * @param optArgs - Key/Value pairs for additional arguments. The keys are
-   *        validated in analyze()
-   */
-  public CreateUdfStmt(FunctionName fnName, FunctionArgs args,
-      TypeDef retTypeDef, HdfsUri location, boolean ifNotExists,
-      HashMap<CreateFunctionStmtBase.OptArg, String> optArgs) {
-    super(fnName, args, retTypeDef, location, ifNotExists, optArgs);
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    super.analyze(analyzer);
-    Preconditions.checkNotNull(fn_);
-    Preconditions.checkState(fn_ instanceof ScalarFunction);
-    ScalarFunction udf = (ScalarFunction) fn_;
-
-    if (hasSignature()) {
-      if (udf.getBinaryType() == TFunctionBinaryType.JAVA) {
-        if (!JavaUdfDataType.isSupported(udf.getReturnType())) {
-          throw new AnalysisException(
-              "Type " + udf.getReturnType().toSql() + " is not supported for Java UDFs.");
-        }
-        for (int i = 0; i < udf.getNumArgs(); ++i) {
-          if (!JavaUdfDataType.isSupported(udf.getArgs()[i])) {
-            throw new AnalysisException(
-                "Type " + udf.getArgs()[i].toSql() + " is not supported for Java UDFs.");
-          }
-        }
-      }
-
-      if (udf.getReturnType().getPrimitiveType() == PrimitiveType.CHAR) {
-        throw new AnalysisException("UDFs that use CHAR are not yet supported.");
-      }
-      if (udf.getReturnType().getPrimitiveType() == PrimitiveType.VARCHAR) {
-        throw new AnalysisException("UDFs that use VARCHAR are not yet supported.");
-      }
-      for (int i = 0; i < udf.getNumArgs(); ++i) {
-        if (udf.getArgs()[i].getPrimitiveType() == PrimitiveType.CHAR) {
-          throw new AnalysisException("UDFs that use CHAR are not yet supported.");
-        }
-        if (udf.getArgs()[i].getPrimitiveType() == PrimitiveType.VARCHAR) {
-          throw new AnalysisException("UDFs that use VARCHAR are not yet supported.");
-        }
-      }
-    }
-
-    // Check the user provided symbol exists
-    udf.setSymbolName(udf.lookupSymbol(
-        checkAndGetOptArg(OptArg.SYMBOL), TSymbolType.UDF_EVALUATE, null,
-        udf.hasVarArgs(), udf.getArgs()));
-
-    // Set optional Prepare/Close functions
-    String prepareFn = optArgs_.get(OptArg.PREPARE_FN);
-    if (prepareFn != null) {
-      udf.setPrepareFnSymbol(udf.lookupSymbol(prepareFn, TSymbolType.UDF_PREPARE));
-    }
-    String closeFn = optArgs_.get(OptArg.CLOSE_FN);
-    if (closeFn != null) {
-      udf.setCloseFnSymbol(udf.lookupSymbol(closeFn, TSymbolType.UDF_CLOSE));
-    }
-
-    // Udfs should not set any of these
-    checkOptArgNotSet(OptArg.UPDATE_FN);
-    checkOptArgNotSet(OptArg.INIT_FN);
-    checkOptArgNotSet(OptArg.SERIALIZE_FN);
-    checkOptArgNotSet(OptArg.MERGE_FN);
-    checkOptArgNotSet(OptArg.FINALIZE_FN);
-
-    sqlString_ = udf.toSql(ifNotExists_);
-
-    // Check that there is no function with the same name whose isPersistent field
-    // differs from udf.isPersistent_. For example, we don't allow two Java UDFs with
-    // the same name and opposite persistence values. This only applies to Java UDFs
-    // because all native UDFs are persistent. Additionally, we don't throw exceptions
-    // if "IF NOT EXISTS" is specified in the query.
-    if (udf.getBinaryType() != TFunctionBinaryType.JAVA || ifNotExists_) return;
-
-    Preconditions.checkNotNull(db_);
-    for (Function fn: db_.getFunctions(udf.functionName())) {
-      if (!hasSignature() || (hasSignature() && fn.isPersistent())) {
-        throw new AnalysisException(
-            String.format(Analyzer.FN_ALREADY_EXISTS_ERROR_MSG +
-                fn.signatureString()));
-      }
-    }
-  }
-
-  @Override
-  protected Function createFunction(FunctionName fnName, ArrayList<Type> argTypes, Type retType,
-      boolean hasVarArgs) {
-    return new ScalarFunction(fnName, argTypes, retType, hasVarArgs);
-  }
-}
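
For illustration, the signature checks in CreateUdfStmt.analyze() above (reject CHAR/VARCHAR anywhere in the signature, and for Java UDFs allow only the types the Java executor handles) can be approximated with a standalone sketch. The type list below is hypothetical; the authoritative list lives in UdfExecutor.JavaUdfDataType, and the real check order differs slightly.

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class UdfSignatureCheckSketch {
  // Hypothetical stand-in for the set of types a Java UDF may use.
  static final Set<String> JAVA_UDF_TYPES = new HashSet<>(Arrays.asList(
      "BOOLEAN", "TINYINT", "SMALLINT", "INT", "BIGINT", "FLOAT", "DOUBLE", "STRING"));

  // Returns an error message mirroring the checks above, or null if the
  // signature is acceptable.
  static String validate(boolean isJavaUdf, String retType, List<String> argTypes) {
    List<String> allTypes = new java.util.ArrayList<>(argTypes);
    allTypes.add(retType);
    for (String t : allTypes) {
      if (t.equals("CHAR") || t.equals("VARCHAR")) {
        return "UDFs that use " + t + " are not yet supported.";
      }
      if (isJavaUdf && !JAVA_UDF_TYPES.contains(t)) {
        return "Type " + t + " is not supported for Java UDFs.";
      }
    }
    return null;
  }

  public static void main(String[] args) {
    System.out.println(validate(false, "INT", Arrays.asList("STRING", "DOUBLE"))); // null
    System.out.println(validate(true, "INT", Arrays.asList("TIMESTAMP")));  // Java UDF error
    System.out.println(validate(false, "VARCHAR", Arrays.asList("INT")));   // VARCHAR error
  }
}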

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/CreateViewStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/CreateViewStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/CreateViewStmt.java
deleted file mode 100644
index c38eef0..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/CreateViewStmt.java
+++ /dev/null
@@ -1,80 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.ArrayList;
-
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.RuntimeEnv;
-import com.cloudera.impala.thrift.TAccessEvent;
-import com.cloudera.impala.thrift.TCatalogObjectType;
-
-import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
-
-/**
- * Represents a CREATE VIEW statement.
- */
-public class CreateViewStmt extends CreateOrAlterViewStmtBase {
-
-  public CreateViewStmt(boolean ifNotExists, TableName tableName,
-      ArrayList<ColumnDef> columnDefs, String comment, QueryStmt viewDefStmt) {
-    super(ifNotExists, tableName, columnDefs, comment, viewDefStmt);
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    Preconditions.checkState(tableName_ != null && !tableName_.isEmpty());
-
-    tableName_.analyze();
-    // Use a child analyzer to let views have complex-typed columns.
-    Analyzer viewAnalyzer = new Analyzer(analyzer);
-    // Enforce Hive column labels for view compatibility.
-    viewAnalyzer.setUseHiveColLabels(true);
-    viewDefStmt_.analyze(viewAnalyzer);
-
-    dbName_ = analyzer.getTargetDbName(tableName_);
-    owner_ = analyzer.getUser().getName();
-    if (analyzer.dbContainsTable(dbName_, tableName_.getTbl(), Privilege.CREATE) &&
-        !ifNotExists_) {
-      throw new AnalysisException(Analyzer.TBL_ALREADY_EXISTS_ERROR_MSG +
-          String.format("%s.%s", dbName_, tableName_.getTbl()));
-    }
-    analyzer.addAccessEvent(new TAccessEvent(dbName_ + "." + tableName_.getTbl(),
-        TCatalogObjectType.VIEW, Privilege.CREATE.toString()));
-
-    createColumnAndViewDefs(analyzer);
-    if (RuntimeEnv.INSTANCE.computeLineage() || RuntimeEnv.INSTANCE.isTestEnv()) {
-      computeLineageGraph(analyzer);
-    }
-  }
-
-  @Override
-  public String toSql() {
-    StringBuilder sb = new StringBuilder();
-    sb.append("CREATE VIEW ");
-    if (ifNotExists_) sb.append("IF NOT EXISTS ");
-    if (tableName_.getDb() != null) sb.append(tableName_.getDb() + ".");
-    sb.append(tableName_.getTbl() + " (");
-    sb.append(Joiner.on(", ").join(columnDefs_));
-    sb.append(") AS ");
-    sb.append(viewDefStmt_.toSql());
-    return sb.toString();
-  }
-}
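
To make the toSql() assembly above concrete, here is a small self-contained sketch that rebuilds the same CREATE VIEW text from plain strings. The database, table, and column names are invented for the example; the real method works on analyzed ColumnDef and QueryStmt objects.

import java.util.Arrays;
import java.util.List;

public class CreateViewSqlSketch {
  // Rebuilds CREATE VIEW SQL the way CreateViewStmt.toSql() above does:
  // optional IF NOT EXISTS, optional db prefix, column list, then the definition.
  static String toSql(boolean ifNotExists, String db, String tbl,
      List<String> columnDefs, String viewDefSql) {
    StringBuilder sb = new StringBuilder("CREATE VIEW ");
    if (ifNotExists) sb.append("IF NOT EXISTS ");
    if (db != null) sb.append(db).append(".");
    sb.append(tbl).append(" (");
    sb.append(String.join(", ", columnDefs));
    sb.append(") AS ");
    sb.append(viewDefSql);
    return sb.toString();
  }

  public static void main(String[] args) {
    System.out.println(toSql(true, "analytics", "daily_counts", Arrays.asList("ds", "cnt"),
        "SELECT ds, count(*) FROM events GROUP BY ds"));
    // CREATE VIEW IF NOT EXISTS analytics.daily_counts (ds, cnt) AS SELECT ds, ...
  }
}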

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/DeleteStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/DeleteStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/DeleteStmt.java
deleted file mode 100644
index efa2117..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/DeleteStmt.java
+++ /dev/null
@@ -1,90 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.List;
-
-import com.cloudera.impala.common.Pair;
-import com.cloudera.impala.planner.DataSink;
-import com.cloudera.impala.planner.KuduTableSink;
-import com.cloudera.impala.planner.TableSink;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Lists;
-import org.apache.hadoop.hbase.client.Delete;
-
-/**
- * Representation of a DELETE statement.
- *
- * A delete statement contains three main parts: the target table reference, the FROM
- * clause, and the optional WHERE clause. Syntactically, this is represented as follows:
- *
- *     DELETE [FROM] dotted_path [WHERE expr]
- *     DELETE [table_alias] FROM table_ref_list [WHERE expr]
- *
- * Only the syntax using the explicit from clause can contain join conditions.
- */
-public class DeleteStmt extends ModifyStmt {
-
-  public DeleteStmt(List<String> targetTablePath, FromClause tableRefs,
-      Expr wherePredicate, boolean ignoreNotFound) {
-    super(targetTablePath, tableRefs, Lists.<Pair<SlotRef, Expr>>newArrayList(),
-        wherePredicate, ignoreNotFound);
-  }
-
-  public DeleteStmt(DeleteStmt other) {
-    super(other.targetTablePath_, other.fromClause_.clone(),
-        Lists.<Pair<SlotRef, Expr>>newArrayList(), other.wherePredicate_.clone(),
-        other.ignoreNotFound_);
-  }
-
-  public DataSink createDataSink() {
-    // analyze() must have been called before.
-    Preconditions.checkState(table_ != null);
-    TableSink tableSink = TableSink.create(table_, TableSink.Op.DELETE,
-        ImmutableList.<Expr>of(), referencedColumns_, false, ignoreNotFound_);
-    Preconditions.checkState(!referencedColumns_.isEmpty());
-    return tableSink;
-  }
-
-  @Override
-  public DeleteStmt clone() {
-    return new DeleteStmt(this);
-  }
-
-  @Override
-  public String toSql() {
-    StringBuilder b = new StringBuilder();
-    b.append("DELETE");
-    if (ignoreNotFound_) b.append(" IGNORE");
-    if (fromClause_.size() > 1 || targetTableRef_.hasExplicitAlias()) {
-      b.append(" ");
-      if (targetTableRef_.hasExplicitAlias()) {
-        b.append(targetTableRef_.getExplicitAlias());
-      } else {
-        b.append(targetTableRef_.toSql());
-      }
-    }
-    b.append(fromClause_.toSql());
-    if (wherePredicate_ != null) {
-      b.append(" WHERE ");
-      b.append(wherePredicate_.toSql());
-    }
-    return b.toString();
-  }
-}
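
Similarly, the toSql() branches above (the IGNORE keyword, and the explicit target alias emitted only for the multi-table form) can be sketched with plain strings. The table names and predicates below are made up, and the FROM fragment is passed pre-rendered for brevity.

public class DeleteSqlSketch {
  // Mirrors the string assembly in DeleteStmt.toSql() above:
  // DELETE [IGNORE] [target_alias] <from clause> [WHERE <predicate>].
  static String toSql(boolean ignoreNotFound, String targetAlias, String fromSql,
      String whereSql) {
    StringBuilder b = new StringBuilder("DELETE");
    if (ignoreNotFound) b.append(" IGNORE");
    if (targetAlias != null) b.append(" ").append(targetAlias);
    b.append(fromSql);
    if (whereSql != null) b.append(" WHERE ").append(whereSql);
    return b.toString();
  }

  public static void main(String[] args) {
    // Single-table form.
    System.out.println(toSql(false, null, " FROM kudu_tbl", "id = 10"));
    // Multi-table form with an explicit alias and IGNORE.
    System.out.println(toSql(true, "a",
        " FROM kudu_tbl a JOIN dim_tbl d ON a.k = d.k", "d.flag = true"));
  }
}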

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/DescribeDbStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/DescribeDbStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/DescribeDbStmt.java
deleted file mode 100644
index 0ddd6ec..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/DescribeDbStmt.java
+++ /dev/null
@@ -1,71 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TDescribeDbParams;
-import com.cloudera.impala.thrift.TDescribeOutputStyle;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Strings;
-
-/**
- * Represents a DESCRIBE DATABASE statement which returns metadata on
- * a specified database:
- * Syntax: DESCRIBE DATABASE [FORMATTED|EXTENDED] <db>
- *
- * If FORMATTED|EXTENDED is not specified, the statement only returns the given
- * database's location and comment.
- * If FORMATTED|EXTENDED is specified, extended metadata on the database is returned.
- * This metadata includes the database's parameters, owner, and privileges.
- */
-public class DescribeDbStmt extends StatementBase {
-  private final TDescribeOutputStyle outputStyle_;
-  private final String dbName_;
-
-  public DescribeDbStmt(String dbName, TDescribeOutputStyle outputStyle) {
-    Preconditions.checkState(!Strings.isNullOrEmpty(dbName), "Invalid database name");
-    dbName_ = dbName;
-    outputStyle_ = outputStyle;
-  }
-
-  @Override
-  public String toSql() {
-    StringBuilder sb = new StringBuilder("DESCRIBE DATABASE ");
-    if (outputStyle_ != TDescribeOutputStyle.MINIMAL) {
-      sb.append(outputStyle_.toString() + " ");
-    }
-    return sb.toString() + dbName_;
-  }
-
-  public String getDb() { return dbName_; }
-  public TDescribeOutputStyle getOutputStyle() { return outputStyle_; }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    analyzer.getDb(dbName_, Privilege.VIEW_METADATA);
-  }
-
-  public TDescribeDbParams toThrift() {
-    TDescribeDbParams params = new TDescribeDbParams();
-    params.setDb(dbName_);
-    params.setOutput_style(outputStyle_);
-    return params;
-  }
-}
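
Finally, the output-style handling in DescribeDbStmt above comes down to a single branch in toSql(). A hedged sketch, with a local enum standing in for the Thrift-generated TDescribeOutputStyle, is:

public class DescribeDbSqlSketch {
  // Local stand-in for the Thrift-generated TDescribeOutputStyle enum.
  enum OutputStyle { MINIMAL, FORMATTED, EXTENDED }

  // Rebuilds the SQL text the way DescribeDbStmt.toSql() above does: the style
  // keyword is printed only when it is not MINIMAL.
  static String toSql(String dbName, OutputStyle style) {
    StringBuilder sb = new StringBuilder("DESCRIBE DATABASE ");
    if (style != OutputStyle.MINIMAL) sb.append(style).append(" ");
    return sb.append(dbName).toString();
  }

  public static void main(String[] args) {
    System.out.println(toSql("tpch", OutputStyle.MINIMAL));   // DESCRIBE DATABASE tpch
    System.out.println(toSql("tpch", OutputStyle.EXTENDED));  // DESCRIBE DATABASE EXTENDED tpch
  }
}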


[36/61] [partial] incubator-impala git commit: IMPALA-3786: Replace "cloudera" with "apache" (part 1)

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/StmtRewriter.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/StmtRewriter.java b/fe/src/main/java/com/cloudera/impala/analysis/StmtRewriter.java
deleted file mode 100644
index 8304c66..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/StmtRewriter.java
+++ /dev/null
@@ -1,905 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.analysis.AnalysisContext.AnalysisResult;
-import com.cloudera.impala.analysis.UnionStmt.UnionOperand;
-import com.cloudera.impala.common.AnalysisException;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Predicates;
-import com.google.common.collect.Iterables;
-import com.google.common.collect.Lists;
-
-/**
- * Class representing a statement rewriter. A statement rewriter performs subquery
- * unnesting on an analyzed parse tree.
- * TODO: Now that we have a nested-loop join supporting all join modes we could
- * allow more rewrites, although it is not clear we would always want to.
- */
-public class StmtRewriter {
-  private final static Logger LOG = LoggerFactory.getLogger(StmtRewriter.class);
-
-  /**
-   * Rewrite the statement of an analysis result. The unanalyzed rewritten
-   * statement is returned.
-   */
-  public static StatementBase rewrite(AnalysisResult analysisResult)
-      throws AnalysisException {
-    // Analyzed stmt that contains a query statement with subqueries to be rewritten.
-    StatementBase stmt = analysisResult.getStmt();
-    Preconditions.checkState(stmt.isAnalyzed());
-    // Analyzed query statement to be rewritten.
-    QueryStmt queryStmt = null;
-    if (stmt instanceof QueryStmt) {
-      queryStmt = (QueryStmt) analysisResult.getStmt();
-    } else if (stmt instanceof InsertStmt) {
-      queryStmt = ((InsertStmt) analysisResult.getStmt()).getQueryStmt();
-    } else if (stmt instanceof CreateTableAsSelectStmt) {
-      queryStmt = ((CreateTableAsSelectStmt) analysisResult.getStmt()).getQueryStmt();
-    } else if (analysisResult.isUpdateStmt()) {
-      queryStmt = ((UpdateStmt) analysisResult.getStmt()).getQueryStmt();
-    } else if (analysisResult.isDeleteStmt()) {
-      queryStmt = ((DeleteStmt) analysisResult.getStmt()).getQueryStmt();
-    } else {
-      throw new AnalysisException("Unsupported statement containing subqueries: " +
-          stmt.toSql());
-    }
-    rewriteQueryStatement(queryStmt, queryStmt.getAnalyzer());
-    stmt.reset();
-    return stmt;
-  }
-
-  /**
-   * Calls the appropriate rewrite method based on the specific type of query stmt. See
-   * rewriteSelectStatement() and rewriteUnionStatement() documentation.
-   */
-  public static void rewriteQueryStatement(QueryStmt stmt, Analyzer analyzer)
-      throws AnalysisException {
-    Preconditions.checkNotNull(stmt);
-    Preconditions.checkState(stmt.isAnalyzed());
-    if (stmt instanceof SelectStmt) {
-      rewriteSelectStatement((SelectStmt)stmt, analyzer);
-    } else if (stmt instanceof UnionStmt) {
-      rewriteUnionStatement((UnionStmt)stmt, analyzer);
-    } else {
-      throw new AnalysisException("Subqueries not supported for " +
-          stmt.getClass().getSimpleName() + " statements");
-    }
-  }
-
-  /**
-   * Rewrite all the subqueries of a SelectStmt in place. Subqueries
-   * are currently supported in FROM and WHERE clauses. The rewrite is performed in
-   * place and not in a clone of SelectStmt because it requires the stmt to be analyzed.
-   */
-  private static void rewriteSelectStatement(SelectStmt stmt, Analyzer analyzer)
-      throws AnalysisException {
-    // Rewrite all the subqueries in the FROM clause.
-    for (TableRef tblRef: stmt.fromClause_) {
-      if (!(tblRef instanceof InlineViewRef)) continue;
-      InlineViewRef inlineViewRef = (InlineViewRef)tblRef;
-      rewriteQueryStatement(inlineViewRef.getViewStmt(), inlineViewRef.getAnalyzer());
-    }
-    // Rewrite all the subqueries in the WHERE clause.
-    if (stmt.hasWhereClause()) {
-      // Push negation to leaf operands.
-      stmt.whereClause_ = Expr.pushNegationToOperands(stmt.whereClause_);
-      // Check if we can rewrite the subqueries in the WHERE clause. OR predicates with
-      // subqueries are not supported.
-      if (hasSubqueryInDisjunction(stmt.whereClause_)) {
-        throw new AnalysisException("Subqueries in OR predicates are not supported: " +
-            stmt.whereClause_.toSql());
-      }
-      rewriteWhereClauseSubqueries(stmt, analyzer);
-    }
-    stmt.sqlString_ = null;
-    LOG.trace("rewritten stmt: " + stmt.toSql());
-  }
-
-  /**
-   * Rewrite all operands in a UNION. The conditions that apply to SelectStmt rewriting
-   * also apply here.
-   */
-  private static void rewriteUnionStatement(UnionStmt stmt, Analyzer analyzer)
-      throws AnalysisException {
-    for (UnionOperand operand: stmt.getOperands()) {
-      Preconditions.checkState(operand.getQueryStmt() instanceof SelectStmt);
-      StmtRewriter.rewriteSelectStatement(
-          (SelectStmt)operand.getQueryStmt(), operand.getAnalyzer());
-    }
-  }
-
-  /**
-   * Returns true if the Expr tree rooted at 'expr' has at least one subquery
-   * that participates in a disjunction.
-   */
-  private static boolean hasSubqueryInDisjunction(Expr expr) {
-    if (!(expr instanceof CompoundPredicate)) return false;
-    if (Expr.IS_OR_PREDICATE.apply(expr)) {
-      return expr.contains(Subquery.class);
-    }
-    for (Expr child: expr.getChildren()) {
-      if (hasSubqueryInDisjunction(child)) return true;
-    }
-    return false;
-  }
-
-  /**
-   * Rewrite all subqueries of a stmt's WHERE clause. Initially, all the
-   * conjuncts containing subqueries are extracted from the WHERE clause and are
-   * replaced with true BoolLiterals. Subsequently, each extracted conjunct is
-   * merged into its parent select block by converting it into a join.
-   * Conjuncts with subqueries that themselves contain conjuncts with subqueries are
-   * recursively rewritten in a bottom up fashion.
-   *
-   * The following example illustrates the bottom up rewriting of nested queries.
-   * Suppose we have the following three level nested query Q0:
-   *
-   * SELECT *
-   * FROM T1                                            : Q0
-   * WHERE T1.a IN (SELECT a
-   *                FROM T2 WHERE T2.b IN (SELECT b
-   *                                       FROM T3))
-   * AND T1.c < 10;
-   *
-   * This query will be rewritten as follows. Initially, the IN predicate
-   * T1.a IN (SELECT a FROM T2 WHERE T2.b IN (SELECT b FROM T3)) is extracted
-   * from the top level block (Q0) since it contains a subquery and is
-   * replaced by a true BoolLiteral, resulting in the following query Q1:
-   *
-   * SELECT * FROM T1 WHERE TRUE : Q1
-   *
-   * Since the stmt in the extracted predicate contains a conjunct with a subquery,
-   * it is also rewritten. As before, rewriting stmt SELECT a FROM T2
-   * WHERE T2.b IN (SELECT b FROM T3) works by first extracting the conjunct that
-   * contains the subquery (T2.b IN (SELECT b FROM T3)) and substituting it with
-   * a true BoolLiteral, producing the following stmt Q2:
-   *
-   * SELECT a FROM T2 WHERE TRUE : Q2
-   *
-   * The predicate T2.b IN (SELECT b FROM T3) is then merged with Q2,
-   * producing the following unnested query Q3:
-   *
-   * SELECT a FROM T2 LEFT SEMI JOIN (SELECT b FROM T3) $a$1 ON T2.b = $a$1.b : Q3
-   *
-   * The extracted IN predicate becomes:
-   *
-   * T1.a IN (SELECT a FROM T2 LEFT SEMI JOIN (SELECT b FROM T3) $a$1 ON T2.b = $a$1.b)
-   *
-   * Finally, the rewritten IN predicate is merged with query block Q1,
-   * producing the following unnested query (WHERE clauses that contain only
-   * conjunctions of true BoolLiterals are eliminated):
-   *
-   * SELECT *
-   * FROM T1 LEFT SEMI JOIN (SELECT a
-   *                         FROM T2 LEFT SEMI JOIN (SELECT b FROM T3) $a$1
-   *                         ON T2.b = $a$1.b) $a$1
-   * ON $a$1.a = T1.a
-   * WHERE T1.c < 10;
-   *
-   */
-  private static void rewriteWhereClauseSubqueries(SelectStmt stmt, Analyzer analyzer)
-     throws AnalysisException {
-    int numTableRefs = stmt.fromClause_.size();
-    ArrayList<Expr> exprsWithSubqueries = Lists.newArrayList();
-    ExprSubstitutionMap smap = new ExprSubstitutionMap();
-    // Replace all BetweenPredicates with their equivalent compound predicates.
-    stmt.whereClause_ = rewriteBetweenPredicates(stmt.whereClause_);
-    // Check if all the conjuncts in the WHERE clause that contain subqueries
-    // can currently be rewritten as a join.
-    for (Expr conjunct: stmt.whereClause_.getConjuncts()) {
-      List<Subquery> subqueries = Lists.newArrayList();
-      conjunct.collectAll(Predicates.instanceOf(Subquery.class), subqueries);
-      if (subqueries.size() == 0) continue;
-      if (subqueries.size() > 1) {
-        throw new AnalysisException("Multiple subqueries are not supported in " +
-            "expression: " + conjunct.toSql());
-      }
-      if (!(conjunct instanceof InPredicate) && !(conjunct instanceof ExistsPredicate) &&
-          !(conjunct instanceof BinaryPredicate) &&
-          !conjunct.contains(Expr.IS_SCALAR_SUBQUERY)) {
-        throw new AnalysisException("Non-scalar subquery is not supported in " +
-            "expression: " + conjunct.toSql());
-      }
-
-      if (conjunct instanceof ExistsPredicate) {
-        // Check if we can determine the result of an ExistsPredicate during analysis.
-        // If so, replace the predicate with a BoolLiteral predicate and remove it from
-        // the list of predicates to be rewritten.
-        BoolLiteral boolLiteral = replaceExistsPredicate((ExistsPredicate) conjunct);
-        if (boolLiteral != null) {
-          boolLiteral.analyze(analyzer);
-          smap.put(conjunct, boolLiteral);
-          continue;
-        }
-      }
-
-      // Replace all the supported exprs with subqueries with true BoolLiterals
-      // using an smap.
-      BoolLiteral boolLiteral = new BoolLiteral(true);
-      boolLiteral.analyze(analyzer);
-      smap.put(conjunct, boolLiteral);
-      exprsWithSubqueries.add(conjunct);
-    }
-    stmt.whereClause_ = stmt.whereClause_.substitute(smap, analyzer, false);
-
-    boolean hasNewVisibleTuple = false;
-    // Recursively rewrite all the exprs that contain subqueries and merge them
-    // with 'stmt'.
-    for (Expr expr: exprsWithSubqueries) {
-      if (mergeExpr(stmt, rewriteExpr(expr, analyzer), analyzer)) {
-        hasNewVisibleTuple = true;
-      }
-    }
-    if (canEliminate(stmt.whereClause_)) stmt.whereClause_ = null;
-    if (hasNewVisibleTuple) replaceUnqualifiedStarItems(stmt, numTableRefs);
-  }
-
-  /**
-   * Replace an ExistsPredicate that contains a subquery with a BoolLiteral if we
-   * can determine its result without evaluating it. Return null if the result of the
-   * ExistsPredicate can only be determined at run-time.
-   */
-  private static BoolLiteral replaceExistsPredicate(ExistsPredicate predicate) {
-    Subquery subquery = predicate.getSubquery();
-    Preconditions.checkNotNull(subquery);
-    SelectStmt subqueryStmt = (SelectStmt) subquery.getStatement();
-    BoolLiteral boolLiteral = null;
-    if (subqueryStmt.getAnalyzer().hasEmptyResultSet()) {
-      boolLiteral = new BoolLiteral(predicate.isNotExists());
-    } else if (subqueryStmt.hasAggInfo() && subqueryStmt.getAggInfo().hasAggregateExprs()
-          && !subqueryStmt.hasAnalyticInfo() && subqueryStmt.getHavingPred() == null) {
-      boolLiteral = new BoolLiteral(!predicate.isNotExists());
-    }
-    return boolLiteral;
-  }
-
-  /**
-   * Replace all BetweenPredicates with their equivalent compound predicates from the
-   * expr tree rooted at 'expr'. The modified expr tree is returned.
-   */
-  private static Expr rewriteBetweenPredicates(Expr expr) {
-    if (expr instanceof BetweenPredicate) {
-      return ((BetweenPredicate)expr).getRewrittenPredicate();
-    }
-    for (int i = 0; i < expr.getChildren().size(); ++i) {
-      expr.setChild(i, rewriteBetweenPredicates(expr.getChild(i)));
-    }
-    return expr;
-  }
-
-  /**
-   * Modifies in place an expr that contains a subquery by rewriting its
-   * subquery stmt. The modified analyzed expr is returned.
-   */
-  private static Expr rewriteExpr(Expr expr, Analyzer analyzer)
-      throws AnalysisException {
-    // Extract the subquery and rewrite it.
-    Subquery subquery = expr.getSubquery();
-    Preconditions.checkNotNull(subquery);
-    rewriteSelectStatement((SelectStmt) subquery.getStatement(), subquery.getAnalyzer());
-    // Create a new Subquery with the rewritten stmt and use a substitution map
-    // to replace the original subquery from the expr.
-    QueryStmt rewrittenStmt = subquery.getStatement().clone();
-    rewrittenStmt.reset();
-    Subquery newSubquery = new Subquery(rewrittenStmt);
-    newSubquery.analyze(analyzer);
-    ExprSubstitutionMap smap = new ExprSubstitutionMap();
-    smap.put(subquery, newSubquery);
-    return expr.substitute(smap, analyzer, false);
-  }
-
-  /**
-   * Merge an expr containing a subquery with a SelectStmt 'stmt' by
-   * converting the subquery stmt of the former into an inline view and
-   * creating a join between the new inline view and the right-most table
-   * from 'stmt'. Return true if the rewrite introduced a new visible tuple
-   * due to a CROSS JOIN or a LEFT OUTER JOIN.
-   *
-   * This process works as follows:
-   * 1. Create a new inline view with the subquery as the view's stmt. Changes
-   *    made to the subquery's stmt will affect the inline view.
-   * 2. Extract all correlated predicates from the subquery's WHERE
-   *    clause; the subquery's select list may be extended with new items and a
-   *    GROUP BY clause may be added.
-   * 3. Add the inline view to stmt's tableRefs and create a
-   *    join (left semi join, anti-join, left outer join for agg functions
-   *    that return a non-NULL value for an empty input, or cross-join) with
-   *    stmt's right-most table.
-   * 4. Initialize the ON clause of the new join from the original subquery
-   *    predicate and the new inline view.
-   * 5. Apply expr substitutions such that the extracted correlated predicates
-   *    refer to columns of the new inline view.
-   * 6. Add all extracted correlated predicates to the ON clause.
-   */
-  private static boolean mergeExpr(SelectStmt stmt, Expr expr,
-      Analyzer analyzer) throws AnalysisException {
-    Preconditions.checkNotNull(expr);
-    Preconditions.checkNotNull(analyzer);
-    boolean updateSelectList = false;
-
-    SelectStmt subqueryStmt = (SelectStmt)expr.getSubquery().getStatement();
-    // Create a new inline view from the subquery stmt. The inline view will be added
-    // to the stmt's table refs later. Explicitly set the inline view's column labels
-    // to eliminate any chance that column aliases from the parent query could reference
-    // select items from the inline view after the rewrite.
-    List<String> colLabels = Lists.newArrayList();
-    for (int i = 0; i < subqueryStmt.getColLabels().size(); ++i) {
-      colLabels.add(subqueryStmt.getColumnAliasGenerator().getNextAlias());
-    }
-    InlineViewRef inlineView = new InlineViewRef(
-        stmt.getTableAliasGenerator().getNextAlias(), subqueryStmt, colLabels);
-
-    // Extract all correlated predicates from the subquery.
-    List<Expr> onClauseConjuncts = extractCorrelatedPredicates(subqueryStmt);
-    if (!onClauseConjuncts.isEmpty()) {
-      canRewriteCorrelatedSubquery(expr, onClauseConjuncts);
-      // For correlated subqueries that are eligible for rewrite by transforming
-      // into a join, a LIMIT clause has no effect on the results, so we can
-      // safely remove it.
-      subqueryStmt.limitElement_ = new LimitElement(null, null);
-    }
-
-    // Update the subquery's select list and/or its GROUP BY clause by adding
-    // exprs from the extracted correlated predicates.
-    boolean updateGroupBy = expr.getSubquery().isScalarSubquery()
-        || (expr instanceof ExistsPredicate
-            && !subqueryStmt.getSelectList().isDistinct()
-            && subqueryStmt.hasAggInfo());
-    List<Expr> lhsExprs = Lists.newArrayList();
-    List<Expr> rhsExprs = Lists.newArrayList();
-    for (Expr conjunct: onClauseConjuncts) {
-      updateInlineView(inlineView, conjunct, stmt.getTableRefIds(),
-          lhsExprs, rhsExprs, updateGroupBy);
-    }
-
-    // Analyzing the inline view triggers reanalysis of the subquery's select statement.
-    // However, the statement is already analyzed, and since statement analysis is not
-    // idempotent, the analysis needs to be reset.
-    inlineView.reset();
-    inlineView.analyze(analyzer);
-    inlineView.setLeftTblRef(stmt.fromClause_.get(stmt.fromClause_.size() - 1));
-    stmt.fromClause_.add(inlineView);
-    JoinOperator joinOp = JoinOperator.LEFT_SEMI_JOIN;
-
-    // Create a join conjunct from the expr that contains a subquery.
-    Expr joinConjunct = createJoinConjunct(expr, inlineView, analyzer,
-        !onClauseConjuncts.isEmpty());
-    if (joinConjunct != null) {
-      SelectListItem firstItem =
-          ((SelectStmt) inlineView.getViewStmt()).getSelectList().getItems().get(0);
-      if (!onClauseConjuncts.isEmpty() &&
-          firstItem.getExpr().contains(Expr.NON_NULL_EMPTY_AGG)) {
-        // Correlated subqueries with an aggregate function that returns non-null on
-        // an empty input are rewritten using a LEFT OUTER JOIN because we
-        // need to ensure that there is one agg value for every tuple of 'stmt'
-        // (parent select block), even for those tuples of 'stmt' that get rejected
-        // by the subquery due to some predicate. The new join conjunct is added to
-        // stmt's WHERE clause because it needs to be applied to the result of the
-        // LEFT OUTER JOIN (both matched and unmatched tuples).
-        //
-        // TODO Handle other aggregate functions and UDAs that return a non-NULL value
-        // on an empty set.
-        // TODO Handle count aggregate functions in an expression in subqueries
-        // select list.
-        stmt.whereClause_ =
-            CompoundPredicate.createConjunction(joinConjunct, stmt.whereClause_);
-        joinConjunct = null;
-        joinOp = JoinOperator.LEFT_OUTER_JOIN;
-        updateSelectList = true;
-      }
-
-      if (joinConjunct != null) onClauseConjuncts.add(joinConjunct);
-    }
-
-    // Create the ON clause from the extracted correlated predicates.
-    Expr onClausePredicate =
-        CompoundPredicate.createConjunctivePredicate(onClauseConjuncts);
-
-    if (onClausePredicate == null) {
-      Preconditions.checkState(expr instanceof ExistsPredicate);
-      ExistsPredicate existsPred = (ExistsPredicate) expr;
-      // TODO This is very expensive if uncorrelated. Remove it when we implement
-      // independent subquery evaluation.
-      if (existsPred.isNotExists()) {
-        inlineView.setJoinOp(JoinOperator.LEFT_ANTI_JOIN);
-      } else {
-        inlineView.setJoinOp(JoinOperator.LEFT_SEMI_JOIN);
-      }
-      // Note that the concept of a 'correlated inline view' is similar but not the same
-      // as a 'correlated subquery', i.e., a subquery with a correlated predicate.
-      if (!inlineView.isCorrelated()) {
-        // For uncorrelated subqueries, we limit the number of rows returned by the
-        // subquery.
-        subqueryStmt.setLimit(1);
-        inlineView.setOnClause(new BoolLiteral(true));
-      }
-      return false;
-    }
-
-    // Create an smap from the original select-list exprs of the select list to
-    // the corresponding inline-view columns.
-    ExprSubstitutionMap smap = new ExprSubstitutionMap();
-    Preconditions.checkState(lhsExprs.size() == rhsExprs.size());
-    for (int i = 0; i < lhsExprs.size(); ++i) {
-      Expr lhsExpr = lhsExprs.get(i);
-      Expr rhsExpr = rhsExprs.get(i);
-      rhsExpr.analyze(analyzer);
-      smap.put(lhsExpr, rhsExpr);
-    }
-    onClausePredicate = onClausePredicate.substitute(smap, analyzer, false);
-
-    // Check for references to ancestor query blocks (cycles in the dependency
-    // graph of query blocks are not supported).
-    if (!onClausePredicate.isBoundByTupleIds(stmt.getTableRefIds())) {
-      throw new AnalysisException("Unsupported correlated subquery: " +
-          subqueryStmt.toSql());
-    }
-
-    // Check if we have a valid ON clause for an equi-join.
-    boolean hasEqJoinPred = false;
-    for (Expr conjunct: onClausePredicate.getConjuncts()) {
-      if (!(conjunct instanceof BinaryPredicate)) continue;
-      BinaryPredicate.Operator operator = ((BinaryPredicate) conjunct).getOp();
-      if (!operator.isEquivalence()) continue;
-      List<TupleId> lhsTupleIds = Lists.newArrayList();
-      conjunct.getChild(0).getIds(lhsTupleIds, null);
-      if (lhsTupleIds.isEmpty()) continue;
-      List<TupleId> rhsTupleIds = Lists.newArrayList();
-      conjunct.getChild(1).getIds(rhsTupleIds, null);
-      if (rhsTupleIds.isEmpty()) continue;
-      // Check if columns from the outer query block (stmt) appear in both sides
-      // of the binary predicate.
-      if ((lhsTupleIds.contains(inlineView.getDesc().getId()) && lhsTupleIds.size() > 1)
-          || (rhsTupleIds.contains(inlineView.getDesc().getId())
-              && rhsTupleIds.size() > 1)) {
-        continue;
-      }
-      hasEqJoinPred = true;
-      break;
-    }
-
-    if (!hasEqJoinPred && !inlineView.isCorrelated()) {
-      // TODO: Remove this when independent subquery evaluation is implemented.
-      // TODO: Requires support for non-equi joins.
-      boolean hasGroupBy = ((SelectStmt) inlineView.getViewStmt()).hasGroupByClause();
-      if (!expr.getSubquery().isScalarSubquery() ||
-          (!(hasGroupBy && stmt.selectList_.isDistinct()) && hasGroupBy)) {
-        throw new AnalysisException("Unsupported predicate with subquery: " +
-            expr.toSql());
-      }
-
-      // TODO: Requires support for null-aware anti-join mode in nested-loop joins
-      if (expr.getSubquery().isScalarSubquery() && expr instanceof InPredicate
-          && ((InPredicate) expr).isNotIn()) {
-        throw new AnalysisException("Unsupported NOT IN predicate with subquery: " +
-            expr.toSql());
-      }
-
-      // We can rewrite the aggregate subquery using a cross join. All conjuncts
-      // that were extracted from the subquery are added to stmt's WHERE clause.
-      stmt.whereClause_ =
-          CompoundPredicate.createConjunction(onClausePredicate, stmt.whereClause_);
-      inlineView.setJoinOp(JoinOperator.CROSS_JOIN);
-      // Indicate that the CROSS JOIN may add a new visible tuple to stmt's
-      // select list (if the latter contains an unqualified star item '*')
-      return true;
-    }
-
-    // We have a valid equi-join conjunct or the inline view is correlated.
-    if (expr instanceof InPredicate && ((InPredicate)expr).isNotIn() ||
-        expr instanceof ExistsPredicate && ((ExistsPredicate)expr).isNotExists()) {
-      // For the case of a NOT IN with an eq join conjunct, replace the join
-      // conjunct with a conjunct that uses the null-matching eq operator.
-      if (expr instanceof InPredicate) {
-        joinOp = JoinOperator.NULL_AWARE_LEFT_ANTI_JOIN;
-        List<TupleId> tIds = Lists.newArrayList();
-        joinConjunct.getIds(tIds, null);
-        if (tIds.size() <= 1 || !tIds.contains(inlineView.getDesc().getId())) {
-          throw new AnalysisException("Unsupported NOT IN predicate with subquery: " +
-              expr.toSql());
-        }
-        // Replace the EQ operator in the generated join conjunct with a
-        // null-matching EQ operator.
-        for (Expr conjunct: onClausePredicate.getConjuncts()) {
-          if (conjunct.equals(joinConjunct)) {
-            Preconditions.checkState(conjunct instanceof BinaryPredicate);
-            BinaryPredicate binaryPredicate = (BinaryPredicate)conjunct;
-            Preconditions.checkState(binaryPredicate.getOp().isEquivalence());
-            binaryPredicate.setOp(BinaryPredicate.Operator.NULL_MATCHING_EQ);
-            break;
-          }
-        }
-      } else {
-        joinOp = JoinOperator.LEFT_ANTI_JOIN;
-      }
-    }
-    inlineView.setJoinOp(joinOp);
-    inlineView.setOnClause(onClausePredicate);
-    return updateSelectList;
-  }
-
-  /**
-   * Replace all unqualified star exprs ('*') from stmt's select list with qualified
-   * ones, i.e. tbl_1.*,...,tbl_n.*, where tbl_1,...,tbl_n are the visible tablerefs
-   * in stmt. 'tableIdx' indicates the maximum tableRef ordinal to consider when
-   * replacing an unqualified star item.
-   */
-  private static void replaceUnqualifiedStarItems(SelectStmt stmt, int tableIdx) {
-    Preconditions.checkState(tableIdx < stmt.fromClause_.size());
-    ArrayList<SelectListItem> newItems = Lists.newArrayList();
-    for (int i = 0; i < stmt.selectList_.getItems().size(); ++i) {
-      SelectListItem item = stmt.selectList_.getItems().get(i);
-      if (!item.isStar() || item.getRawPath() != null) {
-        newItems.add(item);
-        continue;
-      }
-      // '*' needs to be replaced by tbl1.*,...,tbln.*, where
-      // tbl1,...,tbln are the visible tableRefs in stmt.
-      for (int j = 0; j < tableIdx; ++j) {
-        TableRef tableRef = stmt.fromClause_.get(j);
-        if (tableRef.getJoinOp() == JoinOperator.LEFT_SEMI_JOIN ||
-            tableRef.getJoinOp() == JoinOperator.LEFT_ANTI_JOIN) {
-          continue;
-        }
-        newItems.add(SelectListItem.createStarItem(
-            Lists.newArrayList(tableRef.getUniqueAlias())));
-      }
-    }
-    Preconditions.checkState(!newItems.isEmpty());
-    boolean isDistinct = stmt.selectList_.isDistinct();
-    stmt.selectList_ =
-        new SelectList(newItems, isDistinct, stmt.selectList_.getPlanHints());
-  }
-
-  /**
-   * Return true if the Expr tree rooted at 'expr' can be safely
-   * eliminated, i.e. it only consists of conjunctions of true BoolLiterals.
-   */
-  private static boolean canEliminate(Expr expr) {
-    for (Expr conjunct: expr.getConjuncts()) {
-      if (!Expr.IS_TRUE_LITERAL.apply(conjunct)) return false;
-    }
-    return true;
-  }
-
-  /**
-   * Extract all correlated predicates of a subquery.
-   *
-   * TODO Handle correlated predicates in a HAVING clause.
-   */
-  private static ArrayList<Expr> extractCorrelatedPredicates(SelectStmt subqueryStmt)
-      throws AnalysisException {
-    List<TupleId> subqueryTupleIds = subqueryStmt.getTableRefIds();
-    ArrayList<Expr> correlatedPredicates = Lists.newArrayList();
-
-    if (subqueryStmt.hasWhereClause()) {
-      if (!canExtractCorrelatedPredicates(subqueryStmt.getWhereClause(),
-          subqueryTupleIds)) {
-        throw new AnalysisException("Disjunctions with correlated predicates " +
-            "are not supported: " + subqueryStmt.getWhereClause().toSql());
-      }
-      // Extract the correlated predicates from the subquery's WHERE clause and
-      // replace them with true BoolLiterals.
-      Expr newWhereClause = extractCorrelatedPredicates(subqueryStmt.getWhereClause(),
-          subqueryTupleIds, correlatedPredicates);
-      if (canEliminate(newWhereClause)) newWhereClause = null;
-      subqueryStmt.setWhereClause(newWhereClause);
-    }
-
-    // Process all correlated predicates from subquery's ON clauses.
-    for (TableRef tableRef: subqueryStmt.getTableRefs()) {
-      if (tableRef.getOnClause() == null) continue;
-
-      ArrayList<Expr> onClauseCorrelatedPreds = Lists.newArrayList();
-      Expr newOnClause = extractCorrelatedPredicates(tableRef.getOnClause(),
-          subqueryTupleIds, onClauseCorrelatedPreds);
-      if (onClauseCorrelatedPreds.isEmpty()) continue;
-
-      correlatedPredicates.addAll(onClauseCorrelatedPreds);
-      if (canEliminate(newOnClause)) {
-        // After the extraction of correlated predicates from an ON clause,
-        // the latter may only contain conjunctions of True BoolLiterals. In
-        // this case, we can eliminate the ON clause and set the join type to
-        // CROSS JOIN.
-        tableRef.setJoinOp(JoinOperator.CROSS_JOIN);
-        tableRef.setOnClause(null);
-      } else {
-        tableRef.setOnClause(newOnClause);
-      }
-    }
-    return correlatedPredicates;
-  }
-
-  /**
-   * Extract all correlated predicates from the expr tree rooted at 'root' and
-   * replace them with true BoolLiterals. The modified expr tree is returned
-   * and the extracted correlated predicates are added to 'matches'.
-   */
-  private static Expr extractCorrelatedPredicates(Expr root, List<TupleId> tupleIds,
-      ArrayList<Expr> matches) {
-    if (isCorrelatedPredicate(root, tupleIds)) {
-      matches.add(root);
-      return new BoolLiteral(true);
-    }
-    for (int i = 0; i < root.getChildren().size(); ++i) {
-      root.getChildren().set(i, extractCorrelatedPredicates(root.getChild(i), tupleIds,
-          matches));
-    }
-    return root;
-  }
-
-  /**
-   * Checks if an expr containing a correlated subquery is eligible for rewrite by
-   * transforming into a join. 'correlatedPredicates' contains the correlated
-   * predicates identified in the subquery. Throws an AnalysisException if 'expr'
-   * is not eligible for rewrite.
-   * TODO: Merge all the rewrite eligibility tests into a single function.
-   */
-  private static void canRewriteCorrelatedSubquery(Expr expr,
-      List<Expr> correlatedPredicates) throws AnalysisException {
-    Preconditions.checkNotNull(expr);
-    Preconditions.checkNotNull(correlatedPredicates);
-    Preconditions.checkState(expr.contains(Subquery.class));
-    SelectStmt stmt = (SelectStmt) expr.getSubquery().getStatement();
-    Preconditions.checkNotNull(stmt);
-    // Grouping and/or aggregation is not allowed on correlated scalar and IN subqueries
-    if ((expr instanceof BinaryPredicate
-          && (stmt.hasGroupByClause() || stmt.hasAnalyticInfo()))
-        || (expr instanceof InPredicate
-            && (stmt.hasAggInfo() || stmt.hasAnalyticInfo()))) {
-      throw new AnalysisException("Unsupported correlated subquery with grouping " +
-          "and/or aggregation: " + stmt.toSql());
-    }
-
-    final com.google.common.base.Predicate<Expr> isSingleSlotRef =
-        new com.google.common.base.Predicate<Expr>() {
-      @Override
-      public boolean apply(Expr arg) { return arg.unwrapSlotRef(false) != null; }
-    };
-
-    // A HAVING clause is only allowed on correlated EXISTS subqueries with
-    // correlated binary predicates of the form Slot = Slot (see IMPALA-2734)
-    // TODO Handle binary predicates with IS NOT DISTINCT op
-    if (expr instanceof ExistsPredicate && stmt.hasHavingClause()
-        && !correlatedPredicates.isEmpty()
-        && (!stmt.hasAggInfo()
-            || !Iterables.all(correlatedPredicates,
-                Predicates.or(Expr.IS_EQ_BINARY_PREDICATE, isSingleSlotRef)))) {
-      throw new AnalysisException("Unsupported correlated EXISTS subquery with a " +
-          "HAVING clause: " + stmt.toSql());
-    }
-
-    // The following correlated subqueries with a limit clause are supported:
-    // 1. EXISTS subqueries
-    // 2. Scalar subqueries with aggregation
-    if (stmt.hasLimit() &&
-        (!(expr instanceof BinaryPredicate) || !stmt.hasAggInfo() ||
-         stmt.selectList_.isDistinct()) &&
-        !(expr instanceof ExistsPredicate)) {
-      throw new AnalysisException("Unsupported correlated subquery with a " +
-          "LIMIT clause: " + stmt.toSql());
-    }
-  }
-
-  /**
-   * Update the subquery within an inline view by expanding its select list with exprs
-   * from a correlated predicate 'expr' that will be 'moved' to an ON clause in the
-   * subquery's parent query block. We need to make sure that every expr extracted from
-   * the subquery references an item in the subquery's select list. If 'updateGroupBy'
-   * is true, the exprs extracted from 'expr' are also added to stmt's GROUP BY clause.
-   * Throws an AnalysisException if we need to update the GROUP BY clause but
-   * both the lhs and rhs of 'expr' reference a tuple of the subquery stmt.
-   */
-  private static void updateInlineView(InlineViewRef inlineView, Expr expr,
-      List<TupleId> parentQueryTids, List<Expr> lhsExprs, List<Expr> rhsExprs,
-      boolean updateGroupBy) throws AnalysisException {
-    SelectStmt stmt = (SelectStmt)inlineView.getViewStmt();
-    List<TupleId> subqueryTblIds = stmt.getTableRefIds();
-    ArrayList<Expr> groupByExprs = null;
-    if (updateGroupBy) groupByExprs = Lists.newArrayList();
-
-    List<SelectListItem> items = stmt.selectList_.getItems();
-    // Collect all the SlotRefs from 'expr' and identify those that are bound by
-    // subquery tuple ids.
-    ArrayList<Expr> slotRefs = Lists.newArrayList();
-    expr.collectAll(Predicates.instanceOf(SlotRef.class), slotRefs);
-    List<Expr> exprsBoundBySubqueryTids = Lists.newArrayList();
-    for (Expr slotRef: slotRefs) {
-      if (slotRef.isBoundByTupleIds(subqueryTblIds)) {
-        exprsBoundBySubqueryTids.add(slotRef);
-      }
-    }
-    // The correlated predicate only references slots from a parent block,
-    // no need to update the subquery's select or group by list.
-    if (exprsBoundBySubqueryTids.isEmpty()) return;
-    if (updateGroupBy) {
-      Preconditions.checkState(expr instanceof BinaryPredicate);
-      Expr exprBoundBySubqueryTids = null;
-      if (exprsBoundBySubqueryTids.size() > 1) {
-        // If the predicate contains multiple SlotRefs bound by subquery tuple
-        // ids, they must all be on the same side of that predicate.
-        if (expr.getChild(0).isBoundByTupleIds(subqueryTblIds) &&
-           expr.getChild(1).isBoundByTupleIds(parentQueryTids)) {
-          exprBoundBySubqueryTids = expr.getChild(0);
-        } else if (expr.getChild(0).isBoundByTupleIds(parentQueryTids) &&
-            expr.getChild(1).isBoundByTupleIds(subqueryTblIds)) {
-          exprBoundBySubqueryTids = expr.getChild(1);
-        } else {
-          throw new AnalysisException("All subquery columns " +
-              "that participate in a predicate must be on the same side of " +
-              "that predicate: " + expr.toSql());
-        }
-      } else {
-        Preconditions.checkState(exprsBoundBySubqueryTids.size() == 1);
-        exprBoundBySubqueryTids = exprsBoundBySubqueryTids.get(0);
-      }
-      exprsBoundBySubqueryTids.clear();
-      exprsBoundBySubqueryTids.add(exprBoundBySubqueryTids);
-    }
-
-    // Add the exprs bound by subquery tuple ids to the select list and
-    // register it for substitution. We use a temporary substitution map
-    // because we cannot at this point analyze the new select list expr. Once
-    // the new inline view is analyzed, the entries from this map will be
-    // added to an ExprSubstitutionMap.
-    for (Expr boundExpr: exprsBoundBySubqueryTids) {
-      String colAlias = stmt.getColumnAliasGenerator().getNextAlias();
-      items.add(new SelectListItem(boundExpr, null));
-      inlineView.getExplicitColLabels().add(colAlias);
-      lhsExprs.add(boundExpr);
-      rhsExprs.add(new SlotRef(Lists.newArrayList(inlineView.getUniqueAlias(), colAlias)));
-      if (groupByExprs != null) groupByExprs.add(boundExpr);
-    }
-
-    // Update the subquery's select list.
-    boolean isDistinct = stmt.selectList_.isDistinct();
-    stmt.selectList_ = new SelectList(
-        items, isDistinct, stmt.selectList_.getPlanHints());
-    // Update subquery's GROUP BY clause
-    if (groupByExprs != null && !groupByExprs.isEmpty()) {
-      if (stmt.hasGroupByClause()) {
-        stmt.groupingExprs_.addAll(groupByExprs);
-      } else {
-        stmt.groupingExprs_ = groupByExprs;
-      }
-    }
-  }
-
-  /**
-   * Returns true if we can extract the correlated predicates from 'expr'. A
-   * correlated predicate cannot be extracted if it is part of a disjunction.
-   */
-  private static boolean canExtractCorrelatedPredicates(Expr expr,
-      List<TupleId> subqueryTupleIds) {
-    if (!(expr instanceof CompoundPredicate)) return true;
-    if (Expr.IS_OR_PREDICATE.apply(expr)) {
-      return !containsCorrelatedPredicate(expr, subqueryTupleIds);
-    }
-    for (Expr child: expr.getChildren()) {
-      if (!canExtractCorrelatedPredicates(child, subqueryTupleIds)) {
-        return false;
-      }
-    }
-    return true;
-  }
-
-  /**
-   * Return true if the expr tree rooted at 'root' contains a correlated
-   * predicate.
-   */
-  private static boolean containsCorrelatedPredicate(Expr root, List<TupleId> tupleIds) {
-    if (isCorrelatedPredicate(root, tupleIds)) return true;
-    for (Expr child: root.getChildren()) {
-      if (containsCorrelatedPredicate(child, tupleIds)) return true;
-    }
-    return false;
-  }
-
-  /**
-   * Returns true if 'expr' is a correlated predicate. A predicate is
-   * correlated if at least one of its SlotRefs belongs to an ancestor
-   * query block (i.e. is not bound by the given 'tupleIds').
-   */
-  private static boolean isCorrelatedPredicate(Expr expr, List<TupleId> tupleIds) {
-    return (expr instanceof BinaryPredicate || expr instanceof SlotRef)
-        && !expr.isBoundByTupleIds(tupleIds);
-  }
-
-  /**
-   * Converts an expr containing a subquery into an analyzed conjunct to be
-   * used in a join. The conversion is performed in place by replacing the
-   * subquery with the first expr from the select list of 'inlineView'.
-   * If 'isCorrelated' is true and the first expr from the inline view contains
-   * an aggregate function that returns non-null on an empty input,
-   * the aggregate function is wrapped in a 'zeroifnull' (or 'ifnull') function.
-   */
-  private static Expr createJoinConjunct(Expr exprWithSubquery, InlineViewRef inlineView,
-      Analyzer analyzer, boolean isCorrelated) throws AnalysisException {
-    Preconditions.checkNotNull(exprWithSubquery);
-    Preconditions.checkNotNull(inlineView);
-    Preconditions.checkState(exprWithSubquery.contains(Subquery.class));
-    if (exprWithSubquery instanceof ExistsPredicate) return null;
-    // Create a SlotRef from the first item of inlineView's select list
-    SlotRef slotRef = new SlotRef(Lists.newArrayList(
-        inlineView.getUniqueAlias(), inlineView.getColLabels().get(0)));
-    slotRef.analyze(analyzer);
-    Expr subquerySubstitute = slotRef;
-    if (exprWithSubquery instanceof InPredicate) {
-      BinaryPredicate pred = new BinaryPredicate(BinaryPredicate.Operator.EQ,
-          exprWithSubquery.getChild(0), slotRef);
-      pred.analyze(analyzer);
-      return pred;
-    }
-    // Only scalar subqueries are supported
-    Subquery subquery = exprWithSubquery.getSubquery();
-    if (!subquery.isScalarSubquery()) {
-      throw new AnalysisException("Unsupported predicate with a non-scalar subquery: "
-          + subquery.toSql());
-    }
-    ExprSubstitutionMap smap = new ExprSubstitutionMap();
-    SelectListItem item =
-      ((SelectStmt) inlineView.getViewStmt()).getSelectList().getItems().get(0);
-    if (isCorrelated && !item.getExpr().contains(Expr.IS_BUILTIN_AGG_FN)) {
-      throw new AnalysisException("UDAs are not supported in the select list of " +
-          "correlated subqueries: " + subquery.toSql());
-    }
-    if (isCorrelated && item.getExpr().contains(Expr.NON_NULL_EMPTY_AGG)) {
-      // TODO: Add support for multiple agg functions that return non-null on an
-      // empty input, by wrapping them with zeroifnull functions before the inline
-      // view is analyzed.
-      if (!Expr.NON_NULL_EMPTY_AGG.apply(item.getExpr()) &&
-        (!(item.getExpr() instanceof CastExpr) ||
-         !Expr.NON_NULL_EMPTY_AGG.apply(item.getExpr().getChild(0)))) {
-        throw new AnalysisException("Aggregate function that returns non-null on " +
-          "an empty input cannot be used in an expression in a " +
-          "correlated subquery's select list: " + subquery.toSql());
-      }
-
-      List<Expr> aggFns = Lists.newArrayList();
-      item.getExpr().collectAll(Expr.NON_NULL_EMPTY_AGG, aggFns);
-      // TODO Generalize this by making the aggregate functions aware of the
-      // literal expr that they return on empty input, e.g. max returns a
-      // NullLiteral whereas count returns a NumericLiteral.
-      if (((FunctionCallExpr)aggFns.get(0)).getReturnType().isNumericType()) {
-        FunctionCallExpr zeroIfNull = new FunctionCallExpr("zeroifnull",
-            Lists.newArrayList((Expr) slotRef));
-        zeroIfNull.analyze(analyzer);
-        subquerySubstitute = zeroIfNull;
-      } else if (((FunctionCallExpr)aggFns.get(0)).getReturnType().isStringType()) {
-        List<Expr> params = Lists.newArrayList();
-        params.add(slotRef);
-        params.add(new StringLiteral(""));
-        FunctionCallExpr ifnull = new FunctionCallExpr("ifnull", params);
-        ifnull.analyze(analyzer);
-        subquerySubstitute = ifnull;
-      } else {
-        throw new AnalysisException("Unsupported aggregate function used in " +
-            "a correlated subquery's select list: " + subquery.toSql());
-      }
-    }
-    smap.put(subquery, subquerySubstitute);
-    return exprWithSubquery.substitute(smap, analyzer, false);
-  }
-}

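The extract-and-replace pattern used by extractCorrelatedPredicates() above is easier to see in isolation: a correlated predicate is collected and its position in the tree is patched with a TRUE literal, so the remaining WHERE/ON clause stays well-formed. The following standalone Java sketch mirrors that recursion on a toy expression tree; Node, Pred, And and TrueLiteral are hypothetical stand-ins, not Impala classes.

import java.util.ArrayList;
import java.util.List;

public class ExtractDemo {
  static abstract class Node { List<Node> children = new ArrayList<>(); }
  static class TrueLiteral extends Node {}
  static class Pred extends Node {
    final String sql; final boolean correlated;
    Pred(String sql, boolean correlated) { this.sql = sql; this.correlated = correlated; }
  }
  static class And extends Node {
    And(Node l, Node r) { children.add(l); children.add(r); }
  }

  // Same shape as extractCorrelatedPredicates(Expr, List<TupleId>, ArrayList<Expr>):
  // collect a correlated leaf and replace it with TRUE, otherwise recurse into children.
  static Node extract(Node root, List<Pred> matches) {
    if (root instanceof Pred && ((Pred) root).correlated) {
      matches.add((Pred) root);
      return new TrueLiteral();
    }
    for (int i = 0; i < root.children.size(); ++i) {
      root.children.set(i, extract(root.children.get(i), matches));
    }
    return root;
  }

  public static void main(String[] args) {
    // WHERE t.id = outer.id AND t.price > 10  -- only the first conjunct is correlated.
    Node where = new And(new Pred("t.id = outer.id", true), new Pred("t.price > 10", false));
    List<Pred> matches = new ArrayList<>();
    extract(where, matches);
    System.out.println("extracted: " + matches.size());  // prints "extracted: 1"
  }
}

After extraction, the patched positions hold only TRUE conjuncts, which is exactly what canEliminate() checks for before dropping the clause entirely.
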
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/StringLiteral.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/StringLiteral.java b/fe/src/main/java/com/cloudera/impala/analysis/StringLiteral.java
deleted file mode 100644
index be4820c..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/StringLiteral.java
+++ /dev/null
@@ -1,174 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.io.IOException;
-import java.io.StringReader;
-import java.math.BigDecimal;
-
-import java_cup.runtime.Symbol;
-
-import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
-
-import com.cloudera.impala.catalog.ScalarType;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TExprNode;
-import com.cloudera.impala.thrift.TExprNodeType;
-import com.cloudera.impala.thrift.TStringLiteral;
-import com.google.common.base.Objects;
-import com.google.common.base.Preconditions;
-
-public class StringLiteral extends LiteralExpr {
-  private final String value_;
-
-  public StringLiteral(String value) {
-    this.value_ = value;
-    type_ = ScalarType.STRING;
-    evalCost_ = LITERAL_COST;
-  }
-
-  public StringLiteral(String value, Type type) {
-    this.value_ = value;
-    type_ = type;
-    evalCost_ = LITERAL_COST;
-  }
-
-  /**
-   * Copy c'tor used in clone().
-   */
-  protected StringLiteral(StringLiteral other) {
-    super(other);
-    value_ = other.value_;
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (!super.equals(obj)) return false;
-    return ((StringLiteral) obj).value_.equals(value_);
-  }
-
-  @Override
-  public int hashCode() { return value_.hashCode(); }
-
-  @Override
-  public String toSqlImpl() {
-    return "'" + value_ + "'";
-  }
-
-  @Override
-  protected void toThrift(TExprNode msg) {
-    msg.node_type = TExprNodeType.STRING_LITERAL;
-    msg.string_literal = new TStringLiteral(getUnescapedValue());
-  }
-
-  public String getValue() { return value_; }
-
-  public String getUnescapedValue() {
-    // Unescape string exactly like Hive does. Hive's method assumes
-    // quotes so we add them here to reuse Hive's code.
-    return BaseSemanticAnalyzer.unescapeSQLString("'" + value_ + "'");
-  }
-
-  @Override
-  public String getStringValue() {
-    return value_;
-  }
-
-  @Override
-  public String debugString() {
-    return Objects.toStringHelper(this)
-        .add("value", value_)
-        .toString();
-  }
-
-  @Override
-  protected Expr uncheckedCastTo(Type targetType) throws AnalysisException {
-    Preconditions.checkState(targetType.isNumericType() || targetType.isDateType()
-        || targetType.equals(this.type_) || targetType.isStringType());
-    if (targetType.equals(this.type_)) {
-      return this;
-    } else if (targetType.isStringType()) {
-      type_ = targetType;
-    } else if (targetType.isNumericType()) {
-      return convertToNumber();
-    } else if (targetType.isDateType()) {
-      // Let the BE do the cast so it is in Boost format
-      return new CastExpr(targetType, this);
-    }
-    return this;
-  }
-
-  /**
-   * Convert this string literal to a numeric literal.
-   *
-   * @return new converted literal (not null)
-   *         the type of the literal is determined by the lexical scanner
-   * @throws AnalysisException
-   *           if NumberFormatException occurs,
-   *           or if floating point value is NaN or infinite
-   */
-  public LiteralExpr convertToNumber()
-      throws AnalysisException {
-    StringReader reader = new StringReader(value_);
-    SqlScanner scanner = new SqlScanner(reader);
-    // For distinguishing positive and negative numbers.
-    boolean negative = false;
-    Symbol sym;
-    try {
-      // We allow simple chaining of MINUS to recognize negative numbers.
-      // Currently we can't handle string literals containing full-fledged expressions
-      // which are implicitly cast to a numeric literal.
-      // This would require invoking the parser.
-      sym = scanner.next_token();
-      while (sym.sym == SqlParserSymbols.SUBTRACT) {
-        negative = !negative;
-        sym = scanner.next_token();
-      }
-    } catch (IOException e) {
-      throw new AnalysisException("Failed to convert string literal to number.", e);
-    }
-    if (sym.sym == SqlParserSymbols.NUMERIC_OVERFLOW) {
-      throw new AnalysisException("Number too large: " + value_);
-    }
-    if (sym.sym == SqlParserSymbols.INTEGER_LITERAL) {
-      BigDecimal val = (BigDecimal) sym.value;
-      if (negative) val = val.negate();
-      return new NumericLiteral(val);
-    }
-    if (sym.sym == SqlParserSymbols.DECIMAL_LITERAL) {
-      BigDecimal val = (BigDecimal) sym.value;
-      if (negative) val = val.negate();
-      return new NumericLiteral(val);
-    }
-    // Symbol is not an integer or floating point literal.
-    throw new AnalysisException(
-        "Failed to convert string literal '" + value_ + "' to number.");
-  }
-
-  @Override
-  public int compareTo(LiteralExpr o) {
-    int ret = super.compareTo(o);
-    if (ret != 0) return ret;
-    StringLiteral other = (StringLiteral) o;
-    return value_.compareTo(other.getStringValue());
-  }
-
-  @Override
-  public Expr clone() { return new StringLiteral(this); }
-}

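The sign handling in convertToNumber() above relies on the SQL scanner, but the core idea, folding a chain of leading minus signs into a single negation before parsing the rest as a number, can be sketched without it. The snippet below is a simplified stand-in: plain character scanning instead of SqlScanner, and no overflow or NaN/infinity checks.

import java.math.BigDecimal;

public class ConvertDemo {
  static BigDecimal convertToNumber(String s) {
    boolean negative = false;
    int i = 0;
    // Chained MINUS tokens simply toggle the sign, e.g. "--5" is positive 5.
    while (i < s.length() && s.charAt(i) == '-') {
      negative = !negative;
      ++i;
    }
    BigDecimal val = new BigDecimal(s.substring(i).trim());
    return negative ? val.negate() : val;
  }

  public static void main(String[] args) {
    System.out.println(convertToNumber("--5"));   // 5
    System.out.println(convertToNumber("-2.5"));  // -2.5
  }
}
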
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/Subquery.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/Subquery.java b/fe/src/main/java/com/cloudera/impala/analysis/Subquery.java
deleted file mode 100644
index d0e1b30..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/Subquery.java
+++ /dev/null
@@ -1,157 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.ArrayList;
-
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.catalog.ArrayType;
-import com.cloudera.impala.catalog.StructField;
-import com.cloudera.impala.catalog.StructType;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TExprNode;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Sets;
-
-/**
- * Class representing a subquery. A Subquery consists of a QueryStmt and has
- * its own Analyzer context.
- */
-public class Subquery extends Expr {
-  private final static Logger LOG = LoggerFactory.getLogger(Subquery.class);
-
-  // The QueryStmt of the subquery.
-  protected QueryStmt stmt_;
-  // A subquery has its own analysis context
-  protected Analyzer analyzer_;
-
-  public Analyzer getAnalyzer() { return analyzer_; }
-  public QueryStmt getStatement() { return stmt_; }
-  @Override
-  public String toSqlImpl() { return "(" + stmt_.toSql() + ")"; }
-
-  /**
-   * C'tor that initializes a Subquery from a QueryStmt.
-   */
-  public Subquery(QueryStmt queryStmt) {
-    super();
-    Preconditions.checkNotNull(queryStmt);
-    stmt_ = queryStmt;
-  }
-
-  /**
-   * Copy c'tor.
-   */
-  public Subquery(Subquery other) {
-    super(other);
-    stmt_ = other.stmt_.clone();
-    analyzer_ = other.analyzer_;
-  }
-
-  /**
-   * Analyzes the subquery in a child analyzer.
-   */
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    if (isAnalyzed_) return;
-    super.analyze(analyzer);
-    if (!(stmt_ instanceof SelectStmt)) {
-      throw new AnalysisException("A subquery must contain a single select block: " +
-        toSql());
-    }
-    // The subquery is analyzed with its own analyzer.
-    analyzer_ = new Analyzer(analyzer);
-    analyzer_.setIsSubquery();
-    stmt_.analyze(analyzer_);
-    // Check whether the stmt_ contains an illegal mix of un/correlated table refs.
-    stmt_.getCorrelatedTupleIds(analyzer_);
-
-    // Set the subquery type based on the types of the exprs in the
-    // result list of the associated SelectStmt.
-    ArrayList<Expr> stmtResultExprs = stmt_.getResultExprs();
-    if (stmtResultExprs.size() == 1) {
-      type_ = stmtResultExprs.get(0).getType();
-      Preconditions.checkState(!type_.isComplexType());
-    } else {
-      type_ = createStructTypeFromExprList();
-    }
-
-    // If the subquery may return more than one row, set its type to ArrayType.
-    if (!((SelectStmt)stmt_).returnsSingleRow()) type_ = new ArrayType(type_);
-
-    Preconditions.checkNotNull(type_);
-    isAnalyzed_ = true;
-  }
-
-  @Override
-  public boolean isConstant() { return false; }
-
-  /**
-   * Check if the subquery's SelectStmt returns a single column of scalar type.
-   */
-  public boolean returnsScalarColumn() {
-    ArrayList<Expr> stmtResultExprs = stmt_.getResultExprs();
-    if (stmtResultExprs.size() == 1 && stmtResultExprs.get(0).getType().isScalarType()) {
-      return true;
-    }
-    return false;
-  }
-
-  /**
-   * Create a StructType from the result expr list of a subquery's SelectStmt.
-   */
-  private StructType createStructTypeFromExprList() {
-    ArrayList<Expr> stmtResultExprs = stmt_.getResultExprs();
-    ArrayList<StructField> structFields = Lists.newArrayList();
-    // Check if we have unique labels
-    ArrayList<String> labels = stmt_.getColLabels();
-    boolean hasUniqueLabels = true;
-    if (Sets.newHashSet(labels).size() != labels.size()) hasUniqueLabels = false;
-
-    // Construct a StructField from each expr in the select list
-    for (int i = 0; i < stmtResultExprs.size(); ++i) {
-      Expr expr = stmtResultExprs.get(i);
-      String fieldName = null;
-      // Check if the label meets the Metastore's requirements.
-      if (MetaStoreUtils.validateName(labels.get(i))) {
-        fieldName = labels.get(i);
-        // Make sure the field names are unique.
-        if (!hasUniqueLabels) {
-          fieldName = "_" + Integer.toString(i) + "_" + fieldName;
-        }
-      } else {
-        // Use the expr ordinal to construct a StructField.
-        fieldName = "_" + Integer.toString(i);
-      }
-      Preconditions.checkNotNull(fieldName);
-      structFields.add(new StructField(fieldName, expr.getType(), null));
-    }
-    Preconditions.checkState(structFields.size() != 0);
-    return new StructType(structFields);
-  }
-
-  @Override
-  public Subquery clone() { return new Subquery(this); }
-
-  @Override
-  protected void toThrift(TExprNode msg) {}
-}

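The field-naming rule in createStructTypeFromExprList() above comes down to three cases: keep a label if it is a valid Metastore name and all labels are unique, prefix it with its ordinal if labels collide, and fall back to "_<ordinal>" if the label is not a valid name. The sketch below only illustrates that naming logic; isValidName() is a hypothetical stand-in for MetaStoreUtils.validateName(), and plain strings replace StructField.

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;

public class FieldNameDemo {
  // Stand-in for MetaStoreUtils.validateName(); the real check is different.
  static boolean isValidName(String s) { return s.matches("[a-zA-Z0-9_]+"); }

  static List<String> fieldNames(List<String> labels) {
    boolean hasUniqueLabels = new HashSet<>(labels).size() == labels.size();
    String[] out = new String[labels.size()];
    for (int i = 0; i < labels.size(); ++i) {
      String label = labels.get(i);
      if (isValidName(label)) {
        out[i] = hasUniqueLabels ? label : "_" + i + "_" + label;
      } else {
        out[i] = "_" + i;  // fall back to the expr ordinal
      }
    }
    return Arrays.asList(out);
  }

  public static void main(String[] args) {
    System.out.println(fieldNames(Arrays.asList("a", "a", "x+y")));
    // prints [_0_a, _1_a, _2]
  }
}
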
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/TableName.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/TableName.java b/fe/src/main/java/com/cloudera/impala/analysis/TableName.java
deleted file mode 100644
index 5fc8f7e..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/TableName.java
+++ /dev/null
@@ -1,123 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.List;
-
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
-
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TTableName;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-/**
- * Represents a table/view name that optionally includes its database (a fully qualified
- * table name). Analysis of this table name checks for validity of the database and
- * table name according to the Metastore's policy (see @MetaStoreUtils).
- * According to that definition, we can still use "invalid" table names for tables/views
- * that are not stored in the Metastore, e.g., for Inline Views or WITH-clause views.
- */
-public class TableName {
-  private final String db_;
-  private final String tbl_;
-
-  public TableName(String db, String tbl) {
-    super();
-    Preconditions.checkArgument(db == null || !db.isEmpty());
-    this.db_ = db;
-    Preconditions.checkNotNull(tbl);
-    this.tbl_ = tbl;
-  }
-
-  public String getDb() { return db_; }
-  public String getTbl() { return tbl_; }
-  public boolean isEmpty() { return tbl_.isEmpty(); }
-
-  /**
-   * Checks whether the db and table name meet the Metastore's requirements.
-   */
-  public void analyze() throws AnalysisException {
-    if (db_ != null) {
-      if (!MetaStoreUtils.validateName(db_)) {
-        throw new AnalysisException("Invalid database name: " + db_);
-      }
-    }
-    Preconditions.checkNotNull(tbl_);
-    if (!MetaStoreUtils.validateName(tbl_)) {
-      throw new AnalysisException("Invalid table/view name: " + tbl_);
-    }
-  }
-
-  /**
-   * Returns true if this name has a non-empty database field and a non-empty
-   * table name.
-   */
-  public boolean isFullyQualified() {
-    return db_ != null && !db_.isEmpty() && !tbl_.isEmpty();
-  }
-
-  public String toSql() {
-    // Enclose the database and/or table name in quotes if Hive cannot parse them
-    // without quotes. This is needed for view compatibility between Impala and Hive.
-    if (db_ == null) {
-      return ToSqlUtils.getIdentSql(tbl_);
-    } else {
-      return ToSqlUtils.getIdentSql(db_) + "." + ToSqlUtils.getIdentSql(tbl_);
-    }
-  }
-
-  @Override
-  public String toString() {
-    if (db_ == null) {
-      return tbl_;
-    } else {
-      return db_ + "." + tbl_;
-    }
-  }
-
-  public List<String> toPath() {
-    List<String> result = Lists.newArrayListWithCapacity(2);
-    if (db_ != null) result.add(db_);
-    result.add(tbl_);
-    return result;
-  }
-
-  public static TableName fromThrift(TTableName tableName) {
-    return new TableName(tableName.getDb_name(), tableName.getTable_name());
-  }
-
-  public TTableName toThrift() { return new TTableName(db_, tbl_); }
-
-  /**
-   * Returns true if the table names are considered equal. To check for equality,
-   * a case-insensitive comparison of the database and table name is performed.
-   */
-  @Override
-  public boolean equals(Object anObject) {
-    if (anObject instanceof TableName) {
-      return toString().toLowerCase().equals(anObject.toString().toLowerCase());
-    }
-    return false;
-  }
-
-  @Override
-  public int hashCode() {
-    return toString().toLowerCase().hashCode();
-  }
-}
\ No newline at end of file

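The equals()/hashCode() pair above makes TableName comparisons case-insensitive on the fully qualified name, so "Tpch.Lineitem" and "tpch.lineitem" identify the same table when used as a map or set key. A minimal, self-contained demonstration of that contract, using a hypothetical Name class rather than TableName itself:

import java.util.HashSet;
import java.util.Set;

public class NameDemo {
  static class Name {
    final String db_;
    final String tbl_;
    Name(String db, String tbl) { db_ = db; tbl_ = tbl; }
    @Override public String toString() { return db_ == null ? tbl_ : db_ + "." + tbl_; }
    @Override public boolean equals(Object o) {
      // Case-insensitive comparison of the fully qualified name, as in TableName.
      if (!(o instanceof Name)) return false;
      return toString().toLowerCase().equals(o.toString().toLowerCase());
    }
    @Override public int hashCode() { return toString().toLowerCase().hashCode(); }
  }

  public static void main(String[] args) {
    Set<Name> names = new HashSet<>();
    names.add(new Name("Tpch", "Lineitem"));
    System.out.println(names.contains(new Name("tpch", "lineitem")));  // true
  }
}
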
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/TableRef.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/TableRef.java b/fe/src/main/java/com/cloudera/impala/analysis/TableRef.java
deleted file mode 100644
index 0ff0575..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/TableRef.java
+++ /dev/null
@@ -1,587 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Set;
-
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.planner.JoinNode.DistributionMode;
-import com.cloudera.impala.thrift.TReplicaPreference;
-import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Sets;
-
-/**
- * Superclass of all table references, including references to views, base tables
- * (Hdfs, HBase or DataSource tables), and nested collections. Contains the join
- * specification. An instance of a TableRef (and not a subclass thereof) represents
- * an unresolved table reference that must be resolved during analysis. All resolved
- * table references are subclasses of TableRef.
- *
- * The analysis of table refs follows a two-step process:
- *
- * 1. Resolution: A table ref's path is resolved and then the generic TableRef is
- * replaced by a concrete table ref (a BaseTableRef, CollectionTableRef or ViewRef)
- * in the originating stmt and that is given the resolved path. This step is driven by
- * Analyzer.resolveTableRef().
- *
- * 2. Analysis/registration: After resolution, the concrete table ref is analyzed
- * to register a tuple descriptor for its resolved path and register other table-ref
- * specific state with the analyzer (e.g., whether it is outer/semi joined, etc.).
- *
- * Therefore, subclasses of TableRef should never call their superclass's analyze().
- *
- * TODO for 2.3: The current TableRef class hierarchy and the related two-phase analysis
- * feels convoluted and is hard to follow. We should reorganize the TableRef class
- * structure for clarity of analysis and avoid a table ref 'switching genders' in between
- * resolution and registration.
- *
- * TODO for 2.3: Rename this class to CollectionRef and re-consider the naming and
- * structure of all subclasses.
- */
-public class TableRef implements ParseNode {
-  // Path to a collection type. Not set for inline views.
-  protected List<String> rawPath_;
-
-  // Legal aliases of this table ref. Contains the explicit alias as its sole element if
-  // there is one. Otherwise, contains the two implicit aliases. Implicit aliases are set
-  // in the c'tor of the corresponding resolved table ref (subclasses of TableRef) during
-  // analysis. By convention, for table refs with multiple implicit aliases, aliases_[0]
-  // contains the fully-qualified implicit alias to ensure that aliases_[0] always
-  // uniquely identifies this table ref regardless of whether it has an explicit alias.
-  protected String[] aliases_;
-
-  // Indicates whether this table ref is given an explicit alias.
-  protected boolean hasExplicitAlias_;
-
-  protected JoinOperator joinOp_;
-  protected ArrayList<String> joinHints_;
-  protected List<String> usingColNames_;
-
-  protected ArrayList<String> tableHints_;
-  protected TReplicaPreference replicaPreference_;
-  protected boolean randomReplica_;
-
-  // Hinted distribution mode for this table ref; set after analyzeJoinHints()
-  // TODO: Move join-specific members out of TableRef.
-  private DistributionMode distrMode_ = DistributionMode.NONE;
-
-  /////////////////////////////////////////
-  // BEGIN: Members that need to be reset()
-
-  // Resolution of rawPath_ if applicable. Result of analysis.
-  protected Path resolvedPath_;
-
-  protected Expr onClause_;
-
-  // the ref to the left of us, if we're part of a JOIN clause
-  protected TableRef leftTblRef_;
-
-  // true if this TableRef has been analyzed; implementing subclass should set it to true
-  // at the end of analyze() call.
-  protected boolean isAnalyzed_;
-
-  // Lists of table ref ids and materialized tuple ids of the full sequence of table
-  // refs up to and including this one. These ids are cached during analysis because
-  // we may alter the chain of table refs during plan generation, but we still rely
-  // on the original list of ids for correct predicate assignment.
-  // Populated in analyzeJoin().
-  protected List<TupleId> allTableRefIds_ = Lists.newArrayList();
-  protected List<TupleId> allMaterializedTupleIds_ = Lists.newArrayList();
-
-  // All physical tuple ids that this table ref is correlated with:
-  // Tuple ids of root descriptors from outer query blocks that this table ref
-  // (if a CollectionTableRef) or contained CollectionTableRefs (if an InlineViewRef)
-  // are rooted at. Populated during analysis.
-  protected List<TupleId> correlatedTupleIds_ = Lists.newArrayList();
-
-  // analysis output
-  protected TupleDescriptor desc_;
-
-  // END: Members that need to be reset()
-  /////////////////////////////////////////
-
-  public TableRef(List<String> path, String alias) {
-    super();
-    rawPath_ = path;
-    if (alias != null) {
-      aliases_ = new String[] { alias.toLowerCase() };
-      hasExplicitAlias_ = true;
-    } else {
-      hasExplicitAlias_ = false;
-    }
-    isAnalyzed_ = false;
-    replicaPreference_ = null;
-    randomReplica_ = false;
-  }
-
-  /**
-   * C'tor for cloning.
-   */
-  protected TableRef(TableRef other) {
-    rawPath_ = other.rawPath_;
-    resolvedPath_ = other.resolvedPath_;
-    aliases_ = other.aliases_;
-    hasExplicitAlias_ = other.hasExplicitAlias_;
-    joinOp_ = other.joinOp_;
-    joinHints_ =
-        (other.joinHints_ != null) ? Lists.newArrayList(other.joinHints_) : null;
-    onClause_ = (other.onClause_ != null) ? other.onClause_.clone() : null;
-    usingColNames_ =
-        (other.usingColNames_ != null) ? Lists.newArrayList(other.usingColNames_) : null;
-    tableHints_ =
-        (other.tableHints_ != null) ? Lists.newArrayList(other.tableHints_) : null;
-    replicaPreference_ = other.replicaPreference_;
-    randomReplica_ = other.randomReplica_;
-    distrMode_ = other.distrMode_;
-    // The table ref links are created at the statement level, so cloning a set of linked
-    // table refs is the responsibility of the statement.
-    leftTblRef_ = null;
-    isAnalyzed_ = other.isAnalyzed_;
-    allTableRefIds_ = Lists.newArrayList(other.allTableRefIds_);
-    allMaterializedTupleIds_ = Lists.newArrayList(other.allMaterializedTupleIds_);
-    correlatedTupleIds_ = Lists.newArrayList(other.correlatedTupleIds_);
-    desc_ = other.desc_;
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    throw new IllegalStateException(
-        "Should not call analyze() on an unresolved TableRef.");
-  }
-
-  /**
-   * Creates and returns an empty TupleDescriptor registered with the analyzer
-   * based on the resolvedPath_.
-   * This method is called from the analyzer when registering this table reference.
-   */
-  public TupleDescriptor createTupleDescriptor(Analyzer analyzer)
-      throws AnalysisException {
-    TupleDescriptor result = analyzer.getDescTbl().createTupleDescriptor(
-        getClass().getSimpleName() + " " + getUniqueAlias());
-    result.setPath(resolvedPath_);
-    return result;
-  }
-
-  /**
-   * Set this table's context-dependent join attributes from the given table.
-   * Does not clone the attributes.
-   */
-  protected void setJoinAttrs(TableRef other) {
-    this.joinOp_ = other.joinOp_;
-    this.joinHints_ = other.joinHints_;
-    this.tableHints_ = other.tableHints_;
-    this.onClause_ = other.onClause_;
-    this.usingColNames_ = other.usingColNames_;
-  }
-
-  public JoinOperator getJoinOp() {
-    // if it's not explicitly set, we're doing an inner join
-    return (joinOp_ == null ? JoinOperator.INNER_JOIN : joinOp_);
-  }
-
-  public TReplicaPreference getReplicaPreference() { return replicaPreference_; }
-  public boolean getRandomReplica() { return randomReplica_; }
-
-  /**
-   * Returns true if this table ref has a resolved path that is rooted at a registered
-   * tuple descriptor, false otherwise.
-   */
-  public boolean isRelative() { return false; }
-
-  /**
-   * Indicates if this TableRef directly or indirectly references another TableRef from
-   * an outer query block.
-   */
-  public boolean isCorrelated() { return !correlatedTupleIds_.isEmpty(); }
-
-  public List<String> getPath() { return rawPath_; }
-  public Path getResolvedPath() { return resolvedPath_; }
-
-  /**
-   * Returns all legal aliases of this table ref.
-   */
-  public String[] getAliases() { return aliases_; }
-
-  /**
-   * Returns the explicit alias or the fully-qualified implicit alias. The returned alias
-   * is guaranteed to be unique (i.e., column/field references against the alias cannot
-   * be ambiguous).
-   */
-  public String getUniqueAlias() { return aliases_[0]; }
-
-  /**
-   * Returns true if this table ref has an explicit alias.
-   * Note that getAliases().length == 1 does not imply an explicit alias because
-   * nested collection refs have only a single implicit alias.
-   */
-  public boolean hasExplicitAlias() { return hasExplicitAlias_; }
-
-  /**
-   * Returns the explicit alias if this table ref has one, null otherwise.
-   */
-  public String getExplicitAlias() {
-    if (hasExplicitAlias()) return getUniqueAlias();
-    return null;
-  }
-
-  public Table getTable() {
-    Preconditions.checkNotNull(resolvedPath_);
-    return resolvedPath_.getRootTable();
-  }
-  public ArrayList<String> getJoinHints() { return joinHints_; }
-  public ArrayList<String> getTableHints() { return tableHints_; }
-  public Expr getOnClause() { return onClause_; }
-  public List<String> getUsingClause() { return usingColNames_; }
-  public void setJoinOp(JoinOperator op) { this.joinOp_ = op; }
-  public void setOnClause(Expr e) { this.onClause_ = e; }
-  public void setUsingClause(List<String> colNames) { this.usingColNames_ = colNames; }
-  public TableRef getLeftTblRef() { return leftTblRef_; }
-  public void setLeftTblRef(TableRef leftTblRef) { this.leftTblRef_ = leftTblRef; }
-  public void setJoinHints(ArrayList<String> hints) { this.joinHints_ = hints; }
-  public void setTableHints(ArrayList<String> hints) { this.tableHints_ = hints; }
-  public boolean isBroadcastJoin() { return distrMode_ == DistributionMode.BROADCAST; }
-  public boolean isPartitionedJoin() {
-    return distrMode_ == DistributionMode.PARTITIONED;
-  }
-  public DistributionMode getDistributionMode() { return distrMode_; }
-  public List<TupleId> getCorrelatedTupleIds() { return correlatedTupleIds_; }
-  public boolean isAnalyzed() { return isAnalyzed_; }
-  public boolean isResolved() { return !getClass().equals(TableRef.class); }
-
-  /**
-   * This method should only be called after the TableRef has been analyzed.
-   */
-  public TupleDescriptor getDesc() {
-    Preconditions.checkState(isAnalyzed_);
-    // after analyze(), desc should be set.
-    Preconditions.checkState(desc_ != null);
-    return desc_;
-  }
-
-  /**
-   * This method should only be called after the TableRef has been analyzed.
-   */
-  public TupleId getId() {
-    Preconditions.checkState(isAnalyzed_);
-    // after analyze(), desc should be set.
-    Preconditions.checkNotNull(desc_);
-    return desc_.getId();
-  }
-
-  public List<TupleId> getMaterializedTupleIds() {
-    // This function should only be called after analyze().
-    Preconditions.checkState(isAnalyzed_);
-    Preconditions.checkNotNull(desc_);
-    return desc_.getId().asList();
-  }
-
-  /**
-   * Returns the list of tuple ids materialized by the full sequence of
-   * table refs up to and including this one.
-   */
-  public List<TupleId> getAllMaterializedTupleIds() {
-    Preconditions.checkState(isAnalyzed_);
-    return allMaterializedTupleIds_;
-  }
-
-  /**
-   * Return the list of table ref ids of the full sequence of table refs up to
-   * and including this one.
-   */
-  public List<TupleId> getAllTableRefIds() {
-    Preconditions.checkState(isAnalyzed_);
-    return allTableRefIds_;
-  }
-
-  protected void analyzeHints(Analyzer analyzer) throws AnalysisException {
-    // We prefer adding warnings over throwing exceptions here to maintain view
-    // compatibility with Hive.
-    Preconditions.checkState(isResolved());
-    analyzeTableHints(analyzer);
-    analyzeJoinHints(analyzer);
-  }
-
-  private void analyzeTableHints(Analyzer analyzer) {
-    if (tableHints_ == null) return;
-    if (!(this instanceof BaseTableRef)) {
-      analyzer.addWarning("Table hints not supported for inline view and collections");
-      return;
-    }
-    // BaseTableRef will always have their path resolved at this point.
-    Preconditions.checkState(getResolvedPath() != null);
-    if (getResolvedPath().destTable() != null &&
-        !(getResolvedPath().destTable() instanceof HdfsTable)) {
-      analyzer.addWarning("Table hints only supported for Hdfs tables");
-    }
-    for (String hint: tableHints_) {
-      if (hint.equalsIgnoreCase("SCHEDULE_CACHE_LOCAL")) {
-        analyzer.setHasPlanHints();
-        replicaPreference_ = TReplicaPreference.CACHE_LOCAL;
-      } else if (hint.equalsIgnoreCase("SCHEDULE_DISK_LOCAL")) {
-        analyzer.setHasPlanHints();
-        replicaPreference_ = TReplicaPreference.DISK_LOCAL;
-      } else if (hint.equalsIgnoreCase("SCHEDULE_REMOTE")) {
-        analyzer.setHasPlanHints();
-        replicaPreference_ = TReplicaPreference.REMOTE;
-      } else if (hint.equalsIgnoreCase("SCHEDULE_RANDOM_REPLICA")) {
-        analyzer.setHasPlanHints();
-        randomReplica_ = true;
-      } else {
-        Preconditions.checkState(getAliases() != null && getAliases().length > 0);
-        analyzer.addWarning("Table hint not recognized for table " + getUniqueAlias() +
-            ": " + hint);
-      }
-    }
-  }
-
-  private void analyzeJoinHints(Analyzer analyzer) throws AnalysisException {
-    if (joinHints_ == null) return;
-    for (String hint: joinHints_) {
-      if (hint.equalsIgnoreCase("BROADCAST")) {
-        if (joinOp_ == JoinOperator.RIGHT_OUTER_JOIN
-            || joinOp_ == JoinOperator.FULL_OUTER_JOIN
-            || joinOp_ == JoinOperator.RIGHT_SEMI_JOIN
-            || joinOp_ == JoinOperator.RIGHT_ANTI_JOIN) {
-          throw new AnalysisException(
-              joinOp_.toString() + " does not support BROADCAST.");
-        }
-        if (isPartitionedJoin()) {
-          throw new AnalysisException("Conflicting JOIN hint: " + hint);
-        }
-        distrMode_ = DistributionMode.BROADCAST;
-        analyzer.setHasPlanHints();
-      } else if (hint.equalsIgnoreCase("SHUFFLE")) {
-        if (joinOp_ == JoinOperator.CROSS_JOIN) {
-          throw new AnalysisException("CROSS JOIN does not support SHUFFLE.");
-        }
-        if (isBroadcastJoin()) {
-          throw new AnalysisException("Conflicting JOIN hint: " + hint);
-        }
-        distrMode_ = DistributionMode.PARTITIONED;
-        analyzer.setHasPlanHints();
-      } else {
-        analyzer.addWarning("JOIN hint not recognized: " + hint);
-      }
-    }
-  }
-
-  /**
-   * Analyzes the join clause. Populates allTableRefIds_ and allMaterializedTupleIds_.
-   * The join clause can only be analyzed after the left table has been analyzed
-   * and the TupleDescriptor (desc) of this table has been created.
-   */
-  public void analyzeJoin(Analyzer analyzer) throws AnalysisException {
-    Preconditions.checkState(leftTblRef_ == null || leftTblRef_.isAnalyzed_);
-    Preconditions.checkState(desc_ != null);
-
-    // Populate the lists of all table ref and materialized tuple ids.
-    allTableRefIds_.clear();
-    allMaterializedTupleIds_.clear();
-    if (leftTblRef_ != null) {
-      allTableRefIds_.addAll(leftTblRef_.getAllTableRefIds());
-      allMaterializedTupleIds_.addAll(leftTblRef_.getAllMaterializedTupleIds());
-    }
-    allTableRefIds_.add(getId());
-    allMaterializedTupleIds_.addAll(getMaterializedTupleIds());
-
-    if (joinOp_ == JoinOperator.CROSS_JOIN) {
-      // A CROSS JOIN is always a broadcast join, regardless of the join hints
-      distrMode_ = DistributionMode.BROADCAST;
-    }
-
-    if (usingColNames_ != null) {
-      Preconditions.checkState(joinOp_ != JoinOperator.CROSS_JOIN);
-      // Turn USING clause into equivalent ON clause.
-      onClause_ = null;
-      for (String colName: usingColNames_) {
-        // check whether colName exists both for our table and the one
-        // to the left of us
-        Path leftColPath = new Path(leftTblRef_.getDesc(),
-            Lists.newArrayList(colName.toLowerCase()));
-        if (!leftColPath.resolve()) {
-          throw new AnalysisException(
-              "unknown column " + colName + " for alias "
-              + leftTblRef_.getUniqueAlias() + " (in \"" + this.toSql() + "\")");
-        }
-        Path rightColPath = new Path(desc_,
-            Lists.newArrayList(colName.toLowerCase()));
-        if (!rightColPath.resolve()) {
-          throw new AnalysisException(
-              "unknown column " + colName + " for alias "
-              + getUniqueAlias() + " (in \"" + this.toSql() + "\")");
-        }
-
-        // create predicate "<left>.colName = <right>.colName"
-        BinaryPredicate eqPred =
-            new BinaryPredicate(BinaryPredicate.Operator.EQ,
-              new SlotRef(Path.createRawPath(leftTblRef_.getUniqueAlias(), colName)),
-              new SlotRef(Path.createRawPath(getUniqueAlias(), colName)));
-        onClause_ = CompoundPredicate.createConjunction(eqPred, onClause_);
-      }
-    }
-
-    // at this point, both 'this' and leftTblRef have been analyzed and registered;
-    // register the tuple ids of the TableRefs on the nullable side of an outer join
-    if (joinOp_ == JoinOperator.LEFT_OUTER_JOIN
-        || joinOp_ == JoinOperator.FULL_OUTER_JOIN) {
-      analyzer.registerOuterJoinedTids(getId().asList(), this);
-    }
-    if (joinOp_ == JoinOperator.RIGHT_OUTER_JOIN
-        || joinOp_ == JoinOperator.FULL_OUTER_JOIN) {
-      analyzer.registerOuterJoinedTids(leftTblRef_.getAllTableRefIds(), this);
-    }
-    // register the tuple ids of a full outer join
-    if (joinOp_ == JoinOperator.FULL_OUTER_JOIN) {
-      analyzer.registerFullOuterJoinedTids(leftTblRef_.getAllTableRefIds(), this);
-      analyzer.registerFullOuterJoinedTids(getId().asList(), this);
-    }
-    // register the tuple id of the rhs of a left semi join
-    TupleId semiJoinedTupleId = null;
-    if (joinOp_ == JoinOperator.LEFT_SEMI_JOIN
-        || joinOp_ == JoinOperator.LEFT_ANTI_JOIN
-        || joinOp_ == JoinOperator.NULL_AWARE_LEFT_ANTI_JOIN) {
-      analyzer.registerSemiJoinedTid(getId(), this);
-      semiJoinedTupleId = getId();
-    }
-    // register the tuple id of the lhs of a right semi join
-    if (joinOp_ == JoinOperator.RIGHT_SEMI_JOIN
-        || joinOp_ == JoinOperator.RIGHT_ANTI_JOIN) {
-      analyzer.registerSemiJoinedTid(leftTblRef_.getId(), this);
-      semiJoinedTupleId = leftTblRef_.getId();
-    }
-
-    if (onClause_ != null) {
-      Preconditions.checkState(joinOp_ != JoinOperator.CROSS_JOIN);
-      analyzer.setVisibleSemiJoinedTuple(semiJoinedTupleId);
-      onClause_.analyze(analyzer);
-      analyzer.setVisibleSemiJoinedTuple(null);
-      onClause_.checkReturnsBool("ON clause", true);
-      if (onClause_.contains(Expr.isAggregatePredicate())) {
-        throw new AnalysisException(
-            "aggregate function not allowed in ON clause: " + toSql());
-      }
-      if (onClause_.contains(AnalyticExpr.class)) {
-        throw new AnalysisException(
-            "analytic expression not allowed in ON clause: " + toSql());
-      }
-      Set<TupleId> onClauseTupleIds = Sets.newHashSet();
-      List<Expr> conjuncts = onClause_.getConjuncts();
-      // Outer join clause conjuncts are registered for this particular table ref
-      // (ie, can only be evaluated by the plan node that implements this join).
-      // The exception are conjuncts that only pertain to the nullable side
-      // of the outer join; those can be evaluated directly when materializing tuples
-      // without violating outer join semantics.
-      analyzer.registerOnClauseConjuncts(conjuncts, this);
-      for (Expr e: conjuncts) {
-        List<TupleId> tupleIds = Lists.newArrayList();
-        e.getIds(tupleIds, null);
-        onClauseTupleIds.addAll(tupleIds);
-      }
-    } else if (!isRelative() && !isCorrelated()
-        && (getJoinOp().isOuterJoin() || getJoinOp().isSemiJoin())) {
-      throw new AnalysisException(
-          joinOp_.toString() + " requires an ON or USING clause.");
-    } else {
-      // Indicate that this table ref has an empty ON-clause.
-      analyzer.registerOnClauseConjuncts(Collections.<Expr>emptyList(), this);
-    }
-  }
-
-  protected String tableRefToSql() {
-    String aliasSql = null;
-    String alias = getExplicitAlias();
-    if (alias != null) aliasSql = ToSqlUtils.getIdentSql(alias);
-    List<String> path = rawPath_;
-    if (resolvedPath_ != null) path = resolvedPath_.getFullyQualifiedRawPath();
-    return ToSqlUtils.getPathSql(path) + ((aliasSql != null) ? " " + aliasSql : "");
-  }
-
-  @Override
-  public String toSql() {
-    if (joinOp_ == null) {
-      // prepend "," if we're part of a sequence of table refs w/o an
-      // explicit JOIN clause
-      return (leftTblRef_ != null ? ", " : "") + tableRefToSql();
-    }
-
-    StringBuilder output = new StringBuilder(" " + joinOp_.toString() + " ");
-    if (joinHints_ != null) output.append(ToSqlUtils.getPlanHintsSql(joinHints_) + " ");
-    output.append(tableRefToSql());
-    if (usingColNames_ != null) {
-      output.append(" USING (").append(Joiner.on(", ").join(usingColNames_)).append(")");
-    } else if (onClause_ != null) {
-      output.append(" ON ").append(onClause_.toSql());
-    }
-    return output.toString();
-  }
-
-  /**
-   * Returns a deep clone of this table ref without also cloning the chain of table refs.
-   * Sets leftTblRef_ in the returned clone to null.
-   */
-  @Override
-  protected TableRef clone() { return new TableRef(this); }
-
-  /**
-   * Deep copies the given list of table refs and returns the clones in a new list.
-   * The linking structure in the original table refs is preserved in the clones,
-   * i.e., if the table refs were originally linked, then the corresponding clones
-   * are linked in the same way. Similarly, if the original table refs were not linked
-   * then the clones are also not linked.
-   * Assumes that the given table refs are self-contained with respect to linking, i.e.,
-   * that no table ref links to another table ref not in the list.
-   */
-  public static List<TableRef> cloneTableRefList(List<TableRef> tblRefs) {
-    List<TableRef> clonedTblRefs = Lists.newArrayListWithCapacity(tblRefs.size());
-    TableRef leftTblRef = null;
-    for (TableRef tblRef: tblRefs) {
-      TableRef tblRefClone = tblRef.clone();
-      clonedTblRefs.add(tblRefClone);
-      if (tblRef.leftTblRef_ != null) {
-        Preconditions.checkState(tblRefs.contains(tblRef.leftTblRef_));
-        tblRefClone.leftTblRef_ = leftTblRef;
-      }
-      leftTblRef = tblRefClone;
-    }
-    return clonedTblRefs;
-  }
-
-  public void reset() {
-    isAnalyzed_ = false;
-    resolvedPath_ = null;
-    if (usingColNames_ != null) {
-      // The using col names are converted into an on-clause predicate during analysis,
-      // so unset the on-clause here.
-      onClause_ = null;
-    } else if (onClause_ != null) {
-      onClause_.reset();
-    }
-    leftTblRef_ = null;
-    allTableRefIds_.clear();
-    allMaterializedTupleIds_.clear();
-    correlatedTupleIds_.clear();
-    desc_ = null;
-  }
-}

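The USING-clause handling in analyzeJoin() above amounts to building one equality conjunct per column, "<left>.col = <right>.col", and ANDing them together into an equivalent ON clause. The sketch below works on plain strings instead of Impala's Expr/SlotRef/BinaryPredicate classes, so it only shows the shape of the rewrite:

import java.util.Arrays;
import java.util.List;

public class UsingToOnDemo {
  static String usingToOn(String leftAlias, String rightAlias, List<String> cols) {
    StringBuilder on = new StringBuilder();
    for (String col : cols) {
      if (on.length() > 0) on.append(" AND ");
      // One "<left>.col = <right>.col" conjunct per USING column.
      on.append(leftAlias).append(".").append(col)
        .append(" = ").append(rightAlias).append(".").append(col);
    }
    return on.toString();
  }

  public static void main(String[] args) {
    // ... a JOIN b USING (id, dt)   becomes   ... a JOIN b ON a.id = b.id AND a.dt = b.dt
    System.out.println(usingToOn("a", "b", Arrays.asList("id", "dt")));
  }
}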

[30/61] [partial] incubator-impala git commit: IMPALA-3786: Replace "cloudera" with "apache" (part 1)

Posted by kw...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/catalog/DatabaseNotFoundException.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/catalog/DatabaseNotFoundException.java b/fe/src/main/java/com/cloudera/impala/catalog/DatabaseNotFoundException.java
deleted file mode 100644
index 8affb11..0000000
--- a/fe/src/main/java/com/cloudera/impala/catalog/DatabaseNotFoundException.java
+++ /dev/null
@@ -1,29 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.catalog;
-
-
-/**
- * Thrown when a database cannot be found in the catalog.
- */
-public class DatabaseNotFoundException extends CatalogException {
-  // Dummy serial ID to satisfy Eclipse
-  private static final long serialVersionUID = -2203080667446640542L;
-
-  public DatabaseNotFoundException(String s) { super(s); }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/catalog/Db.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/catalog/Db.java b/fe/src/main/java/com/cloudera/impala/catalog/Db.java
deleted file mode 100644
index a9150fe..0000000
--- a/fe/src/main/java/com/cloudera/impala/catalog/Db.java
+++ /dev/null
@@ -1,495 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.catalog;
-
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.commons.codec.binary.Base64;
-import org.apache.thrift.protocol.TCompactProtocol;
-import org.apache.thrift.TException;
-import org.apache.thrift.TSerializer;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.catalog.Function;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.common.ImpalaRuntimeException;
-import com.cloudera.impala.common.JniUtil;
-import com.cloudera.impala.thrift.TCatalogObjectType;
-import com.cloudera.impala.thrift.TDatabase;
-import com.cloudera.impala.thrift.TFunction;
-import com.cloudera.impala.thrift.TFunctionBinaryType;
-import com.cloudera.impala.thrift.TFunctionCategory;
-import com.cloudera.impala.util.PatternMatcher;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-
-/**
- * Internal representation of db-related metadata. Owned by Catalog instance.
- * Not thread safe.
- *
- * Tables are stored in a map from the table name to the table object. They may
- * be loaded 'eagerly' at construction or 'lazily' on first reference.
- * Tables are accessed via getTable which may trigger a metadata read in two cases:
- *  * if the table has never been loaded
- *  * if the table loading failed on the previous attempt
- *
- * Native user-added functions are persisted to the parameters map of the Hive metastore
- * db object corresponding to this instance. The map key is the function signature and
- * the value is the base64 representation of the thrift-serialized function object.
- *
- */
-public class Db implements CatalogObject {
-  private static final Logger LOG = LoggerFactory.getLogger(Db.class);
-  private final Catalog parentCatalog_;
-  private final TDatabase thriftDb_;
-  private long catalogVersion_ = Catalog.INITIAL_CATALOG_VERSION;
-
-  public static final String FUNCTION_INDEX_PREFIX = "impala_registered_function_";
-
-  // Hive metastore imposes a limit of 4000 bytes on the key and value strings
-  // in the DB parameters map. We need to ensure that this limit isn't crossed
-  // while serializing functions to the metastore.
-  private static final int HIVE_METASTORE_DB_PARAM_LIMIT_BYTES = 4000;
-
-  // Table metadata cache.
-  private final CatalogObjectCache<Table> tableCache_;
-
-  // All of the registered user functions. The key is the user facing name (e.g. "myUdf"),
-  // and the values are all the overloaded variants (e.g. myUdf(double), myUdf(string))
-  // This includes both UDFs and UDAs. Updates are made thread safe by synchronizing
-  // on this map. When a new Db object is initialized, this list is updated with the
-  // UDF/UDAs already persisted, if any, in the metastore DB. Functions are sorted in a
-  // canonical order defined by FunctionResolutionOrder.
-  private final HashMap<String, List<Function>> functions_;
-
-  // If true, this database is an Impala system database.
-  // (e.g. can't drop it, can't add tables to it, etc).
-  private boolean isSystemDb_ = false;
-
-  public Db(String name, Catalog catalog,
-      org.apache.hadoop.hive.metastore.api.Database msDb) {
-    thriftDb_ = new TDatabase(name.toLowerCase());
-    parentCatalog_ = catalog;
-    thriftDb_.setMetastore_db(msDb);
-    tableCache_ = new CatalogObjectCache<Table>();
-    functions_ = new HashMap<String, List<Function>>();
-  }
-
-  public void setIsSystemDb(boolean b) { isSystemDb_ = b; }
-
-  /**
-   * Creates a Db object with no tables based on the given TDatabase thrift struct.
-   */
-  public static Db fromTDatabase(TDatabase db, Catalog parentCatalog) {
-    return new Db(db.getDb_name(), parentCatalog, db.getMetastore_db());
-  }
-
-  /**
-   * Updates the hms parameters map by adding the input <k,v> pair.
-   */
-  private void putToHmsParameters(String k, String v) {
-    org.apache.hadoop.hive.metastore.api.Database msDb = thriftDb_.metastore_db;
-    Preconditions.checkNotNull(msDb);
-    Map<String, String> hmsParams = msDb.getParameters();
-    if (hmsParams == null) hmsParams = Maps.newHashMap();
-    hmsParams.put(k,v);
-    msDb.setParameters(hmsParams);
-  }
-
-  /**
-   * Updates the hms parameters map by removing the <k,v> pair corresponding to
-   * input key <k>. Returns true if the parameters map contained an entry for <k>
-   * and it was removed, false otherwise.
-   */
-  private boolean removeFromHmsParameters(String k) {
-    org.apache.hadoop.hive.metastore.api.Database msDb = thriftDb_.metastore_db;
-    Preconditions.checkNotNull(msDb);
-    if (msDb.getParameters() == null) return false;
-    return msDb.getParameters().remove(k) != null;
-  }
-
-  public boolean isSystemDb() { return isSystemDb_; }
-  public TDatabase toThrift() { return thriftDb_; }
-  @Override
-  public String getName() { return thriftDb_.getDb_name(); }
-  @Override
-  public TCatalogObjectType getCatalogObjectType() {
-    return TCatalogObjectType.DATABASE;
-  }
-
-  /**
-   * Adds a table to the table cache.
-   */
-  public void addTable(Table table) {
-    tableCache_.add(table);
-  }
-
-  /**
-   * Gets all table names in the table cache.
-   */
-  public List<String> getAllTableNames() {
-    return Lists.newArrayList(tableCache_.keySet());
-  }
-
-  public boolean containsTable(String tableName) {
-    return tableCache_.contains(tableName.toLowerCase());
-  }
-
-  /**
-   * Returns the Table with the given name if present in the table cache or null if the
-   * table does not exist in the cache.
-   */
-  public Table getTable(String tblName) {
-    return tableCache_.get(tblName);
-  }
-
-  /**
-   * Removes the table name and any cached metadata from the Table cache.
-   */
-  public Table removeTable(String tableName) {
-    return tableCache_.remove(tableName.toLowerCase());
-  }
-
-  /**
-   * Comparator that sorts function overloads. We want overloads to be always considered
-   * in a canonical order so that overload resolution in the case of multiple valid
-   * overloads does not depend on the order in which functions are added to the Db. The
-   * order is based on the PrimitiveType enum because this was the order used implicitly
-   * for builtin operators and functions in earlier versions of Impala.
-   */
-  private static class FunctionResolutionOrder implements Comparator<Function> {
-    @Override
-    public int compare(Function f1, Function f2) {
-      int numSharedArgs = Math.min(f1.getNumArgs(), f2.getNumArgs());
-      for (int i = 0; i < numSharedArgs; ++i) {
-        int cmp = typeCompare(f1.getArgs()[i], f2.getArgs()[i]);
-        if (cmp < 0) {
-          return -1;
-        } else if (cmp > 0) {
-          return 1;
-        }
-      }
-      // Put alternative with fewer args first.
-      if (f1.getNumArgs() < f2.getNumArgs()) {
-        return -1;
-      } else if (f1.getNumArgs() > f2.getNumArgs()) {
-        return 1;
-      }
-      return 0;
-    }
-
-    private int typeCompare(Type t1, Type t2) {
-      Preconditions.checkState(!t1.isComplexType());
-      Preconditions.checkState(!t2.isComplexType());
-      return Integer.compare(t1.getPrimitiveType().ordinal(),
-          t2.getPrimitiveType().ordinal());
-    }
-  }
-
-  private static final FunctionResolutionOrder FUNCTION_RESOLUTION_ORDER =
-      new FunctionResolutionOrder();
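A brief worked example of this canonical order (a sketch, assuming INT precedes BIGINT in the PrimitiveType enum): the overloads fn(INT), fn(INT, INT) and fn(BIGINT) sort as fn(INT), fn(INT, INT), fn(BIGINT), because shared argument positions are compared first and, on a tie, the overload with fewer arguments comes first.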
-
-  /**
-   * Returns the metastore.api.Database object this Database was created from.
-   * Returns null for databases that are not backed by a Hive database, such as the
-   * builtins db.
-   */
-  public org.apache.hadoop.hive.metastore.api.Database getMetaStoreDb() {
-    return thriftDb_.getMetastore_db();
-  }
-
-  /**
-   * Returns the number of functions in this database.
-   */
-  public int numFunctions() {
-    synchronized (functions_) {
-      return functions_.size();
-    }
-  }
-
-  /**
-   * See comment in Catalog.
-   */
-  public boolean containsFunction(String name) {
-    synchronized (functions_) {
-      return functions_.get(name) != null;
-    }
-  }
-
-  /**
-   * See comment in Catalog.
-   */
-  public Function getFunction(Function desc, Function.CompareMode mode) {
-    synchronized (functions_) {
-      List<Function> fns = functions_.get(desc.functionName());
-      if (fns == null) return null;
-
-      // First check for identical
-      for (Function f: fns) {
-        if (f.compare(desc, Function.CompareMode.IS_IDENTICAL)) return f;
-      }
-      if (mode == Function.CompareMode.IS_IDENTICAL) return null;
-
-      // Next check for indistinguishable
-      for (Function f: fns) {
-        if (f.compare(desc, Function.CompareMode.IS_INDISTINGUISHABLE)) return f;
-      }
-      if (mode == Function.CompareMode.IS_INDISTINGUISHABLE) return null;
-
-      // Next check for strict supertypes
-      for (Function f: fns) {
-        if (f.compare(desc, Function.CompareMode.IS_SUPERTYPE_OF)) return f;
-      }
-      if (mode == Function.CompareMode.IS_SUPERTYPE_OF) return null;
-
-      // Finally check for non-strict supertypes
-      for (Function f: fns) {
-        if (f.compare(desc, Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF)) return f;
-      }
-    }
-    return null;
-  }
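A minimal usage sketch of this cascade (hypothetical; 'searchDesc' stands for a Function built from a call site's argument types). Passing the loosest mode still prefers the tightest available match, because the modes are tried in order:

    // Tries IS_IDENTICAL, then IS_INDISTINGUISHABLE, then IS_SUPERTYPE_OF,
    // then IS_NONSTRICT_SUPERTYPE_OF, returning the first match found.
    Function match = db.getFunction(searchDesc,
        Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
    if (match == null) { /* no applicable overload */ }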
-
-  public Function getFunction(String signatureString) {
-    synchronized (functions_) {
-      for (List<Function> fns: functions_.values()) {
-        for (Function f: fns) {
-          if (f.signatureString().equals(signatureString)) return f;
-        }
-      }
-    }
-    return null;
-  }
-
-  /**
-   * Adds the user defined function fn to metastore DB params. fn is
-   * serialized to thrift using TCompactProtocol and then base64-encoded
-   * to be compatible with the HMS' representation of params.
-   */
-  private boolean addFunctionToDbParams(Function fn) {
-    Preconditions.checkState(
-        fn.getBinaryType() != TFunctionBinaryType.BUILTIN &&
-        fn.getBinaryType() != TFunctionBinaryType.JAVA);
-    try {
-      TSerializer serializer =
-          new TSerializer(new TCompactProtocol.Factory());
-      byte[] serializedFn = serializer.serialize(fn.toThrift());
-      String base64Fn = Base64.encodeBase64String(serializedFn);
-      String fnKey = FUNCTION_INDEX_PREFIX + fn.signatureString();
-      if (base64Fn.length() > HIVE_METASTORE_DB_PARAM_LIMIT_BYTES) {
-        throw new ImpalaRuntimeException(
-            "Serialized function size exceeded HMS 4K byte limit");
-      }
-      putToHmsParameters(fnKey, base64Fn);
-    } catch (ImpalaException | TException  e) {
-      LOG.error("Error adding function " + fn.getName() + " to DB params", e);
-      return false;
-    }
-    return true;
-  }
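A sketch of the persistence format this produces in the HMS Database parameters map (the signature "my_udf(INT)" is hypothetical; the value is the base64 encoding of the compact-protocol thrift serialization of the function):

    // key:   "impala_registered_function_my_udf(INT)"
    // value: Base64.encodeBase64String(
    //     new TSerializer(new TCompactProtocol.Factory()).serialize(fn.toThrift()))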
-
-  public boolean addFunction(Function fn) {
-    // We use the db parameters map to persist native and IR functions.
-    boolean addToDbParams =
-        (fn.getBinaryType() == TFunctionBinaryType.NATIVE ||
-         fn.getBinaryType() == TFunctionBinaryType.IR);
-    return addFunction(fn, addToDbParams);
-  }
-
-  /**
-   * Registers the function fn to this database. If addToDbParams is true,
-   * fn is added to the metastore DB params. Returns false if the function
-   * fn already exists or if adding it to the metastore DB params fails;
-   * returns true otherwise.
-   */
-  public boolean addFunction(Function fn, boolean addToDbParams) {
-    Preconditions.checkState(fn.dbName().equals(getName()));
-    synchronized (functions_) {
-      if (getFunction(fn, Function.CompareMode.IS_INDISTINGUISHABLE) != null) {
-        return false;
-      }
-      List<Function> fns = functions_.get(fn.functionName());
-      if (fns == null) {
-        fns = Lists.newArrayList();
-        functions_.put(fn.functionName(), fns);
-      }
-      if (addToDbParams && !addFunctionToDbParams(fn)) return false;
-      fns.add(fn);
-      Collections.sort(fns, FUNCTION_RESOLUTION_ORDER);
-      return true;
-    }
-  }
-
-  /**
-   * See comment in Catalog.
-   */
-  public Function removeFunction(Function desc) {
-    synchronized (functions_) {
-      Function fn = getFunction(desc, Function.CompareMode.IS_INDISTINGUISHABLE);
-      if (fn == null) return null;
-      List<Function> fns = functions_.get(desc.functionName());
-      Preconditions.checkNotNull(fns);
-      fns.remove(fn);
-      if (fns.isEmpty()) functions_.remove(desc.functionName());
-      if (fn.getBinaryType() == TFunctionBinaryType.JAVA) return fn;
-      // Remove the function from the metastore database parameters
-      String fnKey = FUNCTION_INDEX_PREFIX + fn.signatureString();
-      boolean removeFn = removeFromHmsParameters(fnKey);
-      Preconditions.checkState(removeFn);
-      return fn;
-    }
-  }
-
-  /**
-   * Removes a Function with the matching signature string. Returns the removed Function
-   * if a Function was removed as a result of this call, null otherwise.
-   * TODO: Move away from using signature strings and instead use Function IDs.
-   */
-  public Function removeFunction(String signatureStr) {
-    synchronized (functions_) {
-      Function targetFn = getFunction(signatureStr);
-      if (targetFn != null) return removeFunction(targetFn);
-    }
-    return null;
-  }
-
-  /**
-   * Add a builtin with the specified name and signatures to this db.
-   * This defaults to not using a Prepare/Close function.
-   */
-  public void addScalarBuiltin(String fnName, String symbol, boolean userVisible,
-      boolean varArgs, Type retType, Type ... args) {
-    addScalarBuiltin(fnName, symbol, userVisible, null, null, varArgs, retType, args);
-  }
-
-  /**
-   * Add a builtin with the specified name and signatures to this db.
-   */
-  public void addScalarBuiltin(String fnName, String symbol, boolean userVisible,
-      String prepareFnSymbol, String closeFnSymbol, boolean varArgs, Type retType,
-      Type ... args) {
-    Preconditions.checkState(isSystemDb());
-    addBuiltin(ScalarFunction.createBuiltin(
-        fnName, Lists.newArrayList(args), varArgs, retType,
-        symbol, prepareFnSymbol, closeFnSymbol, userVisible));
-  }
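A hedged sketch of registering a builtin against the system db (the function name and symbol are placeholders; real builtins pass the mangled backend symbol):

    // Hypothetical: a user-visible, non-vararg DOUBLE -> DOUBLE builtin.
    builtinsDb.addScalarBuiltin("my_abs", "<mangled BE symbol>", true,
        /* varArgs */ false, Type.DOUBLE, Type.DOUBLE);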
-
-  /**
-   * Adds a builtin to this database. The function must not already exist.
-   */
-  public void addBuiltin(Function fn) {
-    Preconditions.checkState(isSystemDb());
-    Preconditions.checkState(fn != null);
-    Preconditions.checkState(getFunction(fn, Function.CompareMode.IS_IDENTICAL) == null);
-    addFunction(fn, false);
-  }
-
-  /**
-   * Returns a map of functionNames to lists of (overloaded) functions with that name.
-   * This is not thread safe so a higher level lock must be taken while iterating
-   * over the returned functions.
-   */
-  protected HashMap<String, List<Function>> getAllFunctions() {
-    return functions_;
-  }
-
-  /**
-   * Returns a list of transient functions in this Db.
-   */
-  protected List<Function> getTransientFunctions() {
-    List<Function> result = Lists.newArrayList();
-    synchronized (functions_) {
-      for (String fnKey: functions_.keySet()) {
-        for (Function fn: functions_.get(fnKey)) {
-          if (fn.userVisible() && !fn.isPersistent()) {
-            result.add(fn);
-          }
-        }
-      }
-    }
-    return result;
-  }
-
-  /**
-   * Returns all functions that match the pattern of 'matcher'.
-   */
-  public List<Function> getFunctions(TFunctionCategory category,
-      PatternMatcher matcher) {
-    Preconditions.checkNotNull(matcher);
-    List<Function> result = Lists.newArrayList();
-    synchronized (functions_) {
-      for (Map.Entry<String, List<Function>> fns: functions_.entrySet()) {
-        if (!matcher.matches(fns.getKey())) continue;
-        for (Function fn: fns.getValue()) {
-          if ((category == null || Function.categoryMatch(fn, category))
-              && fn.userVisible()) {
-            result.add(fn);
-          }
-        }
-      }
-    }
-    return result;
-  }
-
-  /**
-   * Returns all functions with the given name
-   */
-  public List<Function> getFunctions(String name) {
-    List<Function> result = Lists.newArrayList();
-    Preconditions.checkNotNull(name);
-    synchronized (functions_) {
-      if (!functions_.containsKey(name)) return result;
-      for (Function fn: functions_.get(name)) {
-        if (fn.userVisible()) result.add(fn);
-      }
-    }
-    return result;
-  }
-
-  /**
-   * Returns all functions with the given name and category.
-   */
-  public List<Function> getFunctions(TFunctionCategory category, String name) {
-    List<Function> result = Lists.newArrayList();
-    Preconditions.checkNotNull(category);
-    Preconditions.checkNotNull(name);
-    synchronized (functions_) {
-      if (!functions_.containsKey(name)) return result;
-      for (Function fn: functions_.get(name)) {
-        if (fn.userVisible() && Function.categoryMatch(fn, category)) {
-          result.add(fn);
-        }
-      }
-    }
-    return result;
-  }
-
-  @Override
-  public long getCatalogVersion() { return catalogVersion_; }
-  @Override
-  public void setCatalogVersion(long newVersion) { catalogVersion_ = newVersion; }
-  public Catalog getParentCatalog() { return parentCatalog_; }
-
-  @Override
-  public boolean isLoaded() { return true; }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/catalog/Function.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/catalog/Function.java b/fe/src/main/java/com/cloudera/impala/catalog/Function.java
deleted file mode 100644
index 406e958..0000000
--- a/fe/src/main/java/com/cloudera/impala/catalog/Function.java
+++ /dev/null
@@ -1,488 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.catalog;
-
-import java.util.List;
-
-import com.cloudera.impala.analysis.FunctionName;
-import com.cloudera.impala.analysis.HdfsUri;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.service.FeSupport;
-import com.cloudera.impala.thrift.TAggregateFunction;
-import com.cloudera.impala.thrift.TCatalogObjectType;
-import com.cloudera.impala.thrift.TColumnType;
-import com.cloudera.impala.thrift.TFunction;
-import com.cloudera.impala.thrift.TFunctionBinaryType;
-import com.cloudera.impala.thrift.TFunctionCategory;
-import com.cloudera.impala.thrift.TScalarFunction;
-import com.cloudera.impala.thrift.TSymbolLookupParams;
-import com.cloudera.impala.thrift.TSymbolLookupResult;
-import com.cloudera.impala.thrift.TSymbolType;
-import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-
-/**
- * Base class for all functions.
- * Each function can be of the following 4 types.
- * - Native/IR stored in db params (persisted, visible to Impala)
- * - Hive UDFs stored in the HMS (visible to Hive + Impala)
- * - Java UDFs which are not persisted (visible to Impala but not Hive)
- * - Builtin functions, which are recreated after every restart of the
- *   catalog. (persisted, visible to Impala)
- */
-public class Function implements CatalogObject {
-  // Enum for how to compare function signatures.
-  // For decimal types, the type in the function can be a wildcard, i.e. decimal(*,*).
-  // The wildcard can *only* exist as the function's type; the caller will always be a
-  // fully specified decimal.
-  // For the purposes of function type resolution, decimal(*,*) will match exactly
-  // with any fully specified decimal (i.e. fn(decimal(*,*)) matches identically for
-  // the call to fn(decimal(1,0))).
-  public enum CompareMode {
-    // Two signatures are identical if the number of arguments and their types match
-    // exactly and either both signatures are varargs or neither.
-    IS_IDENTICAL,
-
-    // Two signatures are indistinguishable if there is no way to tell them apart
-    // when matching a particular instantiation. That is, their fixed arguments
-    // match exactly and the remaining varargs have the same type.
-    // e.g. fn(int, int, int) and fn(int...)
-    // Argument types that are NULL are ignored when doing this comparison.
-    // e.g. fn(NULL, int) is indistinguishable from fn(int, int)
-    IS_INDISTINGUISHABLE,
-
-    // X is a supertype of Y if Y.arg[i] can be strictly implicitly cast to X.arg[i]. If
-    // X has varargs, the remaining arguments of Y must be strictly implicitly castable
-    // to the var arg type. The key property this provides is that X can be used in place
-    // of Y. e.g. fn(int, double, string...) is a supertype of fn(tinyint, float, string,
-    // string)
-    IS_SUPERTYPE_OF,
-
-    // Nonstrict supertypes broaden the definition of supertype to accept implicit casts
-    // of arguments that may result in loss of precision - e.g. decimal to float.
-    IS_NONSTRICT_SUPERTYPE_OF,
-  }
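For example, extending the decimal-to-float case mentioned above: fn(FLOAT) is not a strict supertype of a call with a DECIMAL argument, because the implicit decimal-to-float cast can lose precision, so that call only resolves to fn(FLOAT) under IS_NONSTRICT_SUPERTYPE_OF.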
-
-  // User specified function name e.g. "Add"
-  private FunctionName name_;
-
-  private final Type retType_;
-  // Array of parameter types. Empty array if this function does not have parameters.
-  private Type[] argTypes_;
-
-  // If true, this function has variable arguments.
-  // TODO: we don't currently support varargs with no fixed types. i.e. fn(...)
-  private boolean hasVarArgs_;
-
-  // If true (default), this function is called directly by the user. For operators,
-  // this is false. If false, it also means the function is not visible from
-  // 'show functions'.
-  private boolean userVisible_;
-
-  // Absolute path in HDFS for the binary that contains this function.
-  // e.g. /udfs/udfs.jar
-  private HdfsUri location_;
-  private TFunctionBinaryType binaryType_;
-
-  // Set to true for functions that survive service restarts, including all builtins,
-  // native and IR functions, but only Java functions created without a signature.
-  private boolean isPersistent_;
-  private long catalogVersion_ =  Catalog.INITIAL_CATALOG_VERSION;
-
-  public Function(FunctionName name, Type[] argTypes,
-      Type retType, boolean varArgs) {
-    this.name_ = name;
-    this.hasVarArgs_ = varArgs;
-    if (argTypes == null) {
-      argTypes_ = new Type[0];
-    } else {
-      this.argTypes_ = argTypes;
-    }
-    if (retType == null) {
-      this.retType_ = ScalarType.INVALID;
-    } else {
-      this.retType_ = retType;
-    }
-    this.userVisible_ = true;
-  }
-
-  public Function(FunctionName name, List<Type> args,
-      Type retType, boolean varArgs) {
-    this(name, (Type[])null, retType, varArgs);
-    if (args != null && args.size() > 0) {
-      argTypes_ = args.toArray(new Type[args.size()]);
-    } else {
-      argTypes_ = new Type[0];
-    }
-  }
-
-  /**
-   * Static helper method to create a function with a given TFunctionBinaryType.
-   */
-  public static Function createFunction(String db, String fnName, List<Type> args,
-      Type retType, boolean varArgs, TFunctionBinaryType fnType) {
-    Function fn =
-        new Function(new FunctionName(db, fnName), args, retType, varArgs);
-    fn.setBinaryType(fnType);
-    return fn;
-  }
-
-  public FunctionName getFunctionName() { return name_; }
-  public String functionName() { return name_.getFunction(); }
-  public String dbName() { return name_.getDb(); }
-  public Type getReturnType() { return retType_; }
-  public Type[] getArgs() { return argTypes_; }
-  // Returns the number of arguments to this function.
-  public int getNumArgs() { return argTypes_.length; }
-  public HdfsUri getLocation() { return location_; }
-  public TFunctionBinaryType getBinaryType() { return binaryType_; }
-  public boolean hasVarArgs() { return hasVarArgs_; }
-  public boolean isPersistent() { return isPersistent_; }
-  public boolean userVisible() { return userVisible_; }
-  public Type getVarArgsType() {
-    if (!hasVarArgs_) return Type.INVALID;
-    Preconditions.checkState(argTypes_.length > 0);
-    return argTypes_[argTypes_.length - 1];
-  }
-
-  public void setName(FunctionName name) { name_ = name; }
-  public void setLocation(HdfsUri loc) { location_ = loc; }
-  public void setBinaryType(TFunctionBinaryType type) { binaryType_ = type; }
-  public void setHasVarArgs(boolean v) { hasVarArgs_ = v; }
-  public void setIsPersistent(boolean v) { isPersistent_ = v; }
-  public void setUserVisible(boolean b) { userVisible_ = b; }
-
-  // Returns a string with the signature in human readable format:
-  // FnName(argtype1, argtype2).  e.g. Add(int, int)
-  public String signatureString() {
-    StringBuilder sb = new StringBuilder();
-    sb.append(name_.getFunction())
-      .append("(")
-      .append(Joiner.on(", ").join(argTypes_));
-    if (hasVarArgs_) sb.append("...");
-    sb.append(")");
-    return sb.toString();
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (!(o instanceof Function)) return false;
-    return compare((Function)o, CompareMode.IS_IDENTICAL);
-  }
-
-  // Compares this to 'other' for mode.
-  public boolean compare(Function other, CompareMode mode) {
-    switch (mode) {
-      case IS_IDENTICAL: return isIdentical(other);
-      case IS_INDISTINGUISHABLE: return isIndistinguishable(other);
-      case IS_SUPERTYPE_OF: return isSuperTypeOf(other, true);
-      case IS_NONSTRICT_SUPERTYPE_OF: return isSuperTypeOf(other, false);
-      default:
-        Preconditions.checkState(false);
-        return false;
-    }
-  }
-  /**
-   * Returns true if 'this' is a supertype of 'other'. Each argument in other must
-   * be implicitly castable to the matching argument in this. If strict is true,
-   * only consider conversions where there is no loss of precision.
-   */
-  private boolean isSuperTypeOf(Function other, boolean strict) {
-    if (!other.name_.equals(name_)) return false;
-    if (!this.hasVarArgs_ && other.argTypes_.length != this.argTypes_.length) {
-      return false;
-    }
-    if (this.hasVarArgs_ && other.argTypes_.length < this.argTypes_.length) return false;
-    for (int i = 0; i < this.argTypes_.length; ++i) {
-      if (!Type.isImplicitlyCastable(other.argTypes_[i], this.argTypes_[i], strict)) {
-        return false;
-      }
-    }
-    // Check trailing varargs.
-    if (this.hasVarArgs_) {
-      for (int i = this.argTypes_.length; i < other.argTypes_.length; ++i) {
-        if (other.argTypes_[i].matchesType(this.getVarArgsType())) continue;
-        if (!Type.isImplicitlyCastable(other.argTypes_[i], this.getVarArgsType(),
-              strict)) {
-          return false;
-        }
-      }
-    }
-    return true;
-  }
-
-  /**
-   * Converts any CHAR arguments to be STRING arguments
-   */
-  public Function promoteCharsToStrings() {
-    Type[] promoted = argTypes_.clone();
-    for (int i = 0; i < promoted.length; ++i) {
-      if (promoted[i].isScalarType(PrimitiveType.CHAR)) promoted[i] = ScalarType.STRING;
-    }
-    return new Function(name_, promoted, retType_, hasVarArgs_);
-  }
-
-  /**
-   * Given a list of functions which are a super type of this function, select the best
-   * match. This is the one which requires the fewest type promotions.
-   */
-  public Function selectClosestSuperType(List<Function> candidates) {
-    Preconditions.checkArgument(candidates.size() > 0);
-    if (candidates.size() == 1) return candidates.get(0);
-
-    // Always promote CHAR to STRING before attempting any other promotions.
-    Function withStrs = promoteCharsToStrings();
-    for (Function f: candidates) {
-      if (withStrs.isIndistinguishable(f)) return f;
-    }
-    // Otherwise, we fall back to the previous resolution rule and take the first
-    // one in the list.
-    return candidates.get(0);
-  }
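In other words, for a call with a CHAR argument the lookup first rewrites CHAR arguments to STRING; the first candidate that is indistinguishable from the rewritten signature is chosen, and if none is, the first candidate in the (canonically sorted) list wins.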
-
-  private boolean isIdentical(Function o) {
-    if (!o.name_.equals(name_)) return false;
-    if (o.argTypes_.length != this.argTypes_.length) return false;
-    if (o.hasVarArgs_ != this.hasVarArgs_) return false;
-    for (int i = 0; i < this.argTypes_.length; ++i) {
-      if (!o.argTypes_[i].matchesType(this.argTypes_[i])) return false;
-    }
-    return true;
-  }
-
-  private boolean isIndistinguishable(Function o) {
-    if (!o.name_.equals(name_)) return false;
-    int minArgs = Math.min(o.argTypes_.length, this.argTypes_.length);
-    // The first fully specified args must be identical.
-    for (int i = 0; i < minArgs; ++i) {
-      if (o.argTypes_[i].isNull() || this.argTypes_[i].isNull()) continue;
-      if (!o.argTypes_[i].matchesType(this.argTypes_[i])) return false;
-    }
-    if (o.argTypes_.length == this.argTypes_.length) return true;
-
-    if (o.hasVarArgs_ && this.hasVarArgs_) {
-      if (!o.getVarArgsType().matchesType(this.getVarArgsType())) return false;
-      if (this.getNumArgs() > o.getNumArgs()) {
-        for (int i = minArgs; i < this.getNumArgs(); ++i) {
-          if (this.argTypes_[i].isNull()) continue;
-          if (!this.argTypes_[i].matchesType(o.getVarArgsType())) return false;
-        }
-      } else {
-        for (int i = minArgs; i < o.getNumArgs(); ++i) {
-          if (o.argTypes_[i].isNull()) continue;
-          if (!o.argTypes_[i].matchesType(this.getVarArgsType())) return false;
-        }
-      }
-      return true;
-    } else if (o.hasVarArgs_) {
-      // o has var args so check the remaining arguments from this
-      if (o.getNumArgs() > minArgs) return false;
-      for (int i = minArgs; i < this.getNumArgs(); ++i) {
-        if (this.argTypes_[i].isNull()) continue;
-        if (!this.argTypes_[i].matchesType(o.getVarArgsType())) return false;
-      }
-      return true;
-    } else if (this.hasVarArgs_) {
-      // this has var args so check the remaining arguments from o
-      if (this.getNumArgs() > minArgs) return false;
-      for (int i = minArgs; i < o.getNumArgs(); ++i) {
-        if (o.argTypes_[i].isNull()) continue;
-        if (!o.argTypes_[i].matchesType(this.getVarArgsType())) return false;
-      }
-      return true;
-    } else {
-      // Neither has var args and the lengths don't match
-      return false;
-    }
-  }
-
-  @Override
-  public TCatalogObjectType getCatalogObjectType() { return TCatalogObjectType.FUNCTION; }
-
-  @Override
-  public long getCatalogVersion() { return catalogVersion_; }
-
-  @Override
-  public void setCatalogVersion(long newVersion) { catalogVersion_ = newVersion; }
-
-  @Override
-  public String getName() { return getFunctionName().toString(); }
-
-  // Child classes must override this function.
-  public String toSql(boolean ifNotExists) { return ""; }
-
-  public TFunction toThrift() {
-    TFunction fn = new TFunction();
-    fn.setSignature(signatureString());
-    fn.setName(name_.toThrift());
-    fn.setBinary_type(binaryType_);
-    if (location_ != null) fn.setHdfs_location(location_.toString());
-    fn.setArg_types(Type.toThrift(argTypes_));
-    fn.setRet_type(getReturnType().toThrift());
-    fn.setHas_var_args(hasVarArgs_);
-    fn.setIs_persistent(isPersistent_);
-    // TODO: Comment field is missing?
-    // fn.setComment(comment_)
-    return fn;
-  }
-
-  public static Function fromThrift(TFunction fn) {
-    List<Type> argTypes = Lists.newArrayList();
-    for (TColumnType t: fn.getArg_types()) {
-      argTypes.add(Type.fromThrift(t));
-    }
-
-    Function function = null;
-    if (fn.isSetScalar_fn()) {
-      TScalarFunction scalarFn = fn.getScalar_fn();
-      function = new ScalarFunction(FunctionName.fromThrift(fn.getName()), argTypes,
-          Type.fromThrift(fn.getRet_type()), new HdfsUri(fn.getHdfs_location()),
-          scalarFn.getSymbol(), scalarFn.getPrepare_fn_symbol(),
-          scalarFn.getClose_fn_symbol());
-    } else if (fn.isSetAggregate_fn()) {
-      TAggregateFunction aggFn = fn.getAggregate_fn();
-      function = new AggregateFunction(FunctionName.fromThrift(fn.getName()), argTypes,
-          Type.fromThrift(fn.getRet_type()),
-          Type.fromThrift(aggFn.getIntermediate_type()),
-          new HdfsUri(fn.getHdfs_location()), aggFn.getUpdate_fn_symbol(),
-          aggFn.getInit_fn_symbol(), aggFn.getSerialize_fn_symbol(),
-          aggFn.getMerge_fn_symbol(), aggFn.getGet_value_fn_symbol(),
-          null, aggFn.getFinalize_fn_symbol());
-    } else {
-      // In the case where we are trying to look up the object, we only have the
-      // signature.
-      function = new Function(FunctionName.fromThrift(fn.getName()),
-          argTypes, Type.fromThrift(fn.getRet_type()), fn.isHas_var_args());
-    }
-    function.setBinaryType(fn.getBinary_type());
-    function.setHasVarArgs(fn.isHas_var_args());
-    if (fn.isSetIs_persistent()) {
-      function.setIsPersistent(fn.isIs_persistent());
-    } else {
-      function.setIsPersistent(false);
-    }
-    return function;
-  }
-
-  @Override
-  public boolean isLoaded() { return true; }
-
-  // Returns the resolved symbol in the binary. The BE will do a lookup of 'symbol'
-  // in the binary and try to resolve unmangled names.
-  // If this function is expecting a return argument, retArgType is that type. It should
-  // be null if this function isn't expecting a return argument.
-  public String lookupSymbol(String symbol, TSymbolType symbolType, Type retArgType,
-      boolean hasVarArgs, Type... argTypes) throws AnalysisException {
-    if (symbol.length() == 0) {
-      if (binaryType_ == TFunctionBinaryType.BUILTIN) {
-        // We allow empty builtin symbols in order to stage work in the FE before it is
-        // implemented in the BE.
-        return symbol;
-      }
-      throw new AnalysisException("Could not find symbol ''");
-    }
-
-    TSymbolLookupParams lookup = new TSymbolLookupParams();
-    // Builtin functions do not have an external library; they are loaded directly from
-    // the running process.
-    lookup.location =  binaryType_ != TFunctionBinaryType.BUILTIN ?
-        location_.toString() : "";
-    lookup.symbol = symbol;
-    lookup.symbol_type = symbolType;
-    lookup.fn_binary_type = binaryType_;
-    lookup.arg_types = Type.toThrift(argTypes);
-    lookup.has_var_args = hasVarArgs;
-    if (retArgType != null) lookup.setRet_arg_type(retArgType.toThrift());
-
-    try {
-      TSymbolLookupResult result = FeSupport.LookupSymbol(lookup);
-      switch (result.result_code) {
-        case SYMBOL_FOUND:
-          return result.symbol;
-        case BINARY_NOT_FOUND:
-          Preconditions.checkState(binaryType_ != TFunctionBinaryType.BUILTIN);
-          throw new AnalysisException(
-              "Could not load binary: " + location_.getLocation() + "\n" +
-              result.error_msg);
-        case SYMBOL_NOT_FOUND:
-          throw new AnalysisException(result.error_msg);
-        default:
-          // Should never get here.
-          throw new AnalysisException("Internal Error");
-      }
-    } catch (InternalException e) {
-      // Should never get here.
-      e.printStackTrace();
-      throw new AnalysisException("Could not find symbol: " + symbol, e);
-    }
-  }
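A minimal usage sketch (the symbol and argument type are hypothetical, and TSymbolType.UDF_EVALUATE is assumed to be the entry-point symbol type):

    // Ask the BE to resolve the UDF's evaluate entry point in its binary;
    // on success the resolved (possibly mangled) symbol is returned.
    String resolved = fn.lookupSymbol("MyUdf", TSymbolType.UDF_EVALUATE,
        null, /* hasVarArgs */ false, Type.INT);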
-
-  public String lookupSymbol(String symbol, TSymbolType symbolType)
-      throws AnalysisException {
-    Preconditions.checkState(
-        symbolType == TSymbolType.UDF_PREPARE || symbolType == TSymbolType.UDF_CLOSE);
-    return lookupSymbol(symbol, symbolType, null, false);
-  }
-
-  public static String getUdfType(Type t) {
-    switch (t.getPrimitiveType()) {
-    case BOOLEAN:
-      return "BooleanVal";
-    case TINYINT:
-      return "TinyIntVal";
-    case SMALLINT:
-      return "SmallIntVal";
-    case INT:
-      return "IntVal";
-    case BIGINT:
-      return "BigIntVal";
-    case FLOAT:
-      return "FloatVal";
-    case DOUBLE:
-      return "DoubleVal";
-    case STRING:
-    case VARCHAR:
-    case CHAR:
-      return "StringVal";
-    case TIMESTAMP:
-      return "TimestampVal";
-    case DECIMAL:
-      return "DecimalVal";
-    default:
-      Preconditions.checkState(false, t.toString());
-      return "";
-    }
-  }
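For example, getUdfType(Type.BIGINT) returns "BigIntVal", the UDF wrapper type name corresponding to that column type.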
-
-  /**
-   * Returns true if the given function matches the specified category.
-   */
-  public static boolean categoryMatch(Function fn, TFunctionCategory category) {
-    Preconditions.checkNotNull(category);
-    return (category == TFunctionCategory.SCALAR && fn instanceof ScalarFunction)
-        || (category == TFunctionCategory.AGGREGATE
-            && fn instanceof AggregateFunction
-            && ((AggregateFunction)fn).isAggregateFn())
-        || (category == TFunctionCategory.ANALYTIC
-            && fn instanceof AggregateFunction
-            && ((AggregateFunction)fn).isAnalyticFn());
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/catalog/HBaseColumn.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/catalog/HBaseColumn.java b/fe/src/main/java/com/cloudera/impala/catalog/HBaseColumn.java
deleted file mode 100644
index 37fa853..0000000
--- a/fe/src/main/java/com/cloudera/impala/catalog/HBaseColumn.java
+++ /dev/null
@@ -1,67 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.catalog;
-
-import com.cloudera.impala.thrift.TColumn;
-
-// Describes an HBase column mapped to a Hive column (as described in the metastore).
-// this.name describes the column name in Hive.
-// This class adds the HBase columnFamily and columnQualifier,
-// so we can read the column from HBase directly.
-public class HBaseColumn extends Column implements Comparable<HBaseColumn> {
-  private final String columnFamily_;
-  private final String columnQualifier_;
-  private final boolean binaryEncoded_;
-
-  public HBaseColumn(String name, String columnFamily, String columnQualifier,
-      boolean binaryEncoded, Type type, String comment, int position) {
-    super(name, type, comment, position);
-    columnFamily_ = columnFamily;
-    columnQualifier_ = columnQualifier;
-    binaryEncoded_ = binaryEncoded;
-  }
-
-  public String getColumnFamily() { return columnFamily_; }
-  public String getColumnQualifier() { return columnQualifier_; }
-  public boolean isBinaryEncoded() { return binaryEncoded_; }
-
-  @Override
-  // We order the HBase columns in the metadata based on columnFamily, columnQualifier,
-  // to more easily map slots from HBase's Result.raw() to target slots in the backend.
-  public int compareTo(HBaseColumn o) {
-    int familyCmp = columnFamily_.compareTo(o.columnFamily_);
-    if (familyCmp != 0) {
-      return familyCmp;
-    }
-    int qualifierCmp = columnQualifier_.compareTo(o.columnQualifier_);
-    return qualifierCmp;
-  }
-
-  @Override
-  public TColumn toThrift() {
-    TColumn colDesc = new TColumn(name_, type_.toThrift());
-    if (comment_ != null) colDesc.setComment(comment_);
-    colDesc.setCol_stats(getStats().toThrift());
-    colDesc.setPosition(position_);
-    colDesc.setIs_hbase_column(true);
-    colDesc.setColumn_family(columnFamily_);
-    colDesc.setColumn_qualifier(columnQualifier_);
-    colDesc.setIs_binary(binaryEncoded_);
-    return colDesc;
-  }
-}
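A short example of the order compareTo produces (hypothetical columns): mappings cf1:b, cf1:a and cf2:a sort as cf1:a, cf1:b, cf2:a, since the column family is compared first and the qualifier breaks ties; the backend relies on this order to line up cells from Result.raw() with target slots.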

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/catalog/HBaseTable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/catalog/HBaseTable.java b/fe/src/main/java/com/cloudera/impala/catalog/HBaseTable.java
deleted file mode 100644
index d96314e..0000000
--- a/fe/src/main/java/com/cloudera/impala/catalog/HBaseTable.java
+++ /dev/null
@@ -1,853 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.catalog;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.ClusterStatus;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.ServerLoad;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.RegionLoad;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.RegionLocator;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.io.compress.Compression;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hive.hbase.HBaseSerDe;
-import org.apache.hadoop.hive.metastore.IMetaStoreClient;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
-import org.apache.hadoop.hive.serde2.SerDeException;
-import org.apache.log4j.Logger;
-
-import com.cloudera.impala.common.Pair;
-import com.cloudera.impala.thrift.TCatalogObjectType;
-import com.cloudera.impala.thrift.TColumn;
-import com.cloudera.impala.thrift.THBaseTable;
-import com.cloudera.impala.thrift.TResultSet;
-import com.cloudera.impala.thrift.TResultSetMetadata;
-import com.cloudera.impala.thrift.TTable;
-import com.cloudera.impala.thrift.TTableDescriptor;
-import com.cloudera.impala.thrift.TTableType;
-import com.cloudera.impala.util.StatsHelper;
-import com.cloudera.impala.util.TResultRowBuilder;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-/**
- * Impala representation of HBase table metadata,
- * as loaded from Hive's metastore.
- * This implies that we inherit the metastore's limitations related to HBase,
- * for example the lack of support for composite HBase row keys.
- * We sort the HBase columns (cols) by family/qualifier
- * to simplify the retrieval logic in the backend, since
- * HBase returns data ordered by family/qualifier.
- * This implies that a "select *"-query on an HBase table
- * will not have the columns ordered as they were declared in the DDL.
- * They will be ordered by family/qualifier.
- *
- */
-public class HBaseTable extends Table {
-  // Maximum deviation from the average to stop querying more regions
-  // to estimate the row count
-  private static final double DELTA_FROM_AVERAGE = 0.15;
-
-  private static final Logger LOG = Logger.getLogger(HBaseTable.class);
-
-  // Copied from Hive's HBaseStorageHandler.java.
-  public static final String DEFAULT_PREFIX = "default.";
-
-  // Number of rows fetched during the row count estimation per region
-  public static final int ROW_COUNT_ESTIMATE_BATCH_SIZE = 10;
-
-  // Minimum number of regions that are checked to estimate the row count
-  private static final int MIN_NUM_REGIONS_TO_CHECK = 5;
-
-  // Column referring to HBase row key.
-  // Hive (including metastore) currently doesn't support composite HBase keys.
-  protected HBaseColumn rowKey_;
-
-  // Name of table in HBase.
-  // 'this.name' is the alias of the HBase table in Hive.
-  protected String hbaseTableName_;
-
-  // Input format class for HBase tables read by Hive.
-  private static final String HBASE_INPUT_FORMAT =
-      "org.apache.hadoop.hive.hbase.HiveHBaseTableInputFormat";
-
-  // Serialization class for HBase tables set in the corresponding Metastore table.
-  private static final String HBASE_SERIALIZATION_LIB =
-      "org.apache.hadoop.hive.hbase.HBaseSerDe";
-
-  // Storage handler class for HBase tables read by Hive.
-  private static final String HBASE_STORAGE_HANDLER =
-      "org.apache.hadoop.hive.hbase.HBaseStorageHandler";
-
-  // Column family of HBase row key
-  private static final String ROW_KEY_COLUMN_FAMILY = ":key";
-
-  // Keep the conf around
-  private final static Configuration hbaseConf_ = HBaseConfiguration.create();
-
-  // Cached column families. Used primarily for speeding up row stats estimation
-  // (see CDH-19292).
-  private HColumnDescriptor[] columnFamilies_ = null;
-
-  protected HBaseTable(TableId id, org.apache.hadoop.hive.metastore.api.Table msTbl,
-      Db db, String name, String owner) {
-    super(id, msTbl, db, name, owner);
-  }
-
-  /**
-   * Connection instances are expensive to create. The HBase documentation recommends
-   * creating one and sharing it among threads. All operations on a connection are
-   * thread-safe.
-   */
-  private static class ConnectionHolder {
-    private static Connection connection_ = null;
-
-    public static synchronized Connection getConnection(Configuration conf)
-        throws IOException {
-      if (connection_ == null || connection_.isClosed()) {
-        connection_ = ConnectionFactory.createConnection(conf);
-      }
-      return connection_;
-    }
-  }
-
-  /**
-   * Table client objects are thread-unsafe and cheap to create. The HBase docs recommend
-   * creating a new one for each task and then closing when done.
-   */
-  public org.apache.hadoop.hbase.client.Table getHBaseTable() throws IOException {
-    return ConnectionHolder.getConnection(hbaseConf_)
-        .getTable(TableName.valueOf(hbaseTableName_));
-  }
-
-  private void closeHBaseTable(org.apache.hadoop.hbase.client.Table table) {
-    try {
-      table.close();
-    } catch (IOException e) {
-      LOG.error("Error closing HBase table: " + hbaseTableName_, e);
-    }
-  }
-
-  /**
-   * Get the cluster status, making sure we close the admin client afterwards.
-   */
-  public ClusterStatus getClusterStatus() throws IOException {
-    Admin admin = null;
-    ClusterStatus clusterStatus = null;
-    try {
-      Connection connection = ConnectionHolder.getConnection(hbaseConf_);
-      admin = connection.getAdmin();
-      clusterStatus = admin.getClusterStatus();
-    } finally {
-      if (admin != null) admin.close();
-    }
-    return clusterStatus;
-  }
-
-  /**
-   * Parses the column description string into column families and column
-   * qualifiers. This is a copy of HBaseSerDe.parseColumnMapping and
-   * parseColumnStorageTypes with the parts we don't use removed. The Hive
-   * functions are not public.
-   *
-   * tableDefaultStorageIsBinary - true if the table defaults to binary encoding
-   * columnsMappingSpec - input string format describing the table
-   * fieldSchemas - input field schema from metastore table
-   * columnFamilies/columnQualifiers/columnBinaryEncodings - out parameters that will be
-   * filled with the column family, column qualifier and encoding for each column.
-   */
-  private void parseColumnMapping(boolean tableDefaultStorageIsBinary,
-      String columnsMappingSpec, List<FieldSchema> fieldSchemas,
-      List<String> columnFamilies, List<String> columnQualifiers,
-      List<Boolean> colIsBinaryEncoded) throws SerDeException {
-    if (columnsMappingSpec == null) {
-      throw new SerDeException(
-          "Error: hbase.columns.mapping missing for this HBase table.");
-    }
-
-    if (columnsMappingSpec.equals("") ||
-        columnsMappingSpec.equals(HBaseSerDe.HBASE_KEY_COL)) {
-      throw new SerDeException("Error: hbase.columns.mapping specifies only "
-          + "the HBase table row key. A valid Hive-HBase table must specify at "
-          + "least one additional column.");
-    }
-
-    int rowKeyIndex = -1;
-    String[] columnSpecs = columnsMappingSpec.split(",");
-    // If there was an implicit key column mapping, the number of columns (fieldSchemas)
-    // will be one more than the number of column mapping specs.
-    int fsStartIdxOffset = fieldSchemas.size() - columnSpecs.length;
-    if (fsStartIdxOffset != 0 && fsStartIdxOffset != 1) {
-      // This should never happen - Hive blocks creating a mismatched table and both Hive
-      // and Impala currently block all column-level DDL on HBase tables.
-      throw new SerDeException(String.format("Number of entries in " +
-          "'hbase.columns.mapping' does not match the number of columns in the " +
-          "table: %d != %d (counting the key if implicit)",
-          columnSpecs.length, fieldSchemas.size()));
-    }
-
-    for (int i = 0; i < columnSpecs.length; ++i) {
-      String mappingSpec = columnSpecs[i];
-      String[] mapInfo = mappingSpec.split("#");
-      // Trim column info so that serdeproperties with new lines still parse correctly.
-      String colInfo = mapInfo[0].trim();
-
-      int idxFirst = colInfo.indexOf(":");
-      int idxLast = colInfo.lastIndexOf(":");
-
-      if (idxFirst < 0 || !(idxFirst == idxLast)) {
-        throw new SerDeException("Error: the HBase columns mapping contains a "
-            + "badly formed column family, column qualifier specification.");
-      }
-
-      if (colInfo.equals(HBaseSerDe.HBASE_KEY_COL)) {
-        Preconditions.checkState(fsStartIdxOffset == 0);
-        rowKeyIndex = i;
-        columnFamilies.add(colInfo);
-        columnQualifiers.add(null);
-      } else {
-        String[] parts = colInfo.split(":");
-        Preconditions.checkState(parts.length > 0 && parts.length <= 2);
-        columnFamilies.add(parts[0]);
-        if (parts.length == 2) {
-          columnQualifiers.add(parts[1]);
-        } else {
-          columnQualifiers.add(null);
-        }
-      }
-
-      // Set column binary encoding
-      FieldSchema fieldSchema = fieldSchemas.get(i + fsStartIdxOffset);
-      boolean supportsBinaryEncoding = supportsBinaryEncoding(fieldSchema);
-      if (mapInfo.length == 1) {
-        // There is no column level storage specification. Use the table storage spec.
-        colIsBinaryEncoded.add(
-            new Boolean(tableDefaultStorageIsBinary && supportsBinaryEncoding));
-      } else if (mapInfo.length == 2) {
-        // There is a storage specification for the column
-        String storageOption = mapInfo[1];
-
-        if (!(storageOption.equals("-") || "string".startsWith(storageOption) || "binary"
-            .startsWith(storageOption))) {
-          throw new SerDeException("Error: A column storage specification is one of"
-              + " the following: '-', a prefix of 'string', or a prefix of 'binary'. "
-              + storageOption + " is not a valid storage option specification for "
-              + fieldSchema.getName());
-        }
-
-        boolean isBinaryEncoded = false;
-        if ("-".equals(storageOption)) {
-          isBinaryEncoded = tableDefaultStorageIsBinary;
-        } else if ("binary".startsWith(storageOption)) {
-          isBinaryEncoded = true;
-        }
-        if (isBinaryEncoded && !supportsBinaryEncoding) {
-          // Use string encoding and log a warning if the column spec is binary but the
-          // column type does not support it.
-          // TODO: Hive/HBase does not raise an exception, but should we?
-          LOG.warn("Column storage specification for column " + fieldSchema.getName()
-              + " is binary" + " but the column type " + fieldSchema.getType() +
-              " does not support binary encoding. Falling back to string format.");
-          isBinaryEncoded = false;
-        }
-        colIsBinaryEncoded.add(isBinaryEncoded);
-      } else {
-        // error in storage specification
-        throw new SerDeException("Error: " + HBaseSerDe.HBASE_COLUMNS_MAPPING
-            + " storage specification " + mappingSpec + " is not valid for column: "
-            + fieldSchema.getName());
-      }
-    }
-
-    if (rowKeyIndex == -1) {
-      columnFamilies.add(0, HBaseSerDe.HBASE_KEY_COL);
-      columnQualifiers.add(0, null);
-      colIsBinaryEncoded.add(0,
-          supportsBinaryEncoding(fieldSchemas.get(0)) && tableDefaultStorageIsBinary);
-    }
-  }
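A sketch of how a typical mapping string is parsed by this method (hypothetical column names; assumes the table default storage is 'string' and that cf:b has a type that supports binary encoding):

    //   hbase.columns.mapping = ":key,cf:a,cf:b#binary"
    // parses into:
    //   columnFamilies     = [":key", "cf", "cf"]
    //   columnQualifiers   = [null,   "a",  "b"]
    //   colIsBinaryEncoded = [false,  false, true]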
-
-  private boolean supportsBinaryEncoding(FieldSchema fs) {
-    try {
-      Type colType = parseColumnType(fs);
-      // Only boolean, integer and floating point types can use binary storage.
-      return colType.isBoolean() || colType.isIntegerType()
-          || colType.isFloatingPointType();
-    } catch (TableLoadingException e) {
-      return false;
-    }
-  }
-
-  @Override
-  /**
-   * For HBase tables, we can support tables with columns we don't understand at
-   * all (e.g. map) as long as the user does not select them. This is in contrast
-   * to HDFS tables, since we typically need to understand all columns to make sense
-   * of the file at all.
-   */
-  public void load(boolean reuseMetadata, IMetaStoreClient client,
-      org.apache.hadoop.hive.metastore.api.Table msTbl) throws TableLoadingException {
-    Preconditions.checkNotNull(getMetaStoreTable());
-    try {
-      msTable_ = msTbl;
-      hbaseTableName_ = getHBaseTableName(getMetaStoreTable());
-      // Warm up the connection and verify the table exists.
-      getHBaseTable().close();
-      columnFamilies_ = null;
-      Map<String, String> serdeParams =
-          getMetaStoreTable().getSd().getSerdeInfo().getParameters();
-      String hbaseColumnsMapping = serdeParams.get(HBaseSerDe.HBASE_COLUMNS_MAPPING);
-      if (hbaseColumnsMapping == null) {
-        throw new MetaException("No hbase.columns.mapping defined in Serde.");
-      }
-
-      String hbaseTableDefaultStorageType = getMetaStoreTable().getParameters().get(
-          HBaseSerDe.HBASE_TABLE_DEFAULT_STORAGE_TYPE);
-      boolean tableDefaultStorageIsBinary = false;
-      if (hbaseTableDefaultStorageType != null &&
-          !hbaseTableDefaultStorageType.isEmpty()) {
-        if (hbaseTableDefaultStorageType.equalsIgnoreCase("binary")) {
-          tableDefaultStorageIsBinary = true;
-        } else if (!hbaseTableDefaultStorageType.equalsIgnoreCase("string")) {
-          throw new SerDeException("Error: " +
-              HBaseSerDe.HBASE_TABLE_DEFAULT_STORAGE_TYPE +
-              " parameter must be specified as" +
-              " 'string' or 'binary'; '" + hbaseTableDefaultStorageType +
-              "' is not a valid specification for this table/serde property.");
-        }
-      }
-
-      // Parse HBase column-mapping string.
-      List<FieldSchema> fieldSchemas = getMetaStoreTable().getSd().getCols();
-      List<String> hbaseColumnFamilies = new ArrayList<String>();
-      List<String> hbaseColumnQualifiers = new ArrayList<String>();
-      List<Boolean> hbaseColumnBinaryEncodings = new ArrayList<Boolean>();
-      parseColumnMapping(tableDefaultStorageIsBinary, hbaseColumnsMapping, fieldSchemas,
-          hbaseColumnFamilies, hbaseColumnQualifiers, hbaseColumnBinaryEncodings);
-      Preconditions.checkState(
-          hbaseColumnFamilies.size() == hbaseColumnQualifiers.size());
-      Preconditions.checkState(fieldSchemas.size() == hbaseColumnFamilies.size());
-
-      // Populate tmp cols in the order they appear in the Hive metastore.
-      // We will reorder the cols below.
-      List<HBaseColumn> tmpCols = Lists.newArrayList();
-      // Store the key column separately.
-      // TODO: Change this to an ArrayList once we support composite row keys.
-      HBaseColumn keyCol = null;
-      for (int i = 0; i < fieldSchemas.size(); ++i) {
-        FieldSchema s = fieldSchemas.get(i);
-        Type t = Type.INVALID;
-        try {
-          t = parseColumnType(s);
-        } catch (TableLoadingException e) {
-          // Ignore hbase types we don't support yet. We can load the metadata
-          // but won't be able to select from it.
-        }
-        HBaseColumn col = new HBaseColumn(s.getName(), hbaseColumnFamilies.get(i),
-            hbaseColumnQualifiers.get(i), hbaseColumnBinaryEncodings.get(i),
-            t, s.getComment(), -1);
-        if (col.getColumnFamily().equals(ROW_KEY_COLUMN_FAMILY)) {
-          // Store the row key column separately from the rest
-          keyCol = col;
-        } else {
-          tmpCols.add(col);
-        }
-      }
-      Preconditions.checkState(keyCol != null);
-
-      // The backend assumes that the row key column is always first and
-      // that the remaining HBase columns are ordered by columnFamily,columnQualifier,
-      // so the final position depends on the other mapped HBase columns.
-      // Sort columns and update positions.
-      Collections.sort(tmpCols);
-      clearColumns();
-
-      keyCol.setPosition(0);
-      addColumn(keyCol);
-      // Update the positions of the remaining columns
-      for (int i = 0; i < tmpCols.size(); ++i) {
-        HBaseColumn col = tmpCols.get(i);
-        col.setPosition(i + 1);
-        addColumn(col);
-      }
-
-      // Set table stats.
-      numRows_ = getRowCount(super.getMetaStoreTable().getParameters());
-
-      // Since we don't support composite HBase row keys yet, all HBase tables have a
-      // single clustering col.
-      numClusteringCols_ = 1;
-      loadAllColumnStats(client);
-    } catch (Exception e) {
-      throw new TableLoadingException("Failed to load metadata for HBase table: " +
-          name_, e);
-    }
-  }
-
-  @Override
-  protected void loadFromThrift(TTable table) throws TableLoadingException {
-    super.loadFromThrift(table);
-    try {
-      hbaseTableName_ = getHBaseTableName(getMetaStoreTable());
-      // Warm up the connection and verify the table exists.
-      getHBaseTable().close();
-      columnFamilies_ = null;
-    } catch (Exception e) {
-      throw new TableLoadingException("Failed to load metadata for HBase table from " +
-          "thrift table: " + name_, e);
-    }
-  }
-
-  /**
-   * This method is completely copied from Hive's HBaseStorageHandler.java.
-   */
-  private String getHBaseTableName(org.apache.hadoop.hive.metastore.api.Table tbl) {
-    // Give preference to TBLPROPERTIES over SERDEPROPERTIES
-    // (really we should only use TBLPROPERTIES, so this is just
-    // for backwards compatibility with the original specs).
-    String tableName = tbl.getParameters().get(HBaseSerDe.HBASE_TABLE_NAME);
-    if (tableName == null) {
-      tableName = tbl.getSd().getSerdeInfo().getParameters().get(
-          HBaseSerDe.HBASE_TABLE_NAME);
-    }
-    if (tableName == null) {
-      tableName = tbl.getDbName() + "." + tbl.getTableName();
-      if (tableName.startsWith(DEFAULT_PREFIX)) {
-        tableName = tableName.substring(DEFAULT_PREFIX.length());
-      }
-    }
-    return tableName;
-  }
-
-  /**
-   * Estimates the number of rows for a single region and returns a pair with
-   * the estimated row count and the estimated size in bytes per row.
-   */
-  private Pair<Long, Long> getEstimatedRowStatsForRegion(HRegionLocation location,
-      boolean isCompressed, ClusterStatus clusterStatus) throws IOException {
-    HRegionInfo info = location.getRegionInfo();
-
-    Scan s = new Scan(info.getStartKey());
-    // Get a small sample of rows
-    s.setBatch(ROW_COUNT_ESTIMATE_BATCH_SIZE);
-    // Try to get every version so the row's size can be used for the estimate.
-    s.setMaxVersions(Short.MAX_VALUE);
-    // Don't cache the blocks as we don't think these are
-    // necessarily important blocks.
-    s.setCacheBlocks(false);
-    // Use a regular (non-raw) scan; delete markers are not included in this estimate.
-    s.setRaw(false);
-
-    org.apache.hadoop.hbase.client.Table table = getHBaseTable();
-    ResultScanner rs = table.getScanner(s);
-
-    long currentRowSize = 0;
-    long currentRowCount = 0;
-
-    try {
-      // Fetch up to ROW_COUNT_ESTIMATE_BATCH_SIZE rows as a representative sample.
-      for (int i = 0; i < ROW_COUNT_ESTIMATE_BATCH_SIZE; ++i) {
-        Result r = rs.next();
-        if (r == null)
-          break;
-        // Check for empty rows, see IMPALA-1451
-        if (r.isEmpty())
-          continue;
-        ++currentRowCount;
-        // To estimate the number of rows we simply use the number of bytes
-        // returned from the underlying buffer. Since HBase internally works with
-        // these same structures, this gives us reasonable estimates.
-        Cell[] cells = r.rawCells();
-        for (Cell c : cells) {
-          if (c instanceof KeyValue) {
-            currentRowSize += KeyValue.getKeyValueDataStructureSize(c.getRowLength(),
-                c.getFamilyLength(), c.getQualifierLength(), c.getValueLength(),
-                c.getTagsLength());
-          } else {
-            throw new IllegalStateException("Celltype " + c.getClass().getName() +
-                " not supported.");
-          }
-        }
-      }
-    } finally {
-      rs.close();
-      closeHBaseTable(table);
-    }
-
-    // If there are no rows then no need to estimate.
-    if (currentRowCount == 0) return new Pair<Long, Long>(0L, 0L);
-    // Get the size.
-    long currentSize = getRegionSize(location, clusterStatus);
-    // estimate the number of rows.
-    double bytesPerRow = currentRowSize / (double) currentRowCount;
-    if (currentSize == 0) {
-      return new Pair<Long, Long>(currentRowCount, (long) bytesPerRow);
-    }
-
-    // A compression factor of two is only a best-effort guess.
-    long estimatedRowCount =
-        (long) ((isCompressed ? 2 : 1) * (currentSize / bytesPerRow));
-
-    return new Pair<Long, Long>(estimatedRowCount, (long) bytesPerRow);
-  }
-
-  /**
-   * Get an estimate of the number of rows and bytes per row in regions between
-   * startRowKey and endRowKey.
-   *
-   * This number is calculated by incrementally sampling as many regions as
-   * necessary until we observe a relatively constant average row size per region.
-   * Depending on the skew of data in the regions this can mean that we only need to
-   * check a minimal number of regions or that we end up scanning all of them.
-   *
-   * The HBase region servers periodically update the master with their metrics,
-   * including storefile size. We get the size of the storefiles for all regions in
-   * the cluster with a single call to getClusterStatus from the master.
-   *
-   * The accuracy of this estimate is limited by rows that are still in the memstore
-   * and have not been flushed yet: a large number of key-value pairs in the memstore
-   * leads to bad estimates because their size is not reflected in the storefile size
-   * that the estimate is based on.
-   *
-   * Currently, the algorithm does not consider the case that the key range used as a
-   * parameter might generally have a different row size than the rest of the region.
-   *
-   * The values computed here should be cached so that the NN is not overwhelmed in
-   * high-QPS workloads; this could be done in load(). Synchronized to make sure that
-   * only one thread at a time is using the htable.
-   *
-   * @param startRowKey
-   *          First row key in the range
-   * @param endRowKey
-   *          Last row key in the range
-   * @return The estimated number of rows in the regions between the row keys (first) and
-   *         the estimated row size in bytes (second).
-   */
-  public synchronized Pair<Long, Long> getEstimatedRowStats(byte[] startRowKey,
-      byte[] endRowKey) {
-    Preconditions.checkNotNull(startRowKey);
-    Preconditions.checkNotNull(endRowKey);
-
-    boolean isCompressed = false;
-    long rowCount = 0;
-    long rowSize = 0;
-
-    org.apache.hadoop.hbase.client.Table table = null;
-    try {
-      table = getHBaseTable();
-      ClusterStatus clusterStatus = getClusterStatus();
-
-      // Check to see if things are compressed.
-      // If they are we'll estimate a compression factor.
-      if (columnFamilies_ == null) {
-        columnFamilies_ = table.getTableDescriptor().getColumnFamilies();
-      }
-      Preconditions.checkNotNull(columnFamilies_);
-      for (HColumnDescriptor desc : columnFamilies_) {
-        isCompressed |= desc.getCompression() !=  Compression.Algorithm.NONE;
-      }
-
-      // Fetch all regions for the key range
-      List<HRegionLocation> locations = getRegionsInRange(table, startRowKey, endRowKey);
-      Collections.shuffle(locations);
-      // The following variables track the number and size of 'rows' in
-      // HBase and allow incremental calculation of the average and standard
-      // deviation.
-      StatsHelper<Long> statsSize = new StatsHelper<Long>();
-      long totalEstimatedRows = 0;
-
-      // Collect stats samples from at least MIN_NUM_REGIONS_TO_CHECK regions, and at
-      // most from all regions, until the deviation from the average is small enough.
-      while ((statsSize.count() < MIN_NUM_REGIONS_TO_CHECK ||
-          statsSize.stddev() > statsSize.mean() * DELTA_FROM_AVERAGE) &&
-          statsSize.count() < locations.size()) {
-        HRegionLocation currentLocation = locations.get((int) statsSize.count());
-        Pair<Long, Long> tmp = getEstimatedRowStatsForRegion(currentLocation,
-            isCompressed, clusterStatus);
-        totalEstimatedRows += tmp.first;
-        statsSize.addSample(tmp.second);
-      }
-
-      // Sum up the total size for all regions in range.
-      long totalSize = 0;
-      for (final HRegionLocation location : locations) {
-        totalSize += getRegionSize(location, clusterStatus);
-      }
-      if (totalSize == 0) {
-        rowCount = totalEstimatedRows;
-      } else {
-        rowCount = (long) (totalSize / statsSize.mean());
-      }
-      rowSize = (long) statsSize.mean();
-    } catch (IOException ioe) {
-      // Print the stack trace, but we'll ignore it
-      // as this is just an estimate.
-      // TODO: Put this into the per query log.
-      LOG.error("Error computing HBase row count estimate", ioe);
-      return new Pair<Long, Long>(-1L, -1L);
-    } finally {
-      if (table != null) closeHBaseTable(table);
-    }
-    return new Pair<Long, Long>(rowCount, rowSize);
-  }
-
-  /**
-   * Returns the size of the given region in bytes. Simply returns the storefile size
-   * for this region from the ClusterStatus. Returns 0 in case of an error.
-   */
-  public long getRegionSize(HRegionLocation location, ClusterStatus clusterStatus) {
-    HRegionInfo info = location.getRegionInfo();
-    ServerLoad serverLoad = clusterStatus.getLoad(location.getServerName());
-
-    // If the serverLoad is null, the master doesn't have information for this region's
-    // server. This shouldn't normally happen.
-    if (serverLoad == null) {
-      LOG.error("Unable to find load for server: " + location.getServerName() +
-          " for location " + info.getRegionNameAsString());
-      return 0;
-    }
-    RegionLoad regionLoad = serverLoad.getRegionsLoad().get(info.getRegionName());
-
-    final long megaByte = 1024L * 1024L;
-    return regionLoad.getStorefileSizeMB() * megaByte;
-  }
-
-  /**
-   * Hive returns the columns in order of their declaration for HBase tables.
-   */
-  @Override
-  public ArrayList<Column> getColumnsInHiveOrder() {
-    return getColumns();
-  }
-
-  @Override
-  public TTableDescriptor toThriftDescriptor(Set<Long> referencedPartitions) {
-    TTableDescriptor tableDescriptor =
-        new TTableDescriptor(id_.asInt(), TTableType.HBASE_TABLE,
-            getTColumnDescriptors(), numClusteringCols_, hbaseTableName_, db_.getName());
-    tableDescriptor.setHbaseTable(getTHBaseTable());
-    return tableDescriptor;
-  }
-
-  public String getHBaseTableName() {
-    return hbaseTableName_;
-  }
-
-  public static Configuration getHBaseConf() {
-    return hbaseConf_;
-  }
-
-  public int getNumNodes() {
-    // TODO: implement
-    return 100;
-  }
-
-  @Override
-  public TCatalogObjectType getCatalogObjectType() {
-    return TCatalogObjectType.TABLE;
-  }
-
-  @Override
-  public TTable toThrift() {
-    TTable table = super.toThrift();
-    table.setTable_type(TTableType.HBASE_TABLE);
-    table.setHbase_table(getTHBaseTable());
-    return table;
-  }
-
-  private THBaseTable getTHBaseTable() {
-    THBaseTable tHbaseTable = new THBaseTable();
-    tHbaseTable.setTableName(hbaseTableName_);
-    for (Column c : getColumns()) {
-      HBaseColumn hbaseCol = (HBaseColumn) c;
-      tHbaseTable.addToFamilies(hbaseCol.getColumnFamily());
-      if (hbaseCol.getColumnQualifier() != null) {
-        tHbaseTable.addToQualifiers(hbaseCol.getColumnQualifier());
-      } else {
-        tHbaseTable.addToQualifiers("");
-      }
-      tHbaseTable.addToBinary_encoded(hbaseCol.isBinaryEncoded());
-    }
-    return tHbaseTable;
-  }
-
-  /**
-   * This is copied from org.apache.hadoop.hbase.client.HTable. The only difference is
-   * that it does not use the cache when calling getRegionLocation.
-   * TODO: Remove this function and use HTable.getRegionsInRange when the non-cache
-   * version has been ported to CDH (DISTRO-477).
-   * Get the corresponding regions for an arbitrary range of keys.
-   * <p>
-   *
-   * @param startRow
-   *          Starting row in range, inclusive
-   * @param endRow
-   *          Ending row in range, exclusive
-   * @return A list of HRegionLocations corresponding to the regions that
-   *         contain the specified range
-   * @throws IOException
-   *           if a remote or network exception occurs
-   */
-  public static List<HRegionLocation> getRegionsInRange(
-      org.apache.hadoop.hbase.client.Table hbaseTbl,
-      final byte[] startKey, final byte[] endKey) throws IOException {
-    final boolean endKeyIsEndOfTable = Bytes.equals(endKey, HConstants.EMPTY_END_ROW);
-    if ((Bytes.compareTo(startKey, endKey) > 0) && !endKeyIsEndOfTable) {
-      throw new IllegalArgumentException("Invalid range: " +
-          Bytes.toStringBinary(startKey) + " > " + Bytes.toStringBinary(endKey));
-    }
-    final List<HRegionLocation> regionList = new ArrayList<HRegionLocation>();
-    byte[] currentKey = startKey;
-    Connection connection = ConnectionHolder.getConnection(hbaseConf_);
-    // Make sure only one thread is accessing the hbaseTbl.
-    synchronized (hbaseTbl) {
-      RegionLocator locator = connection.getRegionLocator(hbaseTbl.getName());
-      do {
-        // always reload region location info.
-        HRegionLocation regionLocation = locator.getRegionLocation(currentKey, true);
-        regionList.add(regionLocation);
-        currentKey = regionLocation.getRegionInfo().getEndKey();
-      } while (!Bytes.equals(currentKey, HConstants.EMPTY_END_ROW) &&
-          (endKeyIsEndOfTable || Bytes.compareTo(currentKey, endKey) < 0));
-    }
-    return regionList;
-  }
-
-  /**
-   * Returns the storage handler class for HBase tables read by Hive.
-   */
-  @Override
-  public String getStorageHandlerClassName() {
-    return HBASE_STORAGE_HANDLER;
-  }
-
-  /**
-   * Returns statistics on this table as a tabular result set. Used for the
-   * SHOW TABLE STATS statement. The schema of the returned TResultSet is set
-   * inside this method.
-   */
-  public TResultSet getTableStats() {
-    TResultSet result = new TResultSet();
-    TResultSetMetadata resultSchema = new TResultSetMetadata();
-    result.setSchema(resultSchema);
-    resultSchema.addToColumns(
-        new TColumn("Region Location", Type.STRING.toThrift()));
-    resultSchema.addToColumns(new TColumn("Start RowKey",
-        Type.STRING.toThrift()));
-    resultSchema.addToColumns(new TColumn("Est. #Rows", Type.BIGINT.toThrift()));
-    resultSchema.addToColumns(new TColumn("Size", Type.STRING.toThrift()));
-
-    org.apache.hadoop.hbase.client.Table table;
-    try {
-      table = getHBaseTable();
-    } catch (IOException e) {
-      LOG.error("Error getting HBase table " + hbaseTableName_, e);
-      throw new RuntimeException(e);
-    }
-
-    // TODO: Consider fancier stats maintenance techniques for speeding up this process.
-    // Currently, we list all regions and perform a mini-scan of each of them to
-    // estimate the number of rows, the data size, etc., which is rather expensive.
-    try {
-      ClusterStatus clusterStatus = getClusterStatus();
-      long totalNumRows = 0;
-      long totalSize = 0;
-      List<HRegionLocation> regions = HBaseTable.getRegionsInRange(table,
-          HConstants.EMPTY_END_ROW, HConstants.EMPTY_START_ROW);
-      for (HRegionLocation region : regions) {
-        TResultRowBuilder rowBuilder = new TResultRowBuilder();
-        HRegionInfo regionInfo = region.getRegionInfo();
-        Pair<Long, Long> estRowStats =
-            getEstimatedRowStatsForRegion(region, false, clusterStatus);
-
-        long numRows = estRowStats.first.longValue();
-        long regionSize = getRegionSize(region, clusterStatus);
-        totalNumRows += numRows;
-        totalSize += regionSize;
-
-        // Add the region location, start rowkey, number of rows and raw size.
-        rowBuilder.add(String.valueOf(region.getHostname()))
-            .add(Bytes.toString(regionInfo.getStartKey())).add(numRows)
-            .addBytes(regionSize);
-        result.addToRows(rowBuilder.get());
-      }
-
-      // Total num rows and raw region size.
-      if (regions.size() > 1) {
-        TResultRowBuilder rowBuilder = new TResultRowBuilder();
-        rowBuilder.add("Total").add("").add(totalNumRows).addBytes(totalSize);
-        result.addToRows(rowBuilder.get());
-      }
-    } catch (IOException e) {
-      throw new RuntimeException(e);
-    } finally {
-      closeHBaseTable(table);
-    }
-    return result;
-  }
-
-  /**
-   * Returns true if the given Metastore Table represents an HBase table.
-   * Different versions of Hive/HBase are inconsistent about which HBase-related
-   * fields are set (e.g., HIVE-6548 changed the input format to null).
-   * For maximum compatibility we consider all known fields that indicate an HBase
-   * table.
-   */
-  public static boolean isHBaseTable(
-      org.apache.hadoop.hive.metastore.api.Table msTbl) {
-    if (msTbl.getParameters() != null &&
-        msTbl.getParameters().containsKey(HBASE_STORAGE_HANDLER)) {
-      return true;
-    }
-    StorageDescriptor sd = msTbl.getSd();
-    if (sd == null) return false;
-    if (sd.getInputFormat() != null && sd.getInputFormat().equals(HBASE_INPUT_FORMAT)) {
-      return true;
-    } else if (sd.getSerdeInfo() != null &&
-        sd.getSerdeInfo().getSerializationLib() != null &&
-        sd.getSerdeInfo().getSerializationLib().equals(HBASE_SERIALIZATION_LIB)) {
-      return true;
-    }
-    return false;
-  }
-}
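
For reference, the stopping rule used by getEstimatedRowStats() above can be exercised on its own: keep sampling regions until at least MIN_NUM_REGIONS_TO_CHECK samples have been taken and the standard deviation of the per-region row size drops below DELTA_FROM_AVERAGE times the mean, or until every region has been sampled. The following is a minimal standalone sketch of that loop; the constant values, the RunningStats helper (a stand-in for the StatsHelper class used above), and the per-region sample values are invented for illustration and are not the frontend's actual implementation or tuning values.

import java.util.Arrays;
import java.util.List;

public class RegionSamplingSketch {
  // Stand-ins for the frontend's tuning constants (the values here are assumptions).
  private static final int MIN_NUM_REGIONS_TO_CHECK = 5;
  private static final double DELTA_FROM_AVERAGE = 0.15;

  // Minimal running mean / standard deviation helper (Welford's algorithm), standing
  // in for the StatsHelper used above.
  static class RunningStats {
    private long count = 0;
    private double mean = 0.0;
    private double m2 = 0.0;

    void addSample(double x) {
      ++count;
      double delta = x - mean;
      mean += delta / count;
      m2 += delta * (x - mean);
    }

    long count() { return count; }
    double mean() { return mean; }
    double stddev() { return count < 2 ? 0.0 : Math.sqrt(m2 / (count - 1)); }
  }

  public static void main(String[] args) {
    // Hypothetical bytes-per-row samples, one per region, in (shuffled) scan order.
    List<Double> bytesPerRowByRegion =
        Arrays.asList(410.0, 395.0, 430.0, 402.0, 398.0, 1200.0, 405.0);

    RunningStats stats = new RunningStats();
    // Same shape as the loop in getEstimatedRowStats(): sample regions until the
    // row-size estimate is stable (stddev small relative to the mean) or we run out.
    while ((stats.count() < MIN_NUM_REGIONS_TO_CHECK
            || stats.stddev() > stats.mean() * DELTA_FROM_AVERAGE)
        && stats.count() < bytesPerRowByRegion.size()) {
      stats.addSample(bytesPerRowByRegion.get((int) stats.count()));
    }
    System.out.printf("Sampled %d of %d regions: mean=%.1f bytes/row, stddev=%.1f%n",
        stats.count(), bytesPerRowByRegion.size(), stats.mean(), stats.stddev());
  }
}

With the sample values above, the loop stops after the minimum five regions because their sizes are already tightly clustered; a highly skewed table would force it to keep sampling, up to all regions in range.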

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/catalog/HdfsCachePool.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/catalog/HdfsCachePool.java b/fe/src/main/java/com/cloudera/impala/catalog/HdfsCachePool.java
deleted file mode 100644
index b8ff102..0000000
--- a/fe/src/main/java/com/cloudera/impala/catalog/HdfsCachePool.java
+++ /dev/null
@@ -1,65 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.catalog;
-
-import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
-
-import com.cloudera.impala.thrift.TCatalogObjectType;
-import com.cloudera.impala.thrift.THdfsCachePool;
-import com.google.common.base.Preconditions;
-
-/**
- * Represents an HDFS cache pool (CachePoolInfo class). Currently, the only metadata we
- * care about for cache pools is the cache pool name. In the future it may be desirable
- * to track additional metadata such as the owner, size, and current usage of the pool.
- */
-public class HdfsCachePool implements CatalogObject {
-  private long catalogVersion_;
-  private final THdfsCachePool cachePool_;
-
-  public HdfsCachePool(CachePoolInfo cachePoolInfo) {
-    cachePool_ = new THdfsCachePool(cachePoolInfo.getPoolName());
-  }
-
-  public HdfsCachePool(THdfsCachePool cachePool) {
-    Preconditions.checkNotNull(cachePool);
-    cachePool_ = cachePool;
-  }
-
-  @Override
-  public TCatalogObjectType getCatalogObjectType() {
-    return TCatalogObjectType.HDFS_CACHE_POOL;
-  }
-
-  public THdfsCachePool toThrift() {
-    return cachePool_;
-  }
-
-  public static HdfsCachePool fromThrift(THdfsCachePool cachePool) {
-    return new HdfsCachePool(cachePool);
-  }
-
-  @Override
-  public String getName() { return cachePool_.getPool_name(); }
-  @Override
-  public long getCatalogVersion() { return catalogVersion_; }
-  @Override
-  public void setCatalogVersion(long newVersion) { catalogVersion_ = newVersion; }
-  @Override
-  public boolean isLoaded() { return true; }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/catalog/HdfsCompression.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/catalog/HdfsCompression.java b/fe/src/main/java/com/cloudera/impala/catalog/HdfsCompression.java
deleted file mode 100644
index 302ec99..0000000
--- a/fe/src/main/java/com/cloudera/impala/catalog/HdfsCompression.java
+++ /dev/null
@@ -1,85 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.catalog;
-
-import com.cloudera.impala.thrift.THdfsCompression;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableMap;
-
-/**
- * Support for recognizing compression suffixes on data files.
- * Compression of a file is recognized in mapreduce by looking for suffixes of
- * supported codecs.
- * For now Impala recognizes DEFLATE, GZIP, BZIP2, SNAPPY, and LZO. LZO can use the
- * dedicated Hive input format class.
- */
-// TODO: Add LZ4?
-public enum HdfsCompression {
-  NONE,
-  DEFLATE,
-  GZIP,
-  BZIP2,
-  SNAPPY,
-  LZO,
-  LZO_INDEX; // LZO index file.
-
-  /* Map from a suffix to a compression type */
-  private static final ImmutableMap<String, HdfsCompression> SUFFIX_MAP =
-      ImmutableMap.<String, HdfsCompression>builder().
-          put("deflate", DEFLATE).
-          put("gz", GZIP).
-          put("bz2", BZIP2).
-          put("snappy", SNAPPY).
-          put("lzo", LZO).
-          put("index", LZO_INDEX).
-          build();
-
-  /* Given a file name, return its compression type, if any. */
-  public static HdfsCompression fromFileName(String fileName) {
-    int index = fileName.lastIndexOf(".");
-    if (index == -1) {
-      return NONE;
-    }
-
-    String suffix = fileName.substring(index + 1);
-    HdfsCompression compression = SUFFIX_MAP.get(suffix.toLowerCase());
-    return compression == null ? NONE : compression;
-  }
-
-  public THdfsCompression toThrift() {
-    switch (this) {
-    case NONE: return THdfsCompression.NONE;
-    case DEFLATE: return THdfsCompression.DEFLATE;
-    case GZIP: return THdfsCompression.GZIP;
-    case BZIP2: return THdfsCompression.BZIP2;
-    case SNAPPY: return THdfsCompression.SNAPPY_BLOCKED;
-    case LZO: return THdfsCompression.LZO;
-    default: throw new IllegalStateException("Unexpected codec: " + this);
-    }
-  }
-
-  /* Returns a compression type based on (Hive's) input format. Special case for LZO. */
-  public static HdfsCompression fromHdfsInputFormatClass(String inputFormatClass) {
-    // TODO: Remove when we have the native LZO writer.
-    Preconditions.checkNotNull(inputFormatClass);
-    if (inputFormatClass.equals(HdfsFileFormat.LZO_TEXT.inputFormat())) {
-      return LZO;
-    }
-    return NONE;
-  }
-}
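
The suffix lookup performed by fromFileName() above amounts to taking everything after the last '.' and probing a suffix map, falling back to NONE for files without an extension or with an unrecognized one. Below is a small self-contained sketch of that behavior; the enum and class here are local stand-ins rather than the catalog classes from the diff, and the sample file names are invented.

import java.util.Map;

public class CompressionSuffixSketch {
  // Local stand-in for the catalog enum; only the suffix lookup is reproduced here.
  enum Codec { NONE, DEFLATE, GZIP, BZIP2, SNAPPY, LZO, LZO_INDEX }

  // Suffix -> codec, mirroring the SUFFIX_MAP above (requires Java 9+ for Map.of).
  private static final Map<String, Codec> SUFFIX_MAP = Map.of(
      "deflate", Codec.DEFLATE,
      "gz", Codec.GZIP,
      "bz2", Codec.BZIP2,
      "snappy", Codec.SNAPPY,
      "lzo", Codec.LZO,
      "index", Codec.LZO_INDEX);

  // Mirrors fromFileName(): no '.' means uncompressed; unknown suffixes fall back to NONE.
  static Codec fromFileName(String fileName) {
    int dot = fileName.lastIndexOf('.');
    if (dot == -1) return Codec.NONE;
    Codec codec = SUFFIX_MAP.get(fileName.substring(dot + 1).toLowerCase());
    return codec == null ? Codec.NONE : codec;
  }

  public static void main(String[] args) {
    System.out.println(fromFileName("part-00000.gz"));   // GZIP
    System.out.println(fromFileName("data.snappy"));     // SNAPPY
    System.out.println(fromFileName("README"));          // NONE (no suffix)
    System.out.println(fromFileName("data.zst"));        // NONE (unrecognized suffix)
  }
}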


[33/61] [partial] incubator-impala git commit: IMPALA-3786: Replace "cloudera" with "apache" (part 1)

Posted by kw...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/catalog/AuthorizationPolicy.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/catalog/AuthorizationPolicy.java b/fe/src/main/java/com/cloudera/impala/catalog/AuthorizationPolicy.java
deleted file mode 100644
index f37f3dc..0000000
--- a/fe/src/main/java/com/cloudera/impala/catalog/AuthorizationPolicy.java
+++ /dev/null
@@ -1,340 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.catalog;
-
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.commons.net.ntp.TimeStamp;
-import org.apache.log4j.Logger;
-import org.apache.sentry.core.common.ActiveRoleSet;
-import org.apache.sentry.provider.cache.PrivilegeCache;
-
-import com.cloudera.impala.thrift.TColumn;
-import com.cloudera.impala.thrift.TPrivilege;
-import com.cloudera.impala.thrift.TResultRow;
-import com.cloudera.impala.thrift.TResultSet;
-import com.cloudera.impala.thrift.TResultSetMetadata;
-import com.cloudera.impala.util.TResultRowBuilder;
-import com.google.common.base.Strings;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import com.google.common.collect.Sets;
-
-/**
- * A thread safe authorization policy cache, consisting of roles, groups that are
- * members of that role, and the privileges associated with the role. The source data
- * this cache is backing is read from the Sentry Policy Service. Writing to the cache
- * will replace any matching items, but will not write back to the Sentry Policy Service.
- * A role can have 0 or more privileges and roles are stored in a map of role name
- * to role object. For example:
- * RoleName -> Role -> [RolePriv1, ..., RolePrivN]
- * To ensure we can efficiently retrieve the roles that a user is a member of, a map
- * of user group name to role name is tracked as grantGroups_.
- * To reduce duplication of metadata, privileges are linked to roles using a "role ID"
- * rather than embedding the role name. When a privilege is added to a role, we look
- * up the role by its role ID using the roleIds_ map.
- * This class acts as the backing cache for the Sentry cache-based provider (which is
- * why PrivilegeCache is implemented).
- * TODO: Instead of calling into Sentry to perform final authorization checks, we
- * should parse/validate the privileges in Impala.
- */
-public class AuthorizationPolicy implements PrivilegeCache {
-  private static final Logger LOG = Logger.getLogger(AuthorizationPolicy.class);
-
-  // Cache of role names (case-insensitive) to role objects.
-  private final CatalogObjectCache<Role> roleCache_ = new CatalogObjectCache<Role>();
-
-  // Map of role ID -> role name. Used to match privileges to roles.
-  Map<Integer, String> roleIds_ = Maps.newHashMap();
-
-  // Map of group name (case sensitive) to set of role names (case insensitive) that
-  // have been granted to this group. Kept in sync with roleCache_. Provides efficient
-  // lookups of Role by group name.
-  Map<String, Set<String>> groupsToRoles_ = Maps.newHashMap();
-
-  /**
-   * Adds a new role to the policy. If a role with the same name already
-   * exists and the role ID's are different, it will be overwritten by the new role.
-   * If a role exists and the role IDs are the same, the privileges from the old
-   * role will be copied to the new role.
-   */
-  public synchronized void addRole(Role role) {
-    Role existingRole = roleCache_.get(role.getName());
-    // There is already a newer version of this role in the catalog; ignore it and
-    // just return.
-    if (existingRole != null &&
-        existingRole.getCatalogVersion() >= role.getCatalogVersion()) return;
-
-    // If there was an existing role that was replaced we first need to remove it.
-    if (existingRole != null) {
-      // Remove the role. This will also clean up the grantGroup mappings.
-      removeRole(existingRole.getName());
-      if (existingRole.getId() == role.getId()) {
-        // Copy the privileges from the existing role.
-        for (RolePrivilege p: existingRole.getPrivileges()) {
-          role.addPrivilege(p);
-        }
-      }
-    }
-    roleCache_.add(role);
-
-    // Add new grants
-    for (String groupName: role.getGrantGroups()) {
-      Set<String> grantedRoles = groupsToRoles_.get(groupName);
-      if (grantedRoles == null) {
-        grantedRoles = Sets.newHashSet();
-        groupsToRoles_.put(groupName, grantedRoles);
-      }
-      grantedRoles.add(role.getName().toLowerCase());
-    }
-
-    // Add this role to the role ID mapping
-    roleIds_.put(role.getId(), role.getName());
-  }
-
-  /**
-   * Adds a new privilege to the policy mapping to the role specified by the
-   * role ID in the privilege.
-   * Throws a CatalogException if no role with a corresponding ID exists in the catalog.
-   */
-  public synchronized void addPrivilege(RolePrivilege privilege)
-      throws CatalogException {
-    LOG.trace("Adding privilege: " + privilege.getName() +
-        " role ID: " + privilege.getRoleId());
-    Role role = getRole(privilege.getRoleId());
-    if (role == null) {
-      throw new CatalogException(String.format("Error adding privilege: %s. Role ID " +
-          "'%d' does not exist.", privilege.getName(), privilege.getRoleId()));
-    }
-    LOG.trace("Adding privilege: " + privilege.getName() + " to role: " +
-        role.getName() + " ID: " + role.getId());
-    role.addPrivilege(privilege);
-  }
-
-  /**
-   * Removes a privilege from the policy mapping to the role specified by the
-   * role ID in the privilege.
-   * Throws a CatalogException if no role with a corresponding ID exists in the catalog.
-   * Returns null if no matching privilege is found in this role.
-   */
-  public synchronized RolePrivilege removePrivilege(RolePrivilege privilege)
-      throws CatalogException {
-    Role role = getRole(privilege.getRoleId());
-    if (role == null) {
-      throw new CatalogException(String.format("Error removing privilege: %s. Role ID " +
-          "'%d' does not exist.", privilege.getName(), privilege.getRoleId()));
-    }
-    LOG.trace("Removing privilege: '" + privilege.getName() + "' from Role ID: " +
-        privilege.getRoleId() + " Role Name: " + role.getName());
-    return role.removePrivilege(privilege.getName());
-  }
-
-  /**
-   * Returns all roles in the policy. Returns an empty list if no roles exist.
-   */
-  public synchronized List<Role> getAllRoles() {
-    return roleCache_.getValues();
-  }
-
-  /**
-   * Returns all role names in the policy. Returns an empty set if no roles exist.
-   */
-  public synchronized Set<String> getAllRoleNames() {
-    return Sets.newHashSet(roleCache_.keySet());
-  }
-
-  /**
-   * Gets a role given a role name. Returns null if no roles exist with this name.
-   */
-  public synchronized Role getRole(String roleName) {
-    return roleCache_.get(roleName);
-  }
-
-  /**
-   * Gets a role given a role ID. Returns null if no roles exist with this ID.
-   */
-  public synchronized Role getRole(int roleId) {
-    String roleName = roleIds_.get(roleId);
-    if (roleName == null) return null;
-    return roleCache_.get(roleName);
-  }
-
-  /**
-   * Gets a privilege from the given role ID. Returns null if there are no roles with a
-   * matching ID or if no privilege with this name exists for the role.
-   */
-  public synchronized RolePrivilege getPrivilege(int roleId, String privilegeName) {
-    String roleName = roleIds_.get(roleId);
-    if (roleName == null) return null;
-    Role role = roleCache_.get(roleName);
-    return role.getPrivilege(privilegeName);
-  }
-
-  /**
-   * Gets all roles granted to the specified group.
-   */
-  public synchronized List<Role> getGrantedRoles(String groupName) {
-    List<Role> grantedRoles = Lists.newArrayList();
-    Set<String> roleNames = groupsToRoles_.get(groupName);
-    if (roleNames != null) {
-      for (String roleName: roleNames) {
-        // TODO: verify they actually exist.
-        Role role = roleCache_.get(roleName);
-        if (role != null) grantedRoles.add(role);
-      }
-    }
-    return grantedRoles;
-  }
-
-  /**
-   * Removes a role. Returns the removed role or null if no role with
-   * this name existed.
-   */
-  public synchronized Role removeRole(String roleName) {
-    Role removedRole = roleCache_.remove(roleName);
-    if (removedRole == null) return null;
-    // Cleanup grant groups
-    for (String grantGroup: removedRole.getGrantGroups()) {
-      // Remove this role from all of its grant groups.
-      Set<String> roles = groupsToRoles_.get(grantGroup);
-      if (roles != null) roles.remove(roleName.toLowerCase());
-    }
-    // Cleanup role id.
-    roleIds_.remove(removedRole.getId());
-    return removedRole;
-  }
-
-  /**
-   * Adds a new grant group to the specified role. Returns the updated
-   * Role, if a matching role was found. If the role does not exist a
-   * CatalogException is thrown.
-   */
-  public synchronized Role addGrantGroup(String roleName, String groupName)
-      throws CatalogException {
-    Role role = roleCache_.get(roleName);
-    if (role == null) throw new CatalogException("Role does not exist: " + roleName);
-    role.addGrantGroup(groupName);
-    Set<String> grantedRoles = groupsToRoles_.get(groupName);
-    if (grantedRoles == null) {
-      grantedRoles = Sets.newHashSet();
-      groupsToRoles_.put(groupName, grantedRoles);
-    }
-    grantedRoles.add(roleName.toLowerCase());
-    return role;
-  }
-
-  /**
-   * Removes a grant group from the specified role. Returns the updated
-   * Role, if a matching role was found. If the role does not exist a
-   * CatalogException is thrown.
-   */
-  public synchronized Role removeGrantGroup(String roleName, String groupName)
-      throws CatalogException {
-    Role role = roleCache_.get(roleName);
-    if (role == null) throw new CatalogException("Role does not exist: " + roleName);
-    role.removeGrantGroup(groupName);
-    Set<String> grantedRoles = groupsToRoles_.get(groupName);
-    if (grantedRoles != null) {
-      grantedRoles.remove(roleName.toLowerCase());
-    }
-    return role;
-  }
-
-  /**
-   * Returns a set of privilege strings in Sentry format.
-   */
-  @Override
-  public synchronized Set<String>
-      listPrivileges(Set<String> groups, ActiveRoleSet roleSet) {
-    Set<String> privileges = Sets.newHashSet();
-    if (roleSet != ActiveRoleSet.ALL) {
-      throw new UnsupportedOperationException("Impala does not support role subsets.");
-    }
-
-    // Collect all privileges granted to all roles.
-    for (String groupName: groups) {
-      List<Role> grantedRoles = getGrantedRoles(groupName);
-      for (Role role: grantedRoles) {
-        for (RolePrivilege privilege: role.getPrivileges()) {
-          String authorizeable = privilege.getName();
-          if (authorizeable == null) {
-            LOG.trace("Ignoring invalid privilege: " + privilege.getName());
-            continue;
-          }
-          privileges.add(authorizeable);
-        }
-      }
-    }
-    return privileges;
-  }
-
-  @Override
-  public void close() {
-    // Nothing to do, but required by PrivilegeCache.
-  }
-
-  /**
-   * Returns the privileges that have been granted to a role as a tabular result set.
-   * Allows for filtering based on a specific privilege spec or showing all privileges
-   * granted to the role. Used by the SHOW GRANT ROLE statement.
-   */
-  public synchronized TResultSet getRolePrivileges(String roleName, TPrivilege filter) {
-    TResultSet result = new TResultSet();
-    result.setSchema(new TResultSetMetadata());
-    result.getSchema().addToColumns(new TColumn("scope", Type.STRING.toThrift()));
-    result.getSchema().addToColumns(new TColumn("database", Type.STRING.toThrift()));
-    result.getSchema().addToColumns(new TColumn("table", Type.STRING.toThrift()));
-    result.getSchema().addToColumns(new TColumn("column", Type.STRING.toThrift()));
-    result.getSchema().addToColumns(new TColumn("uri", Type.STRING.toThrift()));
-    result.getSchema().addToColumns(new TColumn("privilege", Type.STRING.toThrift()));
-    result.getSchema().addToColumns(
-        new TColumn("grant_option", Type.BOOLEAN.toThrift()));
-    result.getSchema().addToColumns(new TColumn("create_time", Type.STRING.toThrift()));
-    result.setRows(Lists.<TResultRow>newArrayList());
-
-    Role role = getRole(roleName);
-    if (role == null) return result;
-    for (RolePrivilege p: role.getPrivileges()) {
-      TPrivilege privilege = p.toThrift();
-      if (filter != null) {
-        // Check if the privileges are targeting the same object.
-        filter.setPrivilege_level(privilege.getPrivilege_level());
-        String privName = RolePrivilege.buildRolePrivilegeName(filter);
-        if (!privName.equalsIgnoreCase(privilege.getPrivilege_name())) continue;
-      }
-      TResultRowBuilder rowBuilder = new TResultRowBuilder();
-      rowBuilder.add(privilege.getScope().toString());
-      rowBuilder.add(Strings.nullToEmpty(privilege.getDb_name()));
-      rowBuilder.add(Strings.nullToEmpty(privilege.getTable_name()));
-      rowBuilder.add(Strings.nullToEmpty(privilege.getColumn_name()));
-      rowBuilder.add(Strings.nullToEmpty(privilege.getUri()));
-      rowBuilder.add(privilege.getPrivilege_level().toString());
-      rowBuilder.add(Boolean.toString(privilege.isHas_grant_opt()));
-      if (privilege.getCreate_time_ms() == -1) {
-        rowBuilder.add(null);
-      } else {
-        rowBuilder.add(
-            TimeStamp.getNtpTime(privilege.getCreate_time_ms()).toDateString());
-      }
-      result.addToRows(rowBuilder.get());
-    }
-    return result;
-  }
-}
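
The listPrivileges() implementation above is essentially two map hops: from a group name to the set of role names granted to it, then from each role to its privilege strings. A minimal sketch of that lookup shape with plain collections follows; the class, role, group, and privilege strings are invented for illustration, and the real cache additionally tracks role IDs, catalog versions, and synchronization.

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class PolicyCacheSketch {
  // Group name (case sensitive) -> role names (lower-cased), like groupsToRoles_ above.
  private final Map<String, Set<String>> groupsToRoles = new HashMap<>();
  // Role name (lower-cased) -> privilege strings; a simplified stand-in for
  // Role + RolePrivilege objects.
  private final Map<String, Set<String>> rolePrivileges = new HashMap<>();

  void grantRoleToGroup(String roleName, String groupName) {
    groupsToRoles.computeIfAbsent(groupName, g -> new HashSet<>())
        .add(roleName.toLowerCase());
    rolePrivileges.computeIfAbsent(roleName.toLowerCase(), r -> new HashSet<>());
  }

  void addPrivilege(String roleName, String privilege) {
    rolePrivileges.computeIfAbsent(roleName.toLowerCase(), r -> new HashSet<>())
        .add(privilege);
  }

  // Same two hops as listPrivileges(): collect the privileges of every role granted to
  // any of the given groups. Requires Java 9+ for Set.of().
  Set<String> listPrivileges(Set<String> groups) {
    Set<String> result = new HashSet<>();
    for (String group : groups) {
      for (String role : groupsToRoles.getOrDefault(group, Set.of())) {
        result.addAll(rolePrivileges.getOrDefault(role, Set.of()));
      }
    }
    return result;
  }

  public static void main(String[] args) {
    PolicyCacheSketch policy = new PolicyCacheSketch();
    // Hypothetical role, group, and privilege strings.
    policy.grantRoleToGroup("analysts", "etl_users");
    policy.addPrivilege("analysts", "server=server1->db=sales->action=select");
    System.out.println(policy.listPrivileges(Set.of("etl_users")));
  }
}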

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/catalog/BuiltinsDb.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/catalog/BuiltinsDb.java b/fe/src/main/java/com/cloudera/impala/catalog/BuiltinsDb.java
deleted file mode 100644
index 972756c..0000000
--- a/fe/src/main/java/com/cloudera/impala/catalog/BuiltinsDb.java
+++ /dev/null
@@ -1,1052 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.catalog;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Map;
-
-import org.apache.hadoop.hive.metastore.api.Database;
-
-import com.cloudera.impala.analysis.ArithmeticExpr;
-import com.cloudera.impala.analysis.BinaryPredicate;
-import com.cloudera.impala.analysis.CaseExpr;
-import com.cloudera.impala.analysis.CastExpr;
-import com.cloudera.impala.analysis.CompoundPredicate;
-import com.cloudera.impala.analysis.InPredicate;
-import com.cloudera.impala.analysis.IsNullPredicate;
-import com.cloudera.impala.analysis.LikePredicate;
-import com.cloudera.impala.builtins.ScalarBuiltins;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Lists;
-
-public class BuiltinsDb extends Db {
-  public BuiltinsDb(String name, Catalog catalog) {
-    super(name, catalog, createMetastoreDb(name));
-    setIsSystemDb(true);
-    initBuiltins();
-  }
-
-  /**
-   * Initializes all the builtins.
-   */
-  private void initBuiltins() {
-    // Populate all aggregate builtins.
-    initAggregateBuiltins();
-
-    // Populate all scalar builtins.
-    ArithmeticExpr.initBuiltins(this);
-    BinaryPredicate.initBuiltins(this);
-    CastExpr.initBuiltins(this);
-    CaseExpr.initBuiltins(this);
-    CompoundPredicate.initBuiltins(this);
-    InPredicate.initBuiltins(this);
-    IsNullPredicate.initBuiltins(this);
-    LikePredicate.initBuiltins(this);
-    ScalarBuiltins.initBuiltins(this);
-  }
-
-  private static final String BUILTINS_DB_COMMENT = "System database for Impala builtin functions";
-
-  private static Database createMetastoreDb(String name) {
-    return new org.apache.hadoop.hive.metastore.api.Database(name,
-        BUILTINS_DB_COMMENT, "", Collections.<String,String>emptyMap());
-  }
-
-  private static final Map<Type, String> SAMPLE_INIT_SYMBOL =
-      ImmutableMap.<Type, String>builder()
-        .put(Type.BOOLEAN,
-            "19ReservoirSampleInitIN10impala_udf10BooleanValEEEvPNS2_15FunctionContextEPNS2_9StringValE")
-        .put(Type.TINYINT,
-            "19ReservoirSampleInitIN10impala_udf10TinyIntValEEEvPNS2_15FunctionContextEPNS2_9StringValE")
-        .put(Type.SMALLINT,
-            "19ReservoirSampleInitIN10impala_udf11SmallIntValEEEvPNS2_15FunctionContextEPNS2_9StringValE")
-        .put(Type.INT,
-            "19ReservoirSampleInitIN10impala_udf6IntValEEEvPNS2_15FunctionContextEPNS2_9StringValE")
-        .put(Type.BIGINT,
-            "19ReservoirSampleInitIN10impala_udf9BigIntValEEEvPNS2_15FunctionContextEPNS2_9StringValE")
-        .put(Type.FLOAT,
-            "19ReservoirSampleInitIN10impala_udf8FloatValEEEvPNS2_15FunctionContextEPNS2_9StringValE")
-        .put(Type.DOUBLE,
-            "19ReservoirSampleInitIN10impala_udf9DoubleValEEEvPNS2_15FunctionContextEPNS2_9StringValE")
-        .put(Type.STRING,
-            "19ReservoirSampleInitIN10impala_udf9StringValEEEvPNS2_15FunctionContextEPS3_")
-        .put(Type.TIMESTAMP,
-            "19ReservoirSampleInitIN10impala_udf12TimestampValEEEvPNS2_15FunctionContextEPNS2_9StringValE")
-        .put(Type.DECIMAL,
-            "19ReservoirSampleInitIN10impala_udf10DecimalValEEEvPNS2_15FunctionContextEPNS2_9StringValE")
-        .build();
-
-  private static final Map<Type, String> SAMPLE_SERIALIZE_SYMBOL =
-      ImmutableMap.<Type, String>builder()
-        .put(Type.BOOLEAN,
-             "24ReservoirSampleSerializeIN10impala_udf10BooleanValEEEKNS2_9StringValEPNS2_15FunctionContextERS5_")
-        .put(Type.TINYINT,
-             "24ReservoirSampleSerializeIN10impala_udf10TinyIntValEEEKNS2_9StringValEPNS2_15FunctionContextERS5_")
-        .put(Type.SMALLINT,
-             "24ReservoirSampleSerializeIN10impala_udf11SmallIntValEEEKNS2_9StringValEPNS2_15FunctionContextERS5_")
-        .put(Type.INT,
-             "24ReservoirSampleSerializeIN10impala_udf6IntValEEEKNS2_9StringValEPNS2_15FunctionContextERS5_")
-        .put(Type.BIGINT,
-             "24ReservoirSampleSerializeIN10impala_udf9BigIntValEEEKNS2_9StringValEPNS2_15FunctionContextERS5_")
-        .put(Type.FLOAT,
-             "24ReservoirSampleSerializeIN10impala_udf8FloatValEEEKNS2_9StringValEPNS2_15FunctionContextERS5_")
-        .put(Type.DOUBLE,
-             "24ReservoirSampleSerializeIN10impala_udf9DoubleValEEEKNS2_9StringValEPNS2_15FunctionContextERS5_")
-        .put(Type.STRING,
-             "24ReservoirSampleSerializeIN10impala_udf9StringValEEEKS3_PNS2_15FunctionContextERS4_")
-        .put(Type.TIMESTAMP,
-             "24ReservoirSampleSerializeIN10impala_udf12TimestampValEEEKNS2_9StringValEPNS2_15FunctionContextERS5_")
-        .put(Type.DECIMAL,
-             "24ReservoirSampleSerializeIN10impala_udf10DecimalValEEEKNS2_9StringValEPNS2_15FunctionContextERS5_")
-        .build();
-
-  private static final Map<Type, String> SAMPLE_MERGE_SYMBOL =
-      ImmutableMap.<Type, String>builder()
-        .put(Type.BOOLEAN,
-            "20ReservoirSampleMergeIN10impala_udf10BooleanValEEEvPNS2_15FunctionContextERKNS2_9StringValEPS6_")
-        .put(Type.TINYINT,
-            "20ReservoirSampleMergeIN10impala_udf10TinyIntValEEEvPNS2_15FunctionContextERKNS2_9StringValEPS6_")
-        .put(Type.SMALLINT,
-            "20ReservoirSampleMergeIN10impala_udf11SmallIntValEEEvPNS2_15FunctionContextERKNS2_9StringValEPS6_")
-        .put(Type.INT,
-            "20ReservoirSampleMergeIN10impala_udf6IntValEEEvPNS2_15FunctionContextERKNS2_9StringValEPS6_")
-        .put(Type.BIGINT,
-            "20ReservoirSampleMergeIN10impala_udf9BigIntValEEEvPNS2_15FunctionContextERKNS2_9StringValEPS6_")
-        .put(Type.FLOAT,
-            "20ReservoirSampleMergeIN10impala_udf8FloatValEEEvPNS2_15FunctionContextERKNS2_9StringValEPS6_")
-        .put(Type.DOUBLE,
-            "20ReservoirSampleMergeIN10impala_udf9DoubleValEEEvPNS2_15FunctionContextERKNS2_9StringValEPS6_")
-        .put(Type.STRING,
-            "20ReservoirSampleMergeIN10impala_udf9StringValEEEvPNS2_15FunctionContextERKS3_PS3_")
-        .put(Type.TIMESTAMP,
-            "20ReservoirSampleMergeIN10impala_udf12TimestampValEEEvPNS2_15FunctionContextERKNS2_9StringValEPS6_")
-        .put(Type.DECIMAL,
-            "20ReservoirSampleMergeIN10impala_udf10DecimalValEEEvPNS2_15FunctionContextERKNS2_9StringValEPS6_")
-        .build();
-
-  private static final Map<Type, String> SAMPLE_UPDATE_SYMBOL =
-      ImmutableMap.<Type, String>builder()
-        .put(Type.BOOLEAN,
-            "21ReservoirSampleUpdateIN10impala_udf10BooleanValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE")
-        .put(Type.TINYINT,
-            "21ReservoirSampleUpdateIN10impala_udf10TinyIntValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE")
-        .put(Type.SMALLINT,
-            "21ReservoirSampleUpdateIN10impala_udf11SmallIntValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE")
-        .put(Type.INT,
-            "21ReservoirSampleUpdateIN10impala_udf6IntValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE")
-        .put(Type.BIGINT,
-            "21ReservoirSampleUpdateIN10impala_udf9BigIntValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE")
-        .put(Type.FLOAT,
-            "21ReservoirSampleUpdateIN10impala_udf8FloatValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE")
-        .put(Type.DOUBLE,
-            "21ReservoirSampleUpdateIN10impala_udf9DoubleValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE")
-        .put(Type.STRING,
-            "21ReservoirSampleUpdateIN10impala_udf9StringValEEEvPNS2_15FunctionContextERKT_PS3_")
-        .put(Type.TIMESTAMP,
-            "21ReservoirSampleUpdateIN10impala_udf12TimestampValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE")
-        .put(Type.DECIMAL,
-            "21ReservoirSampleUpdateIN10impala_udf10DecimalValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE")
-        .build();
-
-  private static final Map<Type, String> SAMPLE_FINALIZE_SYMBOL =
-      ImmutableMap.<Type, String>builder()
-        .put(Type.BOOLEAN,
-             "23ReservoirSampleFinalizeIN10impala_udf10BooleanValEEENS2_9StringValEPNS2_15FunctionContextERKS4_")
-        .put(Type.TINYINT,
-             "23ReservoirSampleFinalizeIN10impala_udf10TinyIntValEEENS2_9StringValEPNS2_15FunctionContextERKS4_")
-        .put(Type.SMALLINT,
-             "23ReservoirSampleFinalizeIN10impala_udf11SmallIntValEEENS2_9StringValEPNS2_15FunctionContextERKS4_")
-        .put(Type.INT,
-             "23ReservoirSampleFinalizeIN10impala_udf6IntValEEENS2_9StringValEPNS2_15FunctionContextERKS4_")
-        .put(Type.BIGINT,
-             "23ReservoirSampleFinalizeIN10impala_udf9BigIntValEEENS2_9StringValEPNS2_15FunctionContextERKS4_")
-        .put(Type.FLOAT,
-             "23ReservoirSampleFinalizeIN10impala_udf8FloatValEEENS2_9StringValEPNS2_15FunctionContextERKS4_")
-        .put(Type.DOUBLE,
-             "23ReservoirSampleFinalizeIN10impala_udf9DoubleValEEENS2_9StringValEPNS2_15FunctionContextERKS4_")
-        .put(Type.STRING,
-             "23ReservoirSampleFinalizeIN10impala_udf9StringValEEES3_PNS2_15FunctionContextERKS3_")
-        .put(Type.TIMESTAMP,
-             "23ReservoirSampleFinalizeIN10impala_udf12TimestampValEEENS2_9StringValEPNS2_15FunctionContextERKS4_")
-        .put(Type.DECIMAL,
-             "23ReservoirSampleFinalizeIN10impala_udf10DecimalValEEENS2_9StringValEPNS2_15FunctionContextERKS4_")
-        .build();
-
-  private static final Map<Type, String> UPDATE_VAL_SYMBOL =
-      ImmutableMap.<Type, String>builder()
-        .put(Type.BOOLEAN,
-             "9UpdateValIN10impala_udf10BooleanValEEEvPNS2_15FunctionContextERKT_PS6_")
-        .put(Type.DECIMAL,
-             "9UpdateValIN10impala_udf10DecimalValEEEvPNS2_15FunctionContextERKT_PS6_")
-        .put(Type.TINYINT,
-             "9UpdateValIN10impala_udf10TinyIntValEEEvPNS2_15FunctionContextERKT_PS6_")
-        .put(Type.SMALLINT,
-             "9UpdateValIN10impala_udf11SmallIntValEEEvPNS2_15FunctionContextERKT_PS6_")
-        .put(Type.TIMESTAMP,
-             "9UpdateValIN10impala_udf12TimestampValEEEvPNS2_15FunctionContextERKT_PS6_")
-        .put(Type.INT,
-             "9UpdateValIN10impala_udf6IntValEEEvPNS2_15FunctionContextERKT_PS6_")
-        .put(Type.FLOAT,
-             "9UpdateValIN10impala_udf8FloatValEEEvPNS2_15FunctionContextERKT_PS6_")
-        .put(Type.BIGINT,
-             "9UpdateValIN10impala_udf9BigIntValEEEvPNS2_15FunctionContextERKT_PS6_")
-        .put(Type.DOUBLE,
-             "9UpdateValIN10impala_udf9DoubleValEEEvPNS2_15FunctionContextERKT_PS6_")
-        .put(Type.STRING,
-             "9UpdateValIN10impala_udf9StringValEEEvPNS2_15FunctionContextERKT_PS6_")
-        .build();
-
-  private static final Map<Type, String> APPX_MEDIAN_FINALIZE_SYMBOL =
-      ImmutableMap.<Type, String>builder()
-        .put(Type.BOOLEAN,
-            "18AppxMedianFinalizeIN10impala_udf10BooleanValEEET_PNS2_15FunctionContextERKNS2_9StringValE")
-        .put(Type.DECIMAL,
-            "18AppxMedianFinalizeIN10impala_udf10DecimalValEEET_PNS2_15FunctionContextERKNS2_9StringValE")
-        .put(Type.TINYINT,
-            "18AppxMedianFinalizeIN10impala_udf10TinyIntValEEET_PNS2_15FunctionContextERKNS2_9StringValE")
-        .put(Type.SMALLINT,
-            "18AppxMedianFinalizeIN10impala_udf11SmallIntValEEET_PNS2_15FunctionContextERKNS2_9StringValE")
-        .put(Type.TIMESTAMP,
-            "18AppxMedianFinalizeIN10impala_udf12TimestampValEEET_PNS2_15FunctionContextERKNS2_9StringValE")
-        .put(Type.INT,
-            "18AppxMedianFinalizeIN10impala_udf6IntValEEET_PNS2_15FunctionContextERKNS2_9StringValE")
-        .put(Type.FLOAT,
-            "18AppxMedianFinalizeIN10impala_udf8FloatValEEET_PNS2_15FunctionContextERKNS2_9StringValE")
-        .put(Type.BIGINT,
-            "18AppxMedianFinalizeIN10impala_udf9BigIntValEEET_PNS2_15FunctionContextERKNS2_9StringValE")
-        .put(Type.DOUBLE,
-            "18AppxMedianFinalizeIN10impala_udf9DoubleValEEET_PNS2_15FunctionContextERKNS2_9StringValE")
-        .put(Type.STRING,
-            "18AppxMedianFinalizeIN10impala_udf9StringValEEET_PNS2_15FunctionContextERKS3_")
-        .build();
-
-  private static final Map<Type, String> HISTOGRAM_FINALIZE_SYMBOL =
-      ImmutableMap.<Type, String>builder()
-        .put(Type.BOOLEAN,
-             "17HistogramFinalizeIN10impala_udf10BooleanValEEENS2_9StringValEPNS2_15FunctionContextERKS4_")
-        .put(Type.TINYINT,
-             "17HistogramFinalizeIN10impala_udf10TinyIntValEEENS2_9StringValEPNS2_15FunctionContextERKS4_")
-        .put(Type.SMALLINT,
-             "17HistogramFinalizeIN10impala_udf11SmallIntValEEENS2_9StringValEPNS2_15FunctionContextERKS4_")
-        .put(Type.INT,
-             "17HistogramFinalizeIN10impala_udf6IntValEEENS2_9StringValEPNS2_15FunctionContextERKS4_")
-        .put(Type.BIGINT,
-             "17HistogramFinalizeIN10impala_udf9BigIntValEEENS2_9StringValEPNS2_15FunctionContextERKS4_")
-        .put(Type.FLOAT,
-             "17HistogramFinalizeIN10impala_udf8FloatValEEENS2_9StringValEPNS2_15FunctionContextERKS4_")
-        .put(Type.DOUBLE,
-             "17HistogramFinalizeIN10impala_udf9DoubleValEEENS2_9StringValEPNS2_15FunctionContextERKS4_")
-        .put(Type.STRING,
-             "17HistogramFinalizeIN10impala_udf9StringValEEES3_PNS2_15FunctionContextERKS3_")
-        .put(Type.TIMESTAMP,
-             "17HistogramFinalizeIN10impala_udf12TimestampValEEENS2_9StringValEPNS2_15FunctionContextERKS4_")
-        .put(Type.DECIMAL,
-             "17HistogramFinalizeIN10impala_udf10DecimalValEEENS2_9StringValEPNS2_15FunctionContextERKS4_")
-        .build();
-
-  private static final Map<Type, String> HLL_UPDATE_SYMBOL =
-      ImmutableMap.<Type, String>builder()
-        .put(Type.BOOLEAN,
-            "9HllUpdateIN10impala_udf10BooleanValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE")
-        .put(Type.TINYINT,
-            "9HllUpdateIN10impala_udf10TinyIntValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE")
-        .put(Type.SMALLINT,
-            "9HllUpdateIN10impala_udf11SmallIntValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE")
-        .put(Type.INT,
-            "9HllUpdateIN10impala_udf6IntValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE")
-        .put(Type.BIGINT,
-            "9HllUpdateIN10impala_udf9BigIntValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE")
-        .put(Type.FLOAT,
-            "9HllUpdateIN10impala_udf8FloatValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE")
-        .put(Type.DOUBLE,
-            "9HllUpdateIN10impala_udf9DoubleValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE")
-        .put(Type.STRING,
-            "9HllUpdateIN10impala_udf9StringValEEEvPNS2_15FunctionContextERKT_PS3_")
-        .put(Type.TIMESTAMP,
-            "9HllUpdateIN10impala_udf12TimestampValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE")
-        .put(Type.DECIMAL,
-            "9HllUpdateIN10impala_udf10DecimalValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE")
-        .build();
-
-  private static final Map<Type, String> PC_UPDATE_SYMBOL =
-      ImmutableMap.<Type, String>builder()
-        .put(Type.BOOLEAN,
-            "8PcUpdateIN10impala_udf10BooleanValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE")
-        .put(Type.TINYINT,
-            "8PcUpdateIN10impala_udf10TinyIntValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE")
-        .put(Type.SMALLINT,
-            "8PcUpdateIN10impala_udf11SmallIntValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE")
-        .put(Type.INT,
-            "8PcUpdateIN10impala_udf6IntValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE")
-        .put(Type.BIGINT,
-            "8PcUpdateIN10impala_udf9BigIntValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE")
-        .put(Type.FLOAT,
-            "8PcUpdateIN10impala_udf8FloatValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE")
-        .put(Type.DOUBLE,
-            "8PcUpdateIN10impala_udf9DoubleValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE")
-        .put(Type.STRING,
-            "8PcUpdateIN10impala_udf9StringValEEEvPNS2_15FunctionContextERKT_PS3_")
-        .put(Type.TIMESTAMP,
-            "8PcUpdateIN10impala_udf12TimestampValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE")
-        .put(Type.DECIMAL,
-            "8PcUpdateIN10impala_udf10DecimalValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE")
-        .build();
-
-  private static final Map<Type, String> PCSA_UPDATE_SYMBOL =
-      ImmutableMap.<Type, String>builder()
-        .put(Type.BOOLEAN,
-            "10PcsaUpdateIN10impala_udf10BooleanValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE")
-        .put(Type.TINYINT,
-            "10PcsaUpdateIN10impala_udf10TinyIntValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE")
-        .put(Type.SMALLINT,
-            "10PcsaUpdateIN10impala_udf11SmallIntValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE")
-        .put(Type.INT,
-            "10PcsaUpdateIN10impala_udf6IntValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE")
-        .put(Type.BIGINT,
-            "10PcsaUpdateIN10impala_udf9BigIntValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE")
-        .put(Type.FLOAT,
-            "10PcsaUpdateIN10impala_udf8FloatValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE")
-        .put(Type.DOUBLE,
-            "10PcsaUpdateIN10impala_udf9DoubleValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE")
-        .put(Type.STRING,
-            "10PcsaUpdateIN10impala_udf9StringValEEEvPNS2_15FunctionContextERKT_PS3_")
-        .put(Type.TIMESTAMP,
-            "10PcsaUpdateIN10impala_udf12TimestampValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE")
-        .put(Type.DECIMAL,
-            "10PcsaUpdateIN10impala_udf10DecimalValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE")
-        .build();
-
-  private static final Map<Type, String> MIN_UPDATE_SYMBOL =
-      ImmutableMap.<Type, String>builder()
-        .put(Type.BOOLEAN,
-            "3MinIN10impala_udf10BooleanValEEEvPNS2_15FunctionContextERKT_PS6_")
-        .put(Type.TINYINT,
-            "3MinIN10impala_udf10TinyIntValEEEvPNS2_15FunctionContextERKT_PS6_")
-        .put(Type.SMALLINT,
-            "3MinIN10impala_udf11SmallIntValEEEvPNS2_15FunctionContextERKT_PS6_")
-        .put(Type.INT,
-            "3MinIN10impala_udf6IntValEEEvPNS2_15FunctionContextERKT_PS6_")
-        .put(Type.BIGINT,
-            "3MinIN10impala_udf9BigIntValEEEvPNS2_15FunctionContextERKT_PS6_")
-        .put(Type.FLOAT,
-            "3MinIN10impala_udf8FloatValEEEvPNS2_15FunctionContextERKT_PS6_")
-        .put(Type.DOUBLE,
-            "3MinIN10impala_udf9DoubleValEEEvPNS2_15FunctionContextERKT_PS6_")
-        .put(Type.STRING,
-            "3MinIN10impala_udf9StringValEEEvPNS2_15FunctionContextERKT_PS6_")
-        .put(Type.TIMESTAMP,
-            "3MinIN10impala_udf12TimestampValEEEvPNS2_15FunctionContextERKT_PS6_")
-        .put(Type.DECIMAL,
-            "3MinIN10impala_udf10DecimalValEEEvPNS2_15FunctionContextERKT_PS6_")
-        .build();
-
-  private static final Map<Type, String> MAX_UPDATE_SYMBOL =
-      ImmutableMap.<Type, String>builder()
-        .put(Type.BOOLEAN,
-            "3MaxIN10impala_udf10BooleanValEEEvPNS2_15FunctionContextERKT_PS6_")
-        .put(Type.TINYINT,
-            "3MaxIN10impala_udf10TinyIntValEEEvPNS2_15FunctionContextERKT_PS6_")
-        .put(Type.SMALLINT,
-            "3MaxIN10impala_udf11SmallIntValEEEvPNS2_15FunctionContextERKT_PS6_")
-        .put(Type.INT,
-            "3MaxIN10impala_udf6IntValEEEvPNS2_15FunctionContextERKT_PS6_")
-        .put(Type.BIGINT,
-            "3MaxIN10impala_udf9BigIntValEEEvPNS2_15FunctionContextERKT_PS6_")
-        .put(Type.FLOAT,
-            "3MaxIN10impala_udf8FloatValEEEvPNS2_15FunctionContextERKT_PS6_")
-        .put(Type.DOUBLE,
-            "3MaxIN10impala_udf9DoubleValEEEvPNS2_15FunctionContextERKT_PS6_")
-        .put(Type.STRING,
-            "3MaxIN10impala_udf9StringValEEEvPNS2_15FunctionContextERKT_PS6_")
-        .put(Type.TIMESTAMP,
-            "3MaxIN10impala_udf12TimestampValEEEvPNS2_15FunctionContextERKT_PS6_")
-        .put(Type.DECIMAL,
-            "3MaxIN10impala_udf10DecimalValEEEvPNS2_15FunctionContextERKT_PS6_")
-        .build();
-
-  private static final Map<Type, String> STDDEV_UPDATE_SYMBOL =
-      ImmutableMap.<Type, String>builder()
-        .put(Type.TINYINT,
-            "14KnuthVarUpdateIN10impala_udf10TinyIntValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE")
-        .put(Type.SMALLINT,
-            "14KnuthVarUpdateIN10impala_udf11SmallIntValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE")
-        .put(Type.INT,
-            "14KnuthVarUpdateIN10impala_udf6IntValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE")
-        .put(Type.BIGINT,
-            "14KnuthVarUpdateIN10impala_udf9BigIntValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE")
-        .put(Type.FLOAT,
-            "14KnuthVarUpdateIN10impala_udf8FloatValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE")
-        .put(Type.DOUBLE,
-            "14KnuthVarUpdateIN10impala_udf9DoubleValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE")
-        .build();
-  private static final Map<Type, String> OFFSET_FN_INIT_SYMBOL =
-      ImmutableMap.<Type, String>builder()
-        .put(Type.BOOLEAN,
-             "12OffsetFnInitIN10impala_udf10BooleanValEEEvPNS2_15FunctionContextEPT_")
-        .put(Type.DECIMAL,
-             "12OffsetFnInitIN10impala_udf10DecimalValEEEvPNS2_15FunctionContextEPT_")
-        .put(Type.TINYINT,
-             "12OffsetFnInitIN10impala_udf10TinyIntValEEEvPNS2_15FunctionContextEPT_")
-        .put(Type.SMALLINT,
-             "12OffsetFnInitIN10impala_udf11SmallIntValEEEvPNS2_15FunctionContextEPT_")
-        .put(Type.TIMESTAMP,
-             "12OffsetFnInitIN10impala_udf12TimestampValEEEvPNS2_15FunctionContextEPT_")
-        .put(Type.INT,
-             "12OffsetFnInitIN10impala_udf6IntValEEEvPNS2_15FunctionContextEPT_")
-        .put(Type.FLOAT,
-             "12OffsetFnInitIN10impala_udf8FloatValEEEvPNS2_15FunctionContextEPT_")
-        .put(Type.BIGINT,
-             "12OffsetFnInitIN10impala_udf9BigIntValEEEvPNS2_15FunctionContextEPT_")
-        .put(Type.DOUBLE,
-             "12OffsetFnInitIN10impala_udf9DoubleValEEEvPNS2_15FunctionContextEPT_")
-        .put(Type.STRING,
-             "12OffsetFnInitIN10impala_udf9StringValEEEvPNS2_15FunctionContextEPT_")
-        .build();
-
-  private static final Map<Type, String> OFFSET_FN_UPDATE_SYMBOL =
-      ImmutableMap.<Type, String>builder()
-        .put(Type.BOOLEAN,
-             "14OffsetFnUpdateIN10impala_udf10BooleanValEEEvPNS2_15FunctionContextERKT_RKNS2_9BigIntValES8_PS6_")
-        .put(Type.DECIMAL,
-             "14OffsetFnUpdateIN10impala_udf10DecimalValEEEvPNS2_15FunctionContextERKT_RKNS2_9BigIntValES8_PS6_")
-        .put(Type.TINYINT,
-             "14OffsetFnUpdateIN10impala_udf10TinyIntValEEEvPNS2_15FunctionContextERKT_RKNS2_9BigIntValES8_PS6_")
-        .put(Type.SMALLINT,
-             "14OffsetFnUpdateIN10impala_udf11SmallIntValEEEvPNS2_15FunctionContextERKT_RKNS2_9BigIntValES8_PS6_")
-        .put(Type.TIMESTAMP,
-             "14OffsetFnUpdateIN10impala_udf12TimestampValEEEvPNS2_15FunctionContextERKT_RKNS2_9BigIntValES8_PS6_")
-        .put(Type.INT,
-             "14OffsetFnUpdateIN10impala_udf6IntValEEEvPNS2_15FunctionContextERKT_RKNS2_9BigIntValES8_PS6_")
-        .put(Type.FLOAT,
-             "14OffsetFnUpdateIN10impala_udf8FloatValEEEvPNS2_15FunctionContextERKT_RKNS2_9BigIntValES8_PS6_")
-        .put(Type.BIGINT,
-             "14OffsetFnUpdateIN10impala_udf9BigIntValEEEvPNS2_15FunctionContextERKT_RKS3_S8_PS6_")
-        .put(Type.DOUBLE,
-             "14OffsetFnUpdateIN10impala_udf9DoubleValEEEvPNS2_15FunctionContextERKT_RKNS2_9BigIntValES8_PS6_")
-        .put(Type.STRING,
-             "14OffsetFnUpdateIN10impala_udf9StringValEEEvPNS2_15FunctionContextERKT_RKNS2_9BigIntValES8_PS6_")
-        .build();
-
-  private static final Map<Type, String> FIRST_VALUE_REWRITE_UPDATE_SYMBOL =
-      ImmutableMap.<Type, String>builder()
-        .put(Type.BOOLEAN,
-             "21FirstValRewriteUpdateIN10impala_udf10BooleanValEEEvPNS2_15FunctionContextERKT_RKNS2_9BigIntValEPS6_")
-        .put(Type.DECIMAL,
-             "21FirstValRewriteUpdateIN10impala_udf10DecimalValEEEvPNS2_15FunctionContextERKT_RKNS2_9BigIntValEPS6_")
-        .put(Type.TINYINT,
-             "21FirstValRewriteUpdateIN10impala_udf10TinyIntValEEEvPNS2_15FunctionContextERKT_RKNS2_9BigIntValEPS6_")
-        .put(Type.SMALLINT,
-             "21FirstValRewriteUpdateIN10impala_udf11SmallIntValEEEvPNS2_15FunctionContextERKT_RKNS2_9BigIntValEPS6_")
-        .put(Type.TIMESTAMP,
-             "21FirstValRewriteUpdateIN10impala_udf12TimestampValEEEvPNS2_15FunctionContextERKT_RKNS2_9BigIntValEPS6_")
-        .put(Type.INT,
-             "21FirstValRewriteUpdateIN10impala_udf6IntValEEEvPNS2_15FunctionContextERKT_RKNS2_9BigIntValEPS6_")
-        .put(Type.FLOAT,
-             "21FirstValRewriteUpdateIN10impala_udf8FloatValEEEvPNS2_15FunctionContextERKT_RKNS2_9BigIntValEPS6_")
-        .put(Type.BIGINT,
-             "21FirstValRewriteUpdateIN10impala_udf9BigIntValEEEvPNS2_15FunctionContextERKT_RKS3_PS6_")
-        .put(Type.DOUBLE,
-             "21FirstValRewriteUpdateIN10impala_udf9DoubleValEEEvPNS2_15FunctionContextERKT_RKNS2_9BigIntValEPS6_")
-        .put(Type.STRING,
-             "21FirstValRewriteUpdateIN10impala_udf9StringValEEEvPNS2_15FunctionContextERKT_RKNS2_9BigIntValEPS6_")
-        .build();
-
-  private static final Map<Type, String> LAST_VALUE_REMOVE_SYMBOL =
-      ImmutableMap.<Type, String>builder()
-        .put(Type.BOOLEAN,
-             "13LastValRemoveIN10impala_udf10BooleanValEEEvPNS2_15FunctionContextERKT_PS6_")
-        .put(Type.DECIMAL,
-             "13LastValRemoveIN10impala_udf10DecimalValEEEvPNS2_15FunctionContextERKT_PS6_")
-        .put(Type.TINYINT,
-             "13LastValRemoveIN10impala_udf10TinyIntValEEEvPNS2_15FunctionContextERKT_PS6_")
-        .put(Type.SMALLINT,
-             "13LastValRemoveIN10impala_udf11SmallIntValEEEvPNS2_15FunctionContextERKT_PS6_")
-        .put(Type.TIMESTAMP,
-             "13LastValRemoveIN10impala_udf12TimestampValEEEvPNS2_15FunctionContextERKT_PS6_")
-        .put(Type.INT,
-             "13LastValRemoveIN10impala_udf6IntValEEEvPNS2_15FunctionContextERKT_PS6_")
-        .put(Type.FLOAT,
-             "13LastValRemoveIN10impala_udf8FloatValEEEvPNS2_15FunctionContextERKT_PS6_")
-        .put(Type.BIGINT,
-             "13LastValRemoveIN10impala_udf9BigIntValEEEvPNS2_15FunctionContextERKT_PS6_")
-        .put(Type.DOUBLE,
-             "13LastValRemoveIN10impala_udf9DoubleValEEEvPNS2_15FunctionContextERKT_PS6_")
-        .put(Type.STRING,
-             "13LastValRemoveIN10impala_udf9StringValEEEvPNS2_15FunctionContextERKT_PS6_")
-        .build();
-
-  private static final Map<Type, String> LAST_VALUE_IGNORE_NULLS_INIT_SYMBOL =
-      ImmutableMap.<Type, String>builder()
-        .put(Type.BOOLEAN,
-            "22LastValIgnoreNullsInitIN10impala_udf10BooleanValEEEvPNS2_15FunctionContextEPNS2_9StringValE")
-        .put(Type.TINYINT,
-            "22LastValIgnoreNullsInitIN10impala_udf10TinyIntValEEEvPNS2_15FunctionContextEPNS2_9StringValE")
-        .put(Type.SMALLINT,
-            "22LastValIgnoreNullsInitIN10impala_udf11SmallIntValEEEvPNS2_15FunctionContextEPNS2_9StringValE")
-        .put(Type.INT,
-            "22LastValIgnoreNullsInitIN10impala_udf6IntValEEEvPNS2_15FunctionContextEPNS2_9StringValE")
-        .put(Type.BIGINT,
-            "22LastValIgnoreNullsInitIN10impala_udf9BigIntValEEEvPNS2_15FunctionContextEPNS2_9StringValE")
-        .put(Type.FLOAT,
-            "22LastValIgnoreNullsInitIN10impala_udf8FloatValEEEvPNS2_15FunctionContextEPNS2_9StringValE")
-        .put(Type.DOUBLE,
-            "22LastValIgnoreNullsInitIN10impala_udf9DoubleValEEEvPNS2_15FunctionContextEPNS2_9StringValE")
-        .put(Type.STRING,
-            "22LastValIgnoreNullsInitIN10impala_udf9StringValEEEvPNS2_15FunctionContextEPS3_")
-        .put(Type.TIMESTAMP,
-            "22LastValIgnoreNullsInitIN10impala_udf12TimestampValEEEvPNS2_15FunctionContextEPNS2_9StringValE")
-        .put(Type.DECIMAL,
-            "22LastValIgnoreNullsInitIN10impala_udf10DecimalValEEEvPNS2_15FunctionContextEPNS2_9StringValE")
-        .build();
-
-  private static final Map<Type, String> LAST_VALUE_IGNORE_NULLS_UPDATE_SYMBOL =
-      ImmutableMap.<Type, String>builder()
-        .put(Type.BOOLEAN,
-            "24LastValIgnoreNullsUpdateIN10impala_udf10BooleanValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE")
-        .put(Type.DECIMAL,
-            "24LastValIgnoreNullsUpdateIN10impala_udf10DecimalValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE")
-        .put(Type.TINYINT,
-            "24LastValIgnoreNullsUpdateIN10impala_udf10TinyIntValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE")
-        .put(Type.SMALLINT,
-            "24LastValIgnoreNullsUpdateIN10impala_udf11SmallIntValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE")
-        .put(Type.TIMESTAMP,
-            "24LastValIgnoreNullsUpdateIN10impala_udf12TimestampValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE")
-        .put(Type.INT,
-            "24LastValIgnoreNullsUpdateIN10impala_udf6IntValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE")
-        .put(Type.FLOAT,
-            "24LastValIgnoreNullsUpdateIN10impala_udf8FloatValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE")
-        .put(Type.BIGINT,
-            "24LastValIgnoreNullsUpdateIN10impala_udf9BigIntValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE")
-        .put(Type.DOUBLE,
-            "24LastValIgnoreNullsUpdateIN10impala_udf9DoubleValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE")
-        .put(Type.STRING,
-            "24LastValIgnoreNullsUpdateIN10impala_udf9StringValEEEvPNS2_15FunctionContextERKT_PS3_")
-        .build();
-
-  private static final Map<Type, String> LAST_VALUE_IGNORE_NULLS_REMOVE_SYMBOL =
-      ImmutableMap.<Type, String>builder()
-        .put(Type.BOOLEAN,
-            "24LastValIgnoreNullsRemoveIN10impala_udf10BooleanValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE")
-        .put(Type.DECIMAL,
-            "24LastValIgnoreNullsRemoveIN10impala_udf10DecimalValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE")
-        .put(Type.TINYINT,
-            "24LastValIgnoreNullsRemoveIN10impala_udf10TinyIntValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE")
-        .put(Type.SMALLINT,
-            "24LastValIgnoreNullsRemoveIN10impala_udf11SmallIntValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE")
-        .put(Type.TIMESTAMP,
-            "24LastValIgnoreNullsRemoveIN10impala_udf12TimestampValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE")
-        .put(Type.INT,
-            "24LastValIgnoreNullsRemoveIN10impala_udf6IntValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE")
-        .put(Type.FLOAT,
-            "24LastValIgnoreNullsRemoveIN10impala_udf8FloatValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE")
-        .put(Type.BIGINT,
-            "24LastValIgnoreNullsRemoveIN10impala_udf9BigIntValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE")
-        .put(Type.DOUBLE,
-            "24LastValIgnoreNullsRemoveIN10impala_udf9DoubleValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE")
-        .put(Type.STRING,
-            "24LastValIgnoreNullsRemoveIN10impala_udf9StringValEEEvPNS2_15FunctionContextERKT_PS3_")
-        .build();
-
-  private static final Map<Type, String> LAST_VALUE_IGNORE_NULLS_GET_VALUE_SYMBOL =
-      ImmutableMap.<Type, String>builder()
-        .put(Type.BOOLEAN,
-            "26LastValIgnoreNullsGetValueIN10impala_udf10BooleanValEEET_PNS2_15FunctionContextERKNS2_9StringValE")
-        .put(Type.TINYINT,
-            "26LastValIgnoreNullsGetValueIN10impala_udf10TinyIntValEEET_PNS2_15FunctionContextERKNS2_9StringValE")
-        .put(Type.SMALLINT,
-            "26LastValIgnoreNullsGetValueIN10impala_udf11SmallIntValEEET_PNS2_15FunctionContextERKNS2_9StringValE")
-        .put(Type.INT,
-            "26LastValIgnoreNullsGetValueIN10impala_udf6IntValEEET_PNS2_15FunctionContextERKNS2_9StringValE")
-        .put(Type.BIGINT,
-            "26LastValIgnoreNullsGetValueIN10impala_udf9BigIntValEEET_PNS2_15FunctionContextERKNS2_9StringValE")
-        .put(Type.FLOAT,
-            "26LastValIgnoreNullsGetValueIN10impala_udf8FloatValEEET_PNS2_15FunctionContextERKNS2_9StringValE")
-        .put(Type.DOUBLE,
-            "26LastValIgnoreNullsGetValueIN10impala_udf9DoubleValEEET_PNS2_15FunctionContextERKNS2_9StringValE")
-        .put(Type.STRING,
-            "26LastValIgnoreNullsGetValueIN10impala_udf9StringValEEET_PNS2_15FunctionContextERKS3_")
-        .put(Type.TIMESTAMP,
-            "26LastValIgnoreNullsGetValueIN10impala_udf12TimestampValEEET_PNS2_15FunctionContextERKNS2_9StringValE")
-        .put(Type.DECIMAL,
-            "26LastValIgnoreNullsGetValueIN10impala_udf10DecimalValEEET_PNS2_15FunctionContextERKNS2_9StringValE")
-        .build();
-
-  private static final Map<Type, String> LAST_VALUE_IGNORE_NULLS_FINALIZE_SYMBOL =
-      ImmutableMap.<Type, String>builder()
-        .put(Type.BOOLEAN,
-            "26LastValIgnoreNullsFinalizeIN10impala_udf10BooleanValEEET_PNS2_15FunctionContextERKNS2_9StringValE")
-        .put(Type.TINYINT,
-            "26LastValIgnoreNullsFinalizeIN10impala_udf10TinyIntValEEET_PNS2_15FunctionContextERKNS2_9StringValE")
-        .put(Type.SMALLINT,
-            "26LastValIgnoreNullsFinalizeIN10impala_udf11SmallIntValEEET_PNS2_15FunctionContextERKNS2_9StringValE")
-        .put(Type.INT,
-            "26LastValIgnoreNullsFinalizeIN10impala_udf6IntValEEET_PNS2_15FunctionContextERKNS2_9StringValE")
-        .put(Type.BIGINT,
-            "26LastValIgnoreNullsFinalizeIN10impala_udf9BigIntValEEET_PNS2_15FunctionContextERKNS2_9StringValE")
-        .put(Type.FLOAT,
-            "26LastValIgnoreNullsFinalizeIN10impala_udf8FloatValEEET_PNS2_15FunctionContextERKNS2_9StringValE")
-        .put(Type.DOUBLE,
-            "26LastValIgnoreNullsFinalizeIN10impala_udf9DoubleValEEET_PNS2_15FunctionContextERKNS2_9StringValE")
-        .put(Type.STRING,
-            "26LastValIgnoreNullsFinalizeIN10impala_udf9StringValEEET_PNS2_15FunctionContextERKS3_")
-        .put(Type.TIMESTAMP,
-            "26LastValIgnoreNullsFinalizeIN10impala_udf12TimestampValEEET_PNS2_15FunctionContextERKNS2_9StringValE")
-        .put(Type.DECIMAL,
-            "26LastValIgnoreNullsFinalizeIN10impala_udf10DecimalValEEET_PNS2_15FunctionContextERKNS2_9StringValE")
-        .build();
-
-  private static final Map<Type, String> FIRST_VALUE_UPDATE_SYMBOL =
-      ImmutableMap.<Type, String>builder()
-        .put(Type.BOOLEAN,
-             "14FirstValUpdateIN10impala_udf10BooleanValEEEvPNS2_15FunctionContextERKT_PS6_")
-        .put(Type.DECIMAL,
-             "14FirstValUpdateIN10impala_udf10DecimalValEEEvPNS2_15FunctionContextERKT_PS6_")
-        .put(Type.TINYINT,
-             "14FirstValUpdateIN10impala_udf10TinyIntValEEEvPNS2_15FunctionContextERKT_PS6_")
-        .put(Type.SMALLINT,
-             "14FirstValUpdateIN10impala_udf11SmallIntValEEEvPNS2_15FunctionContextERKT_PS6_")
-        .put(Type.TIMESTAMP,
-             "14FirstValUpdateIN10impala_udf12TimestampValEEEvPNS2_15FunctionContextERKT_PS6_")
-        .put(Type.INT,
-             "14FirstValUpdateIN10impala_udf6IntValEEEvPNS2_15FunctionContextERKT_PS6_")
-        .put(Type.FLOAT,
-             "14FirstValUpdateIN10impala_udf8FloatValEEEvPNS2_15FunctionContextERKT_PS6_")
-        .put(Type.BIGINT,
-             "14FirstValUpdateIN10impala_udf9BigIntValEEEvPNS2_15FunctionContextERKT_PS6_")
-        .put(Type.DOUBLE,
-             "14FirstValUpdateIN10impala_udf9DoubleValEEEvPNS2_15FunctionContextERKT_PS6_")
-        .put(Type.STRING,
-             "14FirstValUpdateIN10impala_udf9StringValEEEvPNS2_15FunctionContextERKT_PS6_")
-        .build();
-
-  private static final Map<Type, String> FIRST_VALUE_IGNORE_NULLS_UPDATE_SYMBOL =
-      ImmutableMap.<Type, String>builder()
-        .put(Type.BOOLEAN,
-            "25FirstValIgnoreNullsUpdateIN10impala_udf10BooleanValEEEvPNS2_15FunctionContextERKT_PS6_")
-        .put(Type.DECIMAL,
-            "25FirstValIgnoreNullsUpdateIN10impala_udf10DecimalValEEEvPNS2_15FunctionContextERKT_PS6_")
-        .put(Type.TINYINT,
-            "25FirstValIgnoreNullsUpdateIN10impala_udf10TinyIntValEEEvPNS2_15FunctionContextERKT_PS6_")
-        .put(Type.SMALLINT,
-            "25FirstValIgnoreNullsUpdateIN10impala_udf11SmallIntValEEEvPNS2_15FunctionContextERKT_PS6_")
-        .put(Type.TIMESTAMP,
-            "25FirstValIgnoreNullsUpdateIN10impala_udf12TimestampValEEEvPNS2_15FunctionContextERKT_PS6_")
-        .put(Type.INT,
-            "25FirstValIgnoreNullsUpdateIN10impala_udf6IntValEEEvPNS2_15FunctionContextERKT_PS6_")
-        .put(Type.FLOAT,
-            "25FirstValIgnoreNullsUpdateIN10impala_udf8FloatValEEEvPNS2_15FunctionContextERKT_PS6_")
-        .put(Type.BIGINT,
-            "25FirstValIgnoreNullsUpdateIN10impala_udf9BigIntValEEEvPNS2_15FunctionContextERKT_PS6_")
-        .put(Type.DOUBLE,
-            "25FirstValIgnoreNullsUpdateIN10impala_udf9DoubleValEEEvPNS2_15FunctionContextERKT_PS6_")
-        .put(Type.STRING,
-            "25FirstValIgnoreNullsUpdateIN10impala_udf9StringValEEEvPNS2_15FunctionContextERKT_PS6_")
-        .build();
-
-  // Populate all the aggregate builtins in the catalog.
-  // null symbols indicate the function does not need that step of the evaluation.
-  // An empty symbol indicates a TODO for the BE to implement the function.
-  // TODO: We could also generate this in python but I'm not sure that is easier.
-  private void initAggregateBuiltins() {
-    final String prefix = "_ZN6impala18AggregateFunctions";
-    final String initNullString = prefix +
-        "14InitNullStringEPN10impala_udf15FunctionContextEPNS1_9StringValE";
-    final String initNull = prefix +
-        "8InitNullEPN10impala_udf15FunctionContextEPNS1_6AnyValE";
-    final String stringValSerializeOrFinalize = prefix +
-        "28StringValSerializeOrFinalizeEPN10impala_udf15FunctionContextERKNS1_9StringValE";
-    final String stringValGetValue = prefix +
-        "17StringValGetValueEPN10impala_udf15FunctionContextERKNS1_9StringValE";
-
-    Db db = this;
-    // Count (*)
-    db.addBuiltin(AggregateFunction.createBuiltin(db, "count",
-        new ArrayList<Type>(), Type.BIGINT, Type.BIGINT,
-        prefix + "8InitZeroIN10impala_udf9BigIntValEEEvPNS2_15FunctionContextEPT_",
-        prefix + "15CountStarUpdateEPN10impala_udf15FunctionContextEPNS1_9BigIntValE",
-        prefix + "10CountMergeEPN10impala_udf15FunctionContextERKNS1_9BigIntValEPS4_",
-        null, null,
-        prefix + "15CountStarRemoveEPN10impala_udf15FunctionContextEPNS1_9BigIntValE",
-        null, false, true, true));
-
-    for (Type t: Type.getSupportedTypes()) {
-      if (t.isNull()) continue; // NULL is handled through type promotion.
-      if (t.isScalarType(PrimitiveType.CHAR)) continue; // promoted to STRING
-      if (t.isScalarType(PrimitiveType.VARCHAR)) continue; // promoted to STRING
-      // Count
-      db.addBuiltin(AggregateFunction.createBuiltin(db, "count",
-          Lists.newArrayList(t), Type.BIGINT, Type.BIGINT,
-          prefix + "8InitZeroIN10impala_udf9BigIntValEEEvPNS2_15FunctionContextEPT_",
-          prefix + "11CountUpdateEPN10impala_udf15FunctionContextERKNS1_6AnyValEPNS1_9BigIntValE",
-          prefix + "10CountMergeEPN10impala_udf15FunctionContextERKNS1_9BigIntValEPS4_",
-          null, null,
-          prefix + "11CountRemoveEPN10impala_udf15FunctionContextERKNS1_6AnyValEPNS1_9BigIntValE",
-          null, false, true, true));
-
-      // Min
-      String minMaxInit = t.isStringType() ? initNullString : initNull;
-      String minMaxSerializeOrFinalize = t.isStringType() ?
-          stringValSerializeOrFinalize : null;
-      String minMaxGetValue = t.isStringType() ? stringValGetValue : null;
-      db.addBuiltin(AggregateFunction.createBuiltin(db, "min",
-          Lists.newArrayList(t), t, t, minMaxInit,
-          prefix + MIN_UPDATE_SYMBOL.get(t),
-          prefix + MIN_UPDATE_SYMBOL.get(t),
-          minMaxSerializeOrFinalize, minMaxGetValue,
-          null, minMaxSerializeOrFinalize, true, true, false));
-      // Max
-      db.addBuiltin(AggregateFunction.createBuiltin(db, "max",
-          Lists.newArrayList(t), t, t, minMaxInit,
-          prefix + MAX_UPDATE_SYMBOL.get(t),
-          prefix + MAX_UPDATE_SYMBOL.get(t),
-          minMaxSerializeOrFinalize, minMaxGetValue,
-          null, minMaxSerializeOrFinalize, true, true, false));
-      // Sample
-      db.addBuiltin(AggregateFunction.createBuiltin(db, "sample",
-          Lists.newArrayList(t), Type.STRING, Type.STRING,
-          prefix + SAMPLE_INIT_SYMBOL.get(t),
-          prefix + SAMPLE_UPDATE_SYMBOL.get(t),
-          prefix + SAMPLE_MERGE_SYMBOL.get(t),
-          prefix + SAMPLE_SERIALIZE_SYMBOL.get(t),
-          prefix + SAMPLE_FINALIZE_SYMBOL.get(t),
-          false, false, true));
-
-      // Approximate median
-      db.addBuiltin(AggregateFunction.createBuiltin(db, "appx_median",
-          Lists.newArrayList(t), t, Type.STRING,
-          prefix + SAMPLE_INIT_SYMBOL.get(t),
-          prefix + SAMPLE_UPDATE_SYMBOL.get(t),
-          prefix + SAMPLE_MERGE_SYMBOL.get(t),
-          prefix + SAMPLE_SERIALIZE_SYMBOL.get(t),
-          prefix + APPX_MEDIAN_FINALIZE_SYMBOL.get(t),
-          false, false, true));
-
-      // Histogram
-      db.addBuiltin(AggregateFunction.createBuiltin(db, "histogram",
-          Lists.newArrayList(t), Type.STRING, Type.STRING,
-          prefix + SAMPLE_INIT_SYMBOL.get(t),
-          prefix + SAMPLE_UPDATE_SYMBOL.get(t),
-          prefix + SAMPLE_MERGE_SYMBOL.get(t),
-          prefix + SAMPLE_SERIALIZE_SYMBOL.get(t),
-          prefix + HISTOGRAM_FINALIZE_SYMBOL.get(t),
-          false, false, true));
-
-      // NDV
-      // TODO: this needs to switch to CHAR(64) as the intermediate type
-      db.addBuiltin(AggregateFunction.createBuiltin(db, "ndv",
-          Lists.newArrayList(t), Type.BIGINT, Type.STRING,
-          prefix + "7HllInitEPN10impala_udf15FunctionContextEPNS1_9StringValE",
-          prefix + HLL_UPDATE_SYMBOL.get(t),
-          prefix + "8HllMergeEPN10impala_udf15FunctionContextERKNS1_9StringValEPS4_",
-          stringValSerializeOrFinalize,
-          prefix + "11HllFinalizeEPN10impala_udf15FunctionContextERKNS1_9StringValE",
-          true, false, true));
-
-      db.addBuiltin(AggregateFunction.createBuiltin(db, "ndv_no_finalize",
-          Lists.newArrayList(t), Type.STRING, Type.STRING,
-          prefix + "7HllInitEPN10impala_udf15FunctionContextEPNS1_9StringValE",
-          prefix + HLL_UPDATE_SYMBOL.get(t),
-          prefix + "8HllMergeEPN10impala_udf15FunctionContextERKNS1_9StringValEPS4_",
-          stringValSerializeOrFinalize,
-          "_Z20IncrementNdvFinalizePN10impala_udf15FunctionContextERKNS_9StringValE",
-          true, false, true));
-
-      // distinctpc
-      // TODO: this needs to switch to CHAR(64) as the intermediate type
-      db.addBuiltin(AggregateFunction.createBuiltin(db, "distinctpc",
-          Lists.newArrayList(t), Type.BIGINT, Type.STRING,
-          prefix + "6PcInitEPN10impala_udf15FunctionContextEPNS1_9StringValE",
-          prefix + PC_UPDATE_SYMBOL.get(t),
-          prefix + "7PcMergeEPN10impala_udf15FunctionContextERKNS1_9StringValEPS4_",
-          stringValSerializeOrFinalize,
-          prefix + "10PcFinalizeEPN10impala_udf15FunctionContextERKNS1_9StringValE",
-          false, false, true));
-
-      // distinctpcsa
-      // TODO: this needs to switch to CHAR(64) as the intermediate type
-      db.addBuiltin(AggregateFunction.createBuiltin(db, "distinctpcsa",
-          Lists.newArrayList(t), Type.BIGINT, Type.STRING,
-          prefix + "6PcInitEPN10impala_udf15FunctionContextEPNS1_9StringValE",
-          prefix + PCSA_UPDATE_SYMBOL.get(t),
-          prefix + "7PcMergeEPN10impala_udf15FunctionContextERKNS1_9StringValEPS4_",
-          stringValSerializeOrFinalize,
-          prefix + "12PcsaFinalizeEPN10impala_udf15FunctionContextERKNS1_9StringValE",
-          false, false, true));
-
-      if (STDDEV_UPDATE_SYMBOL.containsKey(t)) {
-        db.addBuiltin(AggregateFunction.createBuiltin(db, "stddev",
-            Lists.newArrayList(t), Type.DOUBLE, ScalarType.createCharType(24),
-            prefix + "12KnuthVarInitEPN10impala_udf15FunctionContextEPNS1_9StringValE",
-            prefix + STDDEV_UPDATE_SYMBOL.get(t),
-            prefix + "13KnuthVarMergeEPN10impala_udf15FunctionContextERKNS1_9StringValEPS4_",
-            null,
-            prefix + "19KnuthStddevFinalizeEPN10impala_udf15FunctionContextERKNS1_9StringValE",
-            false, false, false));
-        db.addBuiltin(AggregateFunction.createBuiltin(db, "stddev_samp",
-            Lists.newArrayList(t), Type.DOUBLE, ScalarType.createCharType(24),
-            prefix + "12KnuthVarInitEPN10impala_udf15FunctionContextEPNS1_9StringValE",
-            prefix + STDDEV_UPDATE_SYMBOL.get(t),
-            prefix + "13KnuthVarMergeEPN10impala_udf15FunctionContextERKNS1_9StringValEPS4_",
-            null,
-            prefix + "19KnuthStddevFinalizeEPN10impala_udf15FunctionContextERKNS1_9StringValE",
-            false, false, false));
-        db.addBuiltin(AggregateFunction.createBuiltin(db, "stddev_pop",
-            Lists.newArrayList(t), Type.DOUBLE, ScalarType.createCharType(24),
-            prefix + "12KnuthVarInitEPN10impala_udf15FunctionContextEPNS1_9StringValE",
-            prefix + STDDEV_UPDATE_SYMBOL.get(t),
-            prefix + "13KnuthVarMergeEPN10impala_udf15FunctionContextERKNS1_9StringValEPS4_",
-            null,
-            prefix + "22KnuthStddevPopFinalizeEPN10impala_udf15FunctionContextERKNS1_9StringValE",
-            false, false, false));
-        db.addBuiltin(AggregateFunction.createBuiltin(db, "variance",
-            Lists.newArrayList(t), Type.DOUBLE, ScalarType.createCharType(24),
-            prefix + "12KnuthVarInitEPN10impala_udf15FunctionContextEPNS1_9StringValE",
-            prefix + STDDEV_UPDATE_SYMBOL.get(t),
-            prefix + "13KnuthVarMergeEPN10impala_udf15FunctionContextERKNS1_9StringValEPS4_",
-            null,
-            prefix + "16KnuthVarFinalizeEPN10impala_udf15FunctionContextERKNS1_9StringValE",
-            false, false, false));
-        db.addBuiltin(AggregateFunction.createBuiltin(db, "variance_samp",
-            Lists.newArrayList(t), Type.DOUBLE, ScalarType.createCharType(24),
-            prefix + "12KnuthVarInitEPN10impala_udf15FunctionContextEPNS1_9StringValE",
-            prefix + STDDEV_UPDATE_SYMBOL.get(t),
-            prefix + "13KnuthVarMergeEPN10impala_udf15FunctionContextERKNS1_9StringValEPS4_",
-            null,
-            prefix + "16KnuthVarFinalizeEPN10impala_udf15FunctionContextERKNS1_9StringValE",
-            false, false, false));
-        db.addBuiltin(AggregateFunction.createBuiltin(db, "var_samp",
-            Lists.newArrayList(t), Type.DOUBLE, ScalarType.createCharType(24),
-            prefix + "12KnuthVarInitEPN10impala_udf15FunctionContextEPNS1_9StringValE",
-            prefix + STDDEV_UPDATE_SYMBOL.get(t),
-            prefix + "13KnuthVarMergeEPN10impala_udf15FunctionContextERKNS1_9StringValEPS4_",
-            null,
-            prefix + "16KnuthVarFinalizeEPN10impala_udf15FunctionContextERKNS1_9StringValE",
-            false, false, false));
-        db.addBuiltin(AggregateFunction.createBuiltin(db, "variance_pop",
-            Lists.newArrayList(t), Type.DOUBLE, ScalarType.createCharType(24),
-            prefix + "12KnuthVarInitEPN10impala_udf15FunctionContextEPNS1_9StringValE",
-            prefix + STDDEV_UPDATE_SYMBOL.get(t),
-            prefix + "13KnuthVarMergeEPN10impala_udf15FunctionContextERKNS1_9StringValEPS4_",
-            null,
-            prefix + "19KnuthVarPopFinalizeEPN10impala_udf15FunctionContextERKNS1_9StringValE",
-            false, false, false));
-        db.addBuiltin(AggregateFunction.createBuiltin(db, "var_pop",
-            Lists.newArrayList(t), Type.DOUBLE, ScalarType.createCharType(24),
-            prefix + "12KnuthVarInitEPN10impala_udf15FunctionContextEPNS1_9StringValE",
-            prefix + STDDEV_UPDATE_SYMBOL.get(t),
-            prefix + "13KnuthVarMergeEPN10impala_udf15FunctionContextERKNS1_9StringValEPS4_",
-            null,
-            prefix + "19KnuthVarPopFinalizeEPN10impala_udf15FunctionContextERKNS1_9StringValE",
-            false, false, false));
-      }
-    }
-
-    // Sum
-    db.addBuiltin(AggregateFunction.createBuiltin(db, "sum",
-        Lists.<Type>newArrayList(Type.BIGINT), Type.BIGINT, Type.BIGINT, initNull,
-        prefix + "9SumUpdateIN10impala_udf9BigIntValES3_EEvPNS2_15FunctionContextERKT_PT0_",
-        prefix + "9SumUpdateIN10impala_udf9BigIntValES3_EEvPNS2_15FunctionContextERKT_PT0_",
-        null, null,
-        prefix + "9SumRemoveIN10impala_udf9BigIntValES3_EEvPNS2_15FunctionContextERKT_PT0_",
-        null, false, true, false));
-    db.addBuiltin(AggregateFunction.createBuiltin(db, "sum",
-        Lists.<Type>newArrayList(Type.DOUBLE), Type.DOUBLE, Type.DOUBLE, initNull,
-        prefix + "9SumUpdateIN10impala_udf9DoubleValES3_EEvPNS2_15FunctionContextERKT_PT0_",
-        prefix + "9SumUpdateIN10impala_udf9DoubleValES3_EEvPNS2_15FunctionContextERKT_PT0_",
-        null, null,
-        prefix + "9SumRemoveIN10impala_udf9DoubleValES3_EEvPNS2_15FunctionContextERKT_PT0_",
-        null, false, true, false));
-    db.addBuiltin(AggregateFunction.createBuiltin(db, "sum",
-        Lists.<Type>newArrayList(Type.DECIMAL), Type.DECIMAL, Type.DECIMAL, initNull,
-        prefix + "16SumDecimalUpdateEPN10impala_udf15FunctionContextERKNS1_10DecimalValEPS4_",
-        prefix + "15SumDecimalMergeEPN10impala_udf15FunctionContextERKNS1_10DecimalValEPS4_",
-        null, null,
-        prefix + "16SumDecimalRemoveEPN10impala_udf15FunctionContextERKNS1_10DecimalValEPS4_",
-        null, false, true, false));
-
-    // Avg
-    // TODO: switch to CHAR(sizeof(AvgIntermediateType)) when that becomes available
-    db.addBuiltin(AggregateFunction.createBuiltin(db, "avg",
-        Lists.<Type>newArrayList(Type.BIGINT), Type.DOUBLE, Type.STRING,
-        prefix + "7AvgInitEPN10impala_udf15FunctionContextEPNS1_9StringValE",
-        prefix + "9AvgUpdateIN10impala_udf9BigIntValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE",
-        prefix + "8AvgMergeEPN10impala_udf15FunctionContextERKNS1_9StringValEPS4_",
-        stringValSerializeOrFinalize,
-        prefix + "11AvgGetValueEPN10impala_udf15FunctionContextERKNS1_9StringValE",
-        prefix + "9AvgRemoveIN10impala_udf9BigIntValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE",
-        prefix + "11AvgFinalizeEPN10impala_udf15FunctionContextERKNS1_9StringValE",
-        false, true, false));
-    db.addBuiltin(AggregateFunction.createBuiltin(db, "avg",
-        Lists.<Type>newArrayList(Type.DOUBLE), Type.DOUBLE, Type.STRING,
-        prefix + "7AvgInitEPN10impala_udf15FunctionContextEPNS1_9StringValE",
-        prefix + "9AvgUpdateIN10impala_udf9DoubleValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE",
-        prefix + "8AvgMergeEPN10impala_udf15FunctionContextERKNS1_9StringValEPS4_",
-        stringValSerializeOrFinalize,
-        prefix + "11AvgGetValueEPN10impala_udf15FunctionContextERKNS1_9StringValE",
-        prefix + "9AvgRemoveIN10impala_udf9DoubleValEEEvPNS2_15FunctionContextERKT_PNS2_9StringValE",
-        prefix + "11AvgFinalizeEPN10impala_udf15FunctionContextERKNS1_9StringValE",
-        false, true, false));
-    db.addBuiltin(AggregateFunction.createBuiltin(db, "avg",
-        Lists.<Type>newArrayList(Type.DECIMAL), Type.DECIMAL, Type.STRING,
-        prefix + "14DecimalAvgInitEPN10impala_udf15FunctionContextEPNS1_9StringValE",
-        prefix + "16DecimalAvgUpdateEPN10impala_udf15FunctionContextERKNS1_10DecimalValEPNS1_9StringValE",
-        prefix + "15DecimalAvgMergeEPN10impala_udf15FunctionContextERKNS1_9StringValEPS4_",
-        stringValSerializeOrFinalize,
-        prefix + "18DecimalAvgGetValueEPN10impala_udf15FunctionContextERKNS1_9StringValE",
-        prefix + "16DecimalAvgRemoveEPN10impala_udf15FunctionContextERKNS1_10DecimalValEPNS1_9StringValE",
-        prefix + "18DecimalAvgFinalizeEPN10impala_udf15FunctionContextERKNS1_9StringValE",
-        false, true, false));
-    // Avg(Timestamp)
-    db.addBuiltin(AggregateFunction.createBuiltin(db, "avg",
-        Lists.<Type>newArrayList(Type.TIMESTAMP), Type.TIMESTAMP, Type.STRING,
-        prefix + "7AvgInitEPN10impala_udf15FunctionContextEPNS1_9StringValE",
-        prefix + "18TimestampAvgUpdateEPN10impala_udf15FunctionContextERKNS1_12TimestampValEPNS1_9StringValE",
-        prefix + "8AvgMergeEPN10impala_udf15FunctionContextERKNS1_9StringValEPS4_",
-        stringValSerializeOrFinalize,
-        prefix + "20TimestampAvgGetValueEPN10impala_udf15FunctionContextERKNS1_9StringValE",
-        prefix + "18TimestampAvgRemoveEPN10impala_udf15FunctionContextERKNS1_12TimestampValEPNS1_9StringValE",
-        prefix + "20TimestampAvgFinalizeEPN10impala_udf15FunctionContextERKNS1_9StringValE",
-        false, true, false));
-
-    // Group_concat(string)
-    db.addBuiltin(AggregateFunction.createBuiltin(db, "group_concat",
-        Lists.<Type>newArrayList(Type.STRING), Type.STRING, Type.STRING, initNullString,
-        prefix +
-            "18StringConcatUpdateEPN10impala_udf15FunctionContextERKNS1_9StringValEPS4_",
-        prefix +
-            "17StringConcatMergeEPN10impala_udf15FunctionContextERKNS1_9StringValEPS4_",
-        stringValSerializeOrFinalize,
-        prefix +
-            "20StringConcatFinalizeEPN10impala_udf15FunctionContextERKNS1_9StringValE",
-        false, false, false));
-    // Group_concat(string, string)
-    db.addBuiltin(AggregateFunction.createBuiltin(db, "group_concat",
-        Lists.<Type>newArrayList(Type.STRING, Type.STRING), Type.STRING, Type.STRING,
-        initNullString,
-        prefix +
-            "18StringConcatUpdateEPN10impala_udf15FunctionContextERKNS1_9StringValES6_PS4_",
-        prefix +
-            "17StringConcatMergeEPN10impala_udf15FunctionContextERKNS1_9StringValEPS4_",
-        stringValSerializeOrFinalize,
-        prefix +
-            "20StringConcatFinalizeEPN10impala_udf15FunctionContextERKNS1_9StringValE",
-        false, false, false));
-
-    // analytic functions
-    // Rank
-    db.addBuiltin(AggregateFunction.createAnalyticBuiltin(db, "rank",
-        Lists.<Type>newArrayList(), Type.BIGINT, Type.STRING,
-        prefix + "8RankInitEPN10impala_udf15FunctionContextEPNS1_9StringValE",
-        prefix + "10RankUpdateEPN10impala_udf15FunctionContextEPNS1_9StringValE",
-        null,
-        prefix + "12RankGetValueEPN10impala_udf15FunctionContextERNS1_9StringValE",
-        prefix + "12RankFinalizeEPN10impala_udf15FunctionContextERNS1_9StringValE"));
-    // Dense rank
-    db.addBuiltin(AggregateFunction.createAnalyticBuiltin(db, "dense_rank",
-        Lists.<Type>newArrayList(), Type.BIGINT, Type.STRING,
-        prefix + "8RankInitEPN10impala_udf15FunctionContextEPNS1_9StringValE",
-        prefix + "15DenseRankUpdateEPN10impala_udf15FunctionContextEPNS1_9StringValE",
-        null,
-        prefix + "17DenseRankGetValueEPN10impala_udf15FunctionContextERNS1_9StringValE",
-        prefix + "12RankFinalizeEPN10impala_udf15FunctionContextERNS1_9StringValE"));
-    db.addBuiltin(AggregateFunction.createAnalyticBuiltin(
-        db, "row_number", new ArrayList<Type>(), Type.BIGINT, Type.BIGINT,
-        prefix + "8InitZeroIN10impala_udf9BigIntValEEEvPNS2_15FunctionContextEPT_",
-        prefix + "15CountStarUpdateEPN10impala_udf15FunctionContextEPNS1_9BigIntValE",
-        prefix + "10CountMergeEPN10impala_udf15FunctionContextERKNS1_9BigIntValEPS4_",
-        null, null));
-
-    // The following 3 functions are never directly executed because they get rewritten
-    db.addBuiltin(AggregateFunction.createAnalyticBuiltin(
-        db, "percent_rank", Lists.<Type>newArrayList(), Type.DOUBLE, Type.STRING));
-    db.addBuiltin(AggregateFunction.createAnalyticBuiltin(
-        db, "cume_dist", Lists.<Type>newArrayList(), Type.DOUBLE, Type.STRING));
-    db.addBuiltin(AggregateFunction.createAnalyticBuiltin(
-        db, "ntile", Lists.<Type>newArrayList(Type.BIGINT), Type.BIGINT, Type.STRING));
-
-    for (Type t: Type.getSupportedTypes()) {
-      if (t.isNull()) continue; // NULL is handled through type promotion.
-      if (t.isScalarType(PrimitiveType.CHAR)) continue; // promoted to STRING
-      if (t.isScalarType(PrimitiveType.VARCHAR)) continue; // promoted to STRING
-      db.addBuiltin(AggregateFunction.createAnalyticBuiltin(
-          db, "first_value", Lists.newArrayList(t), t, t,
-          t.isStringType() ? initNullString : initNull,
-          prefix + FIRST_VALUE_UPDATE_SYMBOL.get(t),
-          null,
-          t == Type.STRING ? stringValGetValue : null,
-          t == Type.STRING ? stringValSerializeOrFinalize : null));
-      // Implements FIRST_VALUE for some windows that require rewrites during planning.
-      db.addBuiltin(AggregateFunction.createAnalyticBuiltin(
-          db, "first_value_rewrite", Lists.newArrayList(t, Type.BIGINT), t, t,
-          t.isStringType() ? initNullString : initNull,
-          prefix + FIRST_VALUE_REWRITE_UPDATE_SYMBOL.get(t),
-          null,
-          t == Type.STRING ? stringValGetValue : null,
-          t == Type.STRING ? stringValSerializeOrFinalize : null,
-          false));
-      db.addBuiltin(AggregateFunction.createAnalyticBuiltin(
-          db, "first_value_ignore_nulls", Lists.newArrayList(t), t, t,
-          t.isStringType() ? initNullString : initNull,
-          prefix + FIRST_VALUE_IGNORE_NULLS_UPDATE_SYMBOL.get(t),
-          null,
-          t == Type.STRING ? stringValGetValue : null,
-          t == Type.STRING ? stringValSerializeOrFinalize : null,
-          false));
-
-      db.addBuiltin(AggregateFunction.createAnalyticBuiltin(
-          db, "last_value", Lists.newArrayList(t), t, t,
-          t.isStringType() ? initNullString : initNull,
-          prefix + UPDATE_VAL_SYMBOL.get(t),
-          prefix + LAST_VALUE_REMOVE_SYMBOL.get(t),
-          t == Type.STRING ? stringValGetValue : null,
-          t == Type.STRING ? stringValSerializeOrFinalize : null));
-
-      db.addBuiltin(AggregateFunction.createAnalyticBuiltin(
-          db, "last_value_ignore_nulls", Lists.newArrayList(t), t, Type.STRING,
-          prefix + LAST_VALUE_IGNORE_NULLS_INIT_SYMBOL.get(t),
-          prefix + LAST_VALUE_IGNORE_NULLS_UPDATE_SYMBOL.get(t),
-          prefix + LAST_VALUE_IGNORE_NULLS_REMOVE_SYMBOL.get(t),
-          prefix + LAST_VALUE_IGNORE_NULLS_GET_VALUE_SYMBOL.get(t),
-          prefix + LAST_VALUE_IGNORE_NULLS_FINALIZE_SYMBOL.get(t),
-          false));
-
-      db.addBuiltin(AggregateFunction.createAnalyticBuiltin(
-          db, "lag", Lists.newArrayList(t, Type.BIGINT, t), t, t,
-          prefix + OFFSET_FN_INIT_SYMBOL.get(t),
-          prefix + OFFSET_FN_UPDATE_SYMBOL.get(t),
-          null, null, null));
-      db.addBuiltin(AggregateFunction.createAnalyticBuiltin(
-          db, "lead", Lists.newArrayList(t, Type.BIGINT, t), t, t,
-          prefix + OFFSET_FN_INIT_SYMBOL.get(t),
-          prefix + OFFSET_FN_UPDATE_SYMBOL.get(t),
-          null, null, null));
-
-      // Calls to lead() and lag() that use the default offset or the default value
-      // should be rewritten to call the overrides that take all parameters.
-      db.addBuiltin(AggregateFunction.createAnalyticBuiltin(
-            db, "lag", Lists.newArrayList(t), t, t));
-      db.addBuiltin(AggregateFunction.createAnalyticBuiltin(
-            db, "lag", Lists.newArrayList(t, Type.BIGINT), t, t));
-      db.addBuiltin(AggregateFunction.createAnalyticBuiltin(
-            db, "lead", Lists.newArrayList(t), t, t));
-      db.addBuiltin(AggregateFunction.createAnalyticBuiltin(
-            db, "lead", Lists.newArrayList(t, Type.BIGINT), t, t));
-    }
-  }
-}
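
A note for readers tracing the registration pattern above: each builtin in initAggregateBuiltins() is wired up by prepending the shared prefix "_ZN6impala18AggregateFunctions" to a per-type mangled suffix taken from one of the maps, and a null symbol means that evaluation step is simply skipped. The standalone sketch below is illustrative only and not part of this commit; the class and method names are made up.

    // Illustrative sketch of the symbol-composition pattern used in
    // initAggregateBuiltins(); Type stands in for the catalog Type class.
    final class SymbolLookupSketch {
      // Prepends the common backend prefix to the per-type mangled suffix,
      // e.g. prefix + MIN_UPDATE_SYMBOL.get(t). A null suffix means the
      // corresponding evaluation step is not needed for this builtin.
      static String resolveSymbol(java.util.Map<Type, String> symbols, Type t,
          String prefix) {
        String suffix = symbols.get(t);
        return suffix == null ? null : prefix + suffix;
      }
    }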


[24/61] [partial] incubator-impala git commit: IMPALA-3786: Replace "cloudera" with "apache" (part 1)

Posted by kw...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/common/InternalException.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/common/InternalException.java b/fe/src/main/java/com/cloudera/impala/common/InternalException.java
deleted file mode 100644
index 64a8d5e..0000000
--- a/fe/src/main/java/com/cloudera/impala/common/InternalException.java
+++ /dev/null
@@ -1,32 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.common;
-
-/**
- * Thrown for internal server errors.
- *
- */
-public class InternalException extends ImpalaException {
-  public InternalException(String msg, Throwable cause) {
-    super(msg, cause);
-  }
-
-  public InternalException(String msg) {
-    super(msg);
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/common/JniUtil.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/common/JniUtil.java b/fe/src/main/java/com/cloudera/impala/common/JniUtil.java
deleted file mode 100644
index 2f11c52..0000000
--- a/fe/src/main/java/com/cloudera/impala/common/JniUtil.java
+++ /dev/null
@@ -1,165 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.common;
-
-import java.io.IOException;
-import java.io.PrintWriter;
-import java.io.StringWriter;
-import java.io.Writer;
-import java.lang.management.ManagementFactory;
-import java.lang.management.MemoryPoolMXBean;
-import java.lang.management.MemoryUsage;
-import java.util.ArrayList;
-
-import org.apache.thrift.TBase;
-import org.apache.thrift.TSerializer;
-import org.apache.thrift.TDeserializer;
-import org.apache.thrift.TException;
-import org.apache.thrift.protocol.TBinaryProtocol;
-import org.apache.thrift.protocol.TProtocolFactory;
-
-import com.cloudera.impala.thrift.TGetJvmMetricsRequest;
-import com.cloudera.impala.thrift.TGetJvmMetricsResponse;
-import com.cloudera.impala.thrift.TJvmMemoryPool;
-
-/**
- * Utility class with methods intended for JNI clients
- */
-public class JniUtil {
-  private final static TBinaryProtocol.Factory protocolFactory_ =
-      new TBinaryProtocol.Factory();
-
-  /**
-   * Returns a formatted string containing the simple exception name and the
-   * exception message without the full stack trace. Includes the
-   * chain of causes, each on a separate line.
-   */
-  public static String throwableToString(Throwable t) {
-    Writer output = new StringWriter();
-    try {
-      output.write(String.format("%s: %s", t.getClass().getSimpleName(),
-          t.getMessage()));
-      // Follow the chain of exception causes and print them as well.
-      Throwable cause = t;
-      while ((cause = cause.getCause()) != null) {
-        output.write(String.format("\nCAUSED BY: %s: %s",
-            cause.getClass().getSimpleName(), cause.getMessage()));
-      }
-    } catch (IOException e) {
-      throw new Error(e);
-    }
-    return output.toString();
-  }
-
-  /**
-   * Returns the stack trace of the Throwable object.
-   */
-  public static String throwableToStackTrace(Throwable t) {
-    Writer output = new StringWriter();
-    t.printStackTrace(new PrintWriter(output));
-    return output.toString();
-  }
-
-  /**
-   * Deserialize a serialized form of a Thrift data structure to its object form.
-   */
-  public static <T extends TBase<?, ?>, F extends TProtocolFactory>
-  void deserializeThrift(F protocolFactory, T result, byte[] thriftData)
-      throws ImpalaException {
-    // TODO: avoid creating deserializer for each query?
-    TDeserializer deserializer = new TDeserializer(protocolFactory);
-    try {
-      deserializer.deserialize(result, thriftData);
-    } catch (TException e) {
-      throw new InternalException(e.getMessage());
-    }
-  }
-
-  /**
-   * Collect the JVM's memory statistics into a thrift structure for translation into
-   * Impala metrics by the backend. A synthetic 'total' memory pool is included with
-   * aggregate statistics for all real pools.
-   */
-  public static byte[] getJvmMetrics(byte[] argument) throws ImpalaException {
-    TGetJvmMetricsRequest request = new TGetJvmMetricsRequest();
-    JniUtil.deserializeThrift(protocolFactory_, request, argument);
-
-    TGetJvmMetricsResponse jvmMetrics = new TGetJvmMetricsResponse();
-    jvmMetrics.setMemory_pools(new ArrayList<TJvmMemoryPool>());
-    TJvmMemoryPool totalUsage = new TJvmMemoryPool();
-    boolean is_total =
-        request.getMemory_pool() != null && request.getMemory_pool().equals("total");
-
-    if (request.get_all || is_total) {
-      totalUsage.setName("total");
-      jvmMetrics.getMemory_pools().add(totalUsage);
-    }
-    for (MemoryPoolMXBean memBean: ManagementFactory.getMemoryPoolMXBeans()) {
-      if (request.get_all || is_total ||
-          memBean.getName().equals(request.getMemory_pool())) {
-        TJvmMemoryPool usage = new TJvmMemoryPool();
-        MemoryUsage beanUsage = memBean.getUsage();
-        usage.setCommitted(beanUsage.getCommitted());
-        usage.setInit(beanUsage.getInit());
-        usage.setMax(beanUsage.getMax());
-        usage.setUsed(beanUsage.getUsed());
-        usage.setName(memBean.getName());
-
-        totalUsage.committed += beanUsage.getCommitted();
-        totalUsage.init += beanUsage.getInit();
-        totalUsage.max += beanUsage.getMax();
-        totalUsage.used += beanUsage.getUsed();
-
-        MemoryUsage peakUsage = memBean.getPeakUsage();
-        usage.setPeak_committed(peakUsage.getCommitted());
-        usage.setPeak_init(peakUsage.getInit());
-        usage.setPeak_max(peakUsage.getMax());
-        usage.setPeak_used(peakUsage.getUsed());
-
-        totalUsage.peak_committed += peakUsage.getCommitted();
-        totalUsage.peak_init += peakUsage.getInit();
-        totalUsage.peak_max += peakUsage.getMax();
-        totalUsage.peak_used += peakUsage.getUsed();
-
-        if (!is_total) {
-          jvmMetrics.getMemory_pools().add(usage);
-          if (!request.get_all) break;
-        }
-      }
-    }
-    TSerializer serializer = new TSerializer(protocolFactory_);
-    try {
-      return serializer.serialize(jvmMetrics);
-    } catch (TException e) {
-      throw new InternalException(e.getMessage());
-    }
-  }
-
-  /**
-   * Get Java version and vendor information
-   */
-  public static String getJavaVersion() {
-    StringBuilder sb = new StringBuilder();
-    sb.append("Java Version Info: ");
-    sb.append(System.getProperty("java.runtime.name"));
-    sb.append(" (");
-    sb.append(System.getProperty("java.runtime.version"));
-    sb.append(")");
-    return sb.toString();
-  }
-}
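
As a quick illustration of the throwableToString() contract documented in the class above (the exception types and messages here are invented for the example; this snippet is not part of the commit):

    // Hypothetical usage of JniUtil.throwableToString():
    Throwable cause = new IllegalStateException("catalog not ready");
    Throwable top = new RuntimeException("query failed", cause);
    System.out.println(JniUtil.throwableToString(top));
    // Prints the simple class name and message, then one line per cause:
    //   RuntimeException: query failed
    //   CAUSED BY: IllegalStateException: catalog not ready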

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/common/NotImplementedException.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/common/NotImplementedException.java b/fe/src/main/java/com/cloudera/impala/common/NotImplementedException.java
deleted file mode 100644
index b06a130..0000000
--- a/fe/src/main/java/com/cloudera/impala/common/NotImplementedException.java
+++ /dev/null
@@ -1,28 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.common;
-
-/**
- * Thrown for SQL statements that require as yet unimplemented functionality.
- *
- */
-public class NotImplementedException extends ImpalaException {
-  public NotImplementedException(String msg) {
-    super(msg);
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/common/Pair.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/common/Pair.java b/fe/src/main/java/com/cloudera/impala/common/Pair.java
deleted file mode 100644
index c17c535..0000000
--- a/fe/src/main/java/com/cloudera/impala/common/Pair.java
+++ /dev/null
@@ -1,55 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.common;
-
-/**
- * The equivalent of C++'s std::pair<>.
- */
-public class Pair<F, S> {
-  public F first;
-  public S second;
-
-  public Pair(F first, S second) {
-    this.first = first;
-    this.second = second;
-  }
-
-  @Override
-  /**
-   * A pair is equal if both parts are equal().
-   */
-  public boolean equals(Object o) {
-    if (o instanceof Pair) {
-      Pair<F,S> other = (Pair<F,S>) o;
-      return this.first.equals(other.first) && this.second.equals(other.second);
-    }
-    return false;
-  }
-
-  @Override
-  public int hashCode() {
-    int hashFirst = first != null ? first.hashCode() : 0;
-    int hashSecond = second != null ? second.hashCode() : 0;
-
-    return (hashFirst + hashSecond) * hashSecond + hashFirst;
-  }
-
-  static public <F, S> Pair<F, S> create(F first, S second) {
-    return new Pair<F, S>(first, second);
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/common/PrintUtils.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/common/PrintUtils.java b/fe/src/main/java/com/cloudera/impala/common/PrintUtils.java
deleted file mode 100644
index 78e0d6d..0000000
--- a/fe/src/main/java/com/cloudera/impala/common/PrintUtils.java
+++ /dev/null
@@ -1,86 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.common;
-
-import static com.cloudera.impala.common.ByteUnits.GIGABYTE;
-import static com.cloudera.impala.common.ByteUnits.KILOBYTE;
-import static com.cloudera.impala.common.ByteUnits.MEGABYTE;
-import static com.cloudera.impala.common.ByteUnits.PETABYTE;
-import static com.cloudera.impala.common.ByteUnits.TERABYTE;
-
-import java.text.DecimalFormat;
-
-import org.apache.commons.lang3.StringUtils;
-
-/**
- * Utility functions for pretty printing.
- */
-public class PrintUtils {
-  /**
-   * Prints the given number of bytes in PB, TB, GB, MB, KB with 2 decimal points.
-   * For example 5000 will be returned as 4.88KB.
-   */
-  public static String printBytes(long bytes) {
-    double result = bytes;
-    // Avoid String.format() due to IMPALA-1572 which happens on JDK7 but not JDK6.
-    if (bytes >= PETABYTE) return new DecimalFormat(".00PB").format(result / PETABYTE);
-    if (bytes >= TERABYTE) return new DecimalFormat(".00TB").format(result / TERABYTE);
-    if (bytes >= GIGABYTE) return new DecimalFormat(".00GB").format(result / GIGABYTE);
-    if (bytes >= MEGABYTE) return new DecimalFormat(".00MB").format(result / MEGABYTE);
-    if (bytes >= KILOBYTE) return new DecimalFormat(".00KB").format(result / KILOBYTE);
-    return bytes + "B";
-  }
-
-  public static String printCardinality(String prefix, long cardinality) {
-    return prefix + "cardinality=" +
-        ((cardinality != -1) ? String.valueOf(cardinality) : "unavailable");
-  }
-
-  public static String printHosts(String prefix, long numHosts) {
-    return prefix + "hosts=" + ((numHosts != -1) ? numHosts : "unavailable");
-  }
-
-  public static String printMemCost(String prefix, long perHostMemCost) {
-    return prefix + "per-host-mem=" +
-        ((perHostMemCost != -1) ? printBytes(perHostMemCost) : "unavailable");
-  }
-
-  /**
-   * Prints the given square matrix into matrixStr. Separates cells by cellSpacing.
-   */
-  public static void printMatrix(boolean[][] matrix, int cellSpacing,
-      StringBuilder matrixStr) {
-    // Print labels.
-    matrixStr.append(StringUtils.repeat(' ', cellSpacing));
-    String formatStr = "%Xd".replace("X", String.valueOf(cellSpacing));
-    for (int i = 0; i < matrix.length; ++i) {
-      matrixStr.append(String.format(formatStr, i));
-    }
-    matrixStr.append("\n");
-
-    // Print matrix.
-    for (int i = 0; i < matrix.length; ++i) {
-      matrixStr.append(String.format(formatStr, i));
-      for (int j = 0; j < matrix.length; ++j) {
-        int cell = (matrix[i][j]) ? 1 : 0;
-        matrixStr.append(String.format(formatStr, cell));
-      }
-      matrixStr.append("\n");
-    }
-  }
-}
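
For a quick sense of the output these helpers produce, the values below are worked out from the thresholds and DecimalFormat patterns in the code above (an illustration, not captured test output):

  PrintUtils.printBytes(5000);                        // "4.88KB"  (5000 / 1024)
  PrintUtils.printBytes(3L * 1024 * 1024);            // "3.00MB"
  PrintUtils.printBytes(512);                         // "512B"    (below the KB threshold)
  PrintUtils.printCardinality("  ", -1);              // "  cardinality=unavailable"
  PrintUtils.printMemCost("  ", 128L * 1024 * 1024);  // "  per-host-mem=128.00MB"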

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/common/Reference.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/common/Reference.java b/fe/src/main/java/com/cloudera/impala/common/Reference.java
deleted file mode 100644
index aff1dae..0000000
--- a/fe/src/main/java/com/cloudera/impala/common/Reference.java
+++ /dev/null
@@ -1,42 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.common;
-
-/**
- * "Indirection layer" that allows returning an object via an output
- * parameter of a function call, similar to a pointer or reference parameter
- * in C/C++.
- * Example:
- *   Reference<T> ref = new Reference<T>();
- *   createT(ref);  // calls ref.setRef()
- *   <do something with ref.getRef()>;
- */
-public class Reference<RefType> {
-  protected RefType ref_;
-
-  public Reference(RefType ref) {
-    this.ref_ = ref;
-  }
-
-  public Reference() {
-    this.ref_ = null;
-  }
-
-  public RefType getRef() { return ref_; }
-  public void setRef(RefType ref) { this.ref_ = ref; }
-}
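
Spelling out the output-parameter pattern from the class comment a little more concretely (the helper method and values are invented for illustration):

  // Hypothetical helper that "returns" its result through the Reference argument.
  static void lookupPartitionId(String partitionKey, Reference<Long> result) {
    result.setRef(42L);  // placeholder value
  }

  static void caller() {
    Reference<Long> id = new Reference<Long>();
    lookupPartitionId("p=2016-09-30", id);
    long partitionId = id.getRef();  // 42
  }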

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/common/RuntimeEnv.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/common/RuntimeEnv.java b/fe/src/main/java/com/cloudera/impala/common/RuntimeEnv.java
deleted file mode 100644
index febd657..0000000
--- a/fe/src/main/java/com/cloudera/impala/common/RuntimeEnv.java
+++ /dev/null
@@ -1,71 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.common;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.service.FeSupport;
-import com.cloudera.impala.thrift.TStartupOptions;
-
-/**
- * Contains runtime-specific parameters such as the number of CPU cores. Currently only
- * used in Plan cost estimation. The static RuntimeEnv members can be set so that tests
- * can rely on a machine-independent RuntimeEnv.
- */
-public class RuntimeEnv {
-  private final static Logger LOG = LoggerFactory.getLogger(RuntimeEnv.class);
-
-  public static RuntimeEnv INSTANCE = new RuntimeEnv();
-
-  private int numCores_;
-
-  // Indicates if column lineage information should be computed for each query.
-  private boolean computeLineage_;
-
-  // Indicates whether this is an environment for testing.
-  private boolean isTestEnv_;
-
-  public RuntimeEnv() {
-    reset();
-    try {
-      TStartupOptions opts = FeSupport.GetStartupOptions();
-      computeLineage_ = opts.compute_lineage;
-    } catch (InternalException e) {
-      LOG.error("Error retrieving BE startup options. Shutting down JVM");
-      System.exit(1);
-    }
-  }
-
-  /**
-   * Resets this RuntimeEnv back to its machine-dependent state.
-   */
-  public void reset() {
-    numCores_ = Runtime.getRuntime().availableProcessors();
-  }
-
-  public int getNumCores() { return numCores_; }
-  public void setNumCores(int numCores) { this.numCores_ = numCores; }
-  public void setTestEnv(boolean v) { isTestEnv_ = v; }
-  public boolean isTestEnv() { return isTestEnv_; }
-  public boolean computeLineage() { return computeLineage_; }
-  public boolean isKuduSupported() {
-    return "true".equals(System.getenv("KUDU_IS_SUPPORTED"));
-  }
-
-}
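
The machine-independent setup mentioned in the class comment amounts to pinning the fields that plan cost estimates depend on; a minimal sketch (the fixture method names are made up):

  static void useMachineIndependentEnv() {
    RuntimeEnv.INSTANCE.setTestEnv(true);
    RuntimeEnv.INSTANCE.setNumCores(8);  // any fixed value, so estimates do not vary by host
  }

  static void restoreEnv() {
    RuntimeEnv.INSTANCE.reset();  // numCores_ goes back to availableProcessors()
  }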

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/common/TableAliasGenerator.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/common/TableAliasGenerator.java b/fe/src/main/java/com/cloudera/impala/common/TableAliasGenerator.java
deleted file mode 100644
index 6ded288..0000000
--- a/fe/src/main/java/com/cloudera/impala/common/TableAliasGenerator.java
+++ /dev/null
@@ -1,37 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.common;
-
-import com.cloudera.impala.analysis.Analyzer;
-
-import com.google.common.base.Preconditions;
-
-public class TableAliasGenerator extends AliasGenerator {
-  private static final String DEFAULT_TBL_ALIAS_PREFIX = "$a$";
-
-  public TableAliasGenerator(Analyzer analyzer, String prefix) {
-    Preconditions.checkNotNull(analyzer);
-    aliasPrefix_ = prefix != null ? prefix : DEFAULT_TBL_ALIAS_PREFIX;
-    Analyzer currentAnalyzer = analyzer;
-    do {
-      usedAliases_.addAll(currentAnalyzer.getAliases());
-      usedAliases_.addAll(currentAnalyzer.getLocalViews().keySet());
-      currentAnalyzer = currentAnalyzer.getParentAnalyzer();
-    } while (currentAnalyzer != null);
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/common/TreeNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/common/TreeNode.java b/fe/src/main/java/com/cloudera/impala/common/TreeNode.java
deleted file mode 100644
index ac8f297..0000000
--- a/fe/src/main/java/com/cloudera/impala/common/TreeNode.java
+++ /dev/null
@@ -1,182 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.common;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-
-import com.cloudera.impala.util.Visitor;
-import com.google.common.base.Predicate;
-
-/**
- * Generic tree structure. Only concrete subclasses of this can be instantiated.
- */
-public abstract class TreeNode<NodeType extends TreeNode<NodeType>> {
-  protected ArrayList<NodeType> children_ = new ArrayList<NodeType>();
-
-  public NodeType getChild(int i) {
-    return hasChild(i) ? children_.get(i) : null;
-  }
-
-  public void addChild(NodeType n) {
-    children_.add(n);
-  }
-
-  public void removeChild(NodeType n) { children_.remove(n); }
-
-  public void clearChildren() { children_.clear(); }
-
-  public void addChildren(List<? extends NodeType> l) {
-    children_.addAll(l);
-  }
-
-  public boolean hasChild(int i) { return children_.size() > i; }
-  public void setChild(int index, NodeType n) { children_.set(index, n); }
-  public ArrayList<NodeType> getChildren() { return children_; }
-
-  /**
-   * Count the total number of nodes in this tree. Leaf node will return 1.
-   * Non-leaf node will include all its children.
-   */
-  public int numNodes() {
-    int numNodes = 1;
-    for (NodeType child: children_) numNodes += child.numNodes();
-    return numNodes;
-  }
-
-  /**
-   * Add all nodes in the tree that satisfy 'predicate' to the list 'matches'
-   * This node is checked first, followed by its children in order. If the node
-   * itself matches, the children are skipped.
-   */
-  public <C extends TreeNode<NodeType>, D extends C> void collect(
-      Predicate<? super C> predicate, Collection<D> matches) {
-    // TODO: the semantics of this function are very strange. contains()
-    // checks using .equals() on the nodes. In the case of literals, slotrefs
-    // and maybe others, two different tree node objects can be equal and
-    // this function would only return one of them. This is not intuitive.
-    // We rely on these semantics to not have duplicate nodes. Investigate this.
-    if (predicate.apply((C) this) && !matches.contains(this)) {
-      matches.add((D) this);
-      return;
-    }
-    for (NodeType child: children_) child.collect(predicate, matches);
-  }
-
-  /**
-   * Add all nodes in the tree that are of class 'cl' to the list 'matches'.
-   * This node is checked first, followed by its children in order. If the node
-   * itself is of class 'cl', the children are skipped.
-   */
-  public <C extends TreeNode<NodeType>, D extends C> void collect(
-      Class cl, Collection<D> matches) {
-    if (cl.equals(getClass())) {
-      matches.add((D) this);
-      return;
-    }
-    for (NodeType child: children_) child.collect(cl, matches);
-  }
-
-  /**
-   * Add all nodes in the tree that satisfy 'predicate' to the list 'matches'
-   * This node is checked first, followed by its children in order. All nodes
-   * that match in the subtree are added.
-   */
-  public <C extends TreeNode<NodeType>, D extends C> void collectAll(
-      Predicate<? super C> predicate, List<D> matches) {
-    if (predicate.apply((C) this)) matches.add((D) this);
-    for (NodeType child: children_) child.collectAll(predicate, matches);
-  }
-
-  /**
-   * For each expression in 'nodeList', collect all subexpressions satisfying 'predicate'
-   * into 'matches'
-   */
-  public static <C extends TreeNode<C>, D extends C> void collect(
-      Collection<C> nodeList, Predicate<? super C> predicate, Collection<D> matches) {
-    for (C node: nodeList) node.collect(predicate, matches);
-  }
-
-  /**
-   * For each expression in 'nodeList', collect all subexpressions of class 'cl'
-   * into 'matches'
-   */
-  public static <C extends TreeNode<C>, D extends C> void collect(
-      Collection<C> nodeList, Class cl, Collection<D> matches) {
-    for (C node: nodeList) node.collect(cl, matches);
-  }
-
-  /**
-   * Return true if this node or any of its children satisfy 'predicate'.
-   */
-  public <C extends TreeNode<NodeType>> boolean contains(
-      Predicate<? super C> predicate) {
-    if (predicate.apply((C) this)) return true;
-    for (NodeType child: children_) if (child.contains(predicate)) return true;
-    return false;
-  }
-
-  /**
-   * Return true if this node or any of its children is an instance of class 'cl'.
-   */
-  public boolean contains(Class cl) {
-    if (cl.equals(getClass())) return true;
-    for (NodeType child: children_) if (child.contains(cl)) return true;
-    return false;
-  }
-
-  /**
-   * For each node in nodeList, return true if any subexpression satisfies
-   * contains('predicate').
-   */
-  public static <C extends TreeNode<C>, D extends C> boolean contains(
-      Collection<C> nodeList, Predicate<? super C> predicate) {
-    for (C node: nodeList) if (node.contains(predicate)) return true;
-    return false;
-  }
-
-  /**
-   * Return true if any node in nodeList contains children of class cl.
-   */
-  public static <C extends TreeNode<C>> boolean contains(
-      List<C> nodeList, Class cl) {
-    for (C node: nodeList) if (node.contains(cl)) return true;
-    return false;
-  }
-
-  /**
-   * Returns the first node/child of class cl (depth-first traversal).
-   */
-  public <C extends NodeType> C findFirstOf(Class<C> cl) {
-    if (this.getClass().equals(cl)) return (C) this;
-    for (NodeType child: children_) {
-      NodeType result = child.findFirstOf(cl);
-      if (result != null) return (C) result;
-    }
-    return null;
-  }
-
-  /**
-   * Visitor pattern accept method
-   */
-  public <C extends TreeNode<NodeType>> void accept(Visitor<C> visitor) {
-    visitor.visit((C) this);
-    for (NodeType p: children_) p.accept(visitor);
-  }
-}
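
The difference between collect() and collectAll() documented above is easiest to see on a small tree. A sketch, assuming the frontend's Expr/ArithmeticExpr subclasses of TreeNode and Guava's Lists/Predicates helpers:

  // given: Expr root = <expression tree for a + (a + b)>
  List<Expr> adds = Lists.newArrayList();
  root.collect(ArithmeticExpr.class, adds);
  // collect() adds the matching root and skips its children: adds == [a + (a + b)]

  List<Expr> allAdds = Lists.newArrayList();
  root.collectAll(Predicates.instanceOf(ArithmeticExpr.class), allAdds);
  // collectAll() keeps descending: allAdds == [a + (a + b), a + b]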

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/extdatasource/ApiVersion.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/extdatasource/ApiVersion.java b/fe/src/main/java/com/cloudera/impala/extdatasource/ApiVersion.java
deleted file mode 100644
index d2285a4..0000000
--- a/fe/src/main/java/com/cloudera/impala/extdatasource/ApiVersion.java
+++ /dev/null
@@ -1,53 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.extdatasource;
-
-import com.cloudera.impala.extdatasource.v1.ExternalDataSource;
-import com.google.common.base.Strings;
-
-/**
- * Enumerates the valid versions of the {@link ExternalDataSource} API.
- */
-public enum ApiVersion {
-  V1(com.cloudera.impala.extdatasource.v1.ExternalDataSource.class);
-
-  private final Class<?> apiInterface_;
-
-  ApiVersion(Class<?> interfaceClass) {
-    apiInterface_ = interfaceClass;
-  }
-
-  /**
-   * Gets the {@link Class} for the interface this API version represents.
-   */
-  public Class<?> getApiInterface() { return apiInterface_; }
-
-  /**
-   * Parses the API version from the string. Is case-insensitive.
-   * @return The value of the ApiVersion enum represented by the string or null
-   *         if the string is not a valid ApiVersion.
-   */
-  public static ApiVersion parseApiVersion(String apiVersionString) {
-    if (Strings.isNullOrEmpty(apiVersionString)) return null;
-    try {
-      return valueOf(apiVersionString.toUpperCase());
-    } catch (IllegalArgumentException ex) {
-      return null;
-    }
-  }
-}
\ No newline at end of file
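
parseApiVersion() above is intentionally forgiving about its input; reading off the code, its behavior is:

  ApiVersion.parseApiVersion("v1");   // ApiVersion.V1 (case-insensitive)
  ApiVersion.parseApiVersion("V1");   // ApiVersion.V1
  ApiVersion.parseApiVersion("v2");   // null - no such version is defined
  ApiVersion.parseApiVersion("");     // null
  ApiVersion.parseApiVersion(null);   // null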

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/extdatasource/ExternalDataSourceExecutor.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/extdatasource/ExternalDataSourceExecutor.java b/fe/src/main/java/com/cloudera/impala/extdatasource/ExternalDataSourceExecutor.java
deleted file mode 100644
index 3af470f..0000000
--- a/fe/src/main/java/com/cloudera/impala/extdatasource/ExternalDataSourceExecutor.java
+++ /dev/null
@@ -1,267 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.extdatasource;
-
-import java.io.File;
-import java.lang.reflect.Constructor;
-import java.net.URL;
-import java.net.URLClassLoader;
-import java.util.Map;
-
-import org.apache.commons.lang.ArrayUtils;
-import org.apache.thrift.TException;
-import org.apache.thrift.TSerializer;
-import org.apache.thrift.protocol.TBinaryProtocol;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.common.ImpalaRuntimeException;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.common.JniUtil;
-import com.cloudera.impala.extdatasource.thrift.TCloseParams;
-import com.cloudera.impala.extdatasource.thrift.TCloseResult;
-import com.cloudera.impala.extdatasource.thrift.TGetNextParams;
-import com.cloudera.impala.extdatasource.thrift.TGetNextResult;
-import com.cloudera.impala.extdatasource.thrift.TOpenParams;
-import com.cloudera.impala.extdatasource.thrift.TOpenResult;
-import com.cloudera.impala.extdatasource.thrift.TPrepareParams;
-import com.cloudera.impala.extdatasource.thrift.TPrepareResult;
-import com.cloudera.impala.extdatasource.v1.ExternalDataSource;
-import com.cloudera.impala.thrift.TErrorCode;
-import com.cloudera.impala.thrift.TStatus;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-
-/**
- * Wraps and executes an ExternalDataSource specified in an external jar. Used
- * in planning to call prepare() and in the backend to fetch results. The
- * executor takes the API version and abstracts the versioning from the caller,
- * e.g. calling the correct API interface and massaging any parameters that can
- * be handled here. There are thrift structures for all param and return types
- * representing the necessary structures. If future versions of the API are
- * added, the executor should be updated to call the appropriate API and handle
- * any differences. It is assumed that the API is updated in a way that
- * backwards compatibility is possible.
- */
-public class ExternalDataSourceExecutor {
-  private final static Logger LOG = LoggerFactory.getLogger(
-      ExternalDataSourceExecutor.class);
-  private final static TBinaryProtocol.Factory protocolFactory_ =
-      new TBinaryProtocol.Factory();
-
-  // Init string prefix used to indicate if the class should be cached. When this
-  // is specified, the Class is loaded and initialized at most once. Instances of
-  // the cached Class are still created for every query.
-  private final static String CACHE_CLASS_PREFIX = "CACHE_CLASS::";
-
-  // Map of class name to cached ExternalDataSource classes.
-  // Protected by cachedClassesLock_.
-  private final static Map<String, Class<?>> cachedClasses_ =
-      Maps.newHashMap();
-
-  // Number of cache hits/misses in cachedClasses_. Protected by cachedClassesLock_.
-  private static long numClassCacheHits_ = 0;
-  private static long numClassCacheMisses_ = 0;
-
-  // Protects cachedClasses_, numClassCacheHits_, and numClassCacheMisses_.
-  private final static Object cachedClassesLock_ = new Object();
-
-  private final ApiVersion apiVersion_;
-  private final ExternalDataSource dataSource_;
-  private final String jarPath_;
-  private final String className_;
-  private final String initString_;
-
-  public static long getNumClassCacheHits() {
-    synchronized (cachedClassesLock_) {
-      return numClassCacheHits_;
-    }
-  }
-
-  public static long getNumClassCacheMisses() {
-    synchronized (cachedClassesLock_) {
-      return numClassCacheMisses_;
-    }
-  }
-
-  /**
-   * @param jarPath The local path to the jar containing the ExternalDataSource.
-   * @param className The name of the class implementing the ExternalDataSource.
-   * @param apiVersionStr The API version the ExternalDataSource implements.
-   *                         Must be a valid value of {@link ApiVersion}.
-   * @param initString The init string registered with this data source.
-   */
-  public ExternalDataSourceExecutor(String jarPath, String className,
-      String apiVersionStr, String initString) throws ImpalaException {
-    Preconditions.checkNotNull(jarPath);
-
-    apiVersion_ = ApiVersion.valueOf(apiVersionStr);
-    if (apiVersion_ == null) {
-      throw new ImpalaRuntimeException("Invalid API version: " + apiVersionStr);
-    }
-    jarPath_ = jarPath;
-    className_ = className;
-    initString_ = initString;
-
-    try {
-      Class<?> c = getDataSourceClass();
-      Constructor<?> ctor = c.getConstructor();
-      dataSource_ = (ExternalDataSource) ctor.newInstance();
-    } catch (Exception ex) {
-      throw new ImpalaRuntimeException(String.format("Unable to load external data " +
-          "source library from path=%s className=%s apiVersion=%s", jarPath,
-          className, apiVersionStr), ex);
-    }
-  }
-
-  /**
-   * Returns the ExternalDataSource class, loading the jar if necessary. The
-   * class is cached if initString_ starts with CACHE_CLASS_PREFIX.
-   */
-  private Class<?> getDataSourceClass() throws Exception {
-    Class<?> c = null;
-    // Cache map key needs to contain both the class name and init string in case
-    // the same class is used for multiple tables where some are cached and others
-    // are not.
-    String cacheMapKey = String.format("%s.%s", className_, initString_);
-    synchronized (cachedClassesLock_) {
-      c = cachedClasses_.get(cacheMapKey);
-      if (c == null) {
-        URL url = new File(jarPath_).toURI().toURL();
-        URLClassLoader loader = URLClassLoader.newInstance(
-            new URL[] { url }, getClass().getClassLoader());
-        c = Class.forName(className_, true, loader);
-        if (!ArrayUtils.contains(c.getInterfaces(), apiVersion_.getApiInterface())) {
-          throw new ImpalaRuntimeException(String.format(
-              "Class '%s' does not implement interface '%s' required for API version %s",
-              className_, apiVersion_.getApiInterface().getName(), apiVersion_.name()));
-        }
-        // Only cache the class if the init string starts with CACHE_CLASS_PREFIX
-        if (initString_ != null && initString_.startsWith(CACHE_CLASS_PREFIX)) {
-          cachedClasses_.put(cacheMapKey, c);
-        }
-        LOG.info("Loaded jar for class {} at path {}", className_, jarPath_);
-        numClassCacheMisses_++;
-      } else {
-        numClassCacheHits_++;
-      }
-    }
-    return c;
-  }
-
-  public byte[] prepare(byte[] thriftParams) throws ImpalaException {
-    TPrepareParams params = new TPrepareParams();
-    JniUtil.deserializeThrift(protocolFactory_, params, thriftParams);
-    TPrepareResult result = prepare(params);
-    try {
-      return new TSerializer(protocolFactory_).serialize(result);
-    } catch (TException e) {
-      throw new InternalException(e.getMessage(), e);
-    }
-  }
-
-  public byte[] open(byte[] thriftParams) throws ImpalaException {
-    TOpenParams params = new TOpenParams();
-    JniUtil.deserializeThrift(protocolFactory_, params, thriftParams);
-    TOpenResult result = open(params);
-    try {
-      return new TSerializer(protocolFactory_).serialize(result);
-    } catch (TException e) {
-      throw new InternalException(e.getMessage(), e);
-    }
-  }
-
-  public byte[] getNext(byte[] thriftParams) throws ImpalaException {
-    TGetNextParams params = new TGetNextParams();
-    JniUtil.deserializeThrift(protocolFactory_, params, thriftParams);
-    TGetNextResult result = getNext(params);
-    try {
-      return new TSerializer(protocolFactory_).serialize(result);
-    } catch (TException e) {
-      throw new InternalException(e.getMessage(), e);
-    }
-  }
-
-  public byte[] close(byte[] thriftParams) throws ImpalaException {
-    TCloseParams params = new TCloseParams();
-    JniUtil.deserializeThrift(protocolFactory_, params, thriftParams);
-    TCloseResult result = close(params);
-    try {
-      return new TSerializer(protocolFactory_).serialize(result);
-    } catch (TException e) {
-      throw new InternalException(e.getMessage(), e);
-    }
-  }
-
-  // Helper method to log the exception to capture the stack and return an error TStatus
-  private TStatus logAndMakeErrorStatus(String opName, Exception e) {
-    String exceptionMessage = e.getMessage();
-    if (exceptionMessage == null) {
-      exceptionMessage = "No error message returned by data source. Check the " +
-          "impalad log for more information.";
-    }
-    String errorMessage = String.format(
-        "Error in data source (path=%s, class=%s, version=%s) %s: %s",
-        jarPath_, className_, apiVersion_.name(), opName,
-        exceptionMessage);
-    LOG.error(errorMessage, e); // Logs the stack
-    return new TStatus(TErrorCode.RUNTIME_ERROR, Lists.newArrayList(errorMessage));
-  }
-
-  public TPrepareResult prepare(TPrepareParams params) {
-    try {
-      TPrepareResult result = dataSource_.prepare(params);
-      result.validate();
-      return result;
-    } catch (Exception e) {
-      return new TPrepareResult(logAndMakeErrorStatus("prepare()", e));
-    }
-  }
-
-  public TOpenResult open(TOpenParams params) {
-    try {
-      TOpenResult result = dataSource_.open(params);
-      result.validate();
-      return result;
-    } catch (Exception e) {
-      return new TOpenResult(logAndMakeErrorStatus("open()", e));
-    }
-  }
-
-  public TGetNextResult getNext(TGetNextParams params) {
-    try {
-      TGetNextResult result = dataSource_.getNext(params);
-      result.validate();
-      return result;
-    } catch (Exception e) {
-      return new TGetNextResult(logAndMakeErrorStatus("getNext()", e));
-    }
-  }
-
-  public TCloseResult close(TCloseParams params) {
-    try {
-      TCloseResult result = dataSource_.close(params);
-      result.validate();
-      return result;
-    } catch (Exception e) {
-      return new TCloseResult(logAndMakeErrorStatus("close()", e));
-    }
-  }
-}
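
A rough sketch of how the executor is constructed and how the CACHE_CLASS_PREFIX convention described above is used (the jar path, class name, and init strings below are invented):

  // Plain init string: the data source Class is loaded fresh each time.
  ExternalDataSourceExecutor uncached = new ExternalDataSourceExecutor(
      "/tmp/example-datasource.jar",    // local jar path (hypothetical)
      "com.example.ExampleDataSource",  // must implement the v1 ExternalDataSource interface
      "V1",                             // ApiVersion enum name
      "host=example;port=1234");

  // CACHE_CLASS:: prefix: the Class is loaded and cached once per (class name, init string)
  // key, but a fresh instance is still created for every query.
  ExternalDataSourceExecutor cached = new ExternalDataSourceExecutor(
      "/tmp/example-datasource.jar",
      "com.example.ExampleDataSource",
      "V1",
      "CACHE_CLASS::host=example;port=1234");

Because the cache key combines the class name and the init string, the cached and uncached variants above never collide in cachedClasses_.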

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/hive/executor/ImpalaBigIntWritable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/hive/executor/ImpalaBigIntWritable.java b/fe/src/main/java/com/cloudera/impala/hive/executor/ImpalaBigIntWritable.java
deleted file mode 100644
index f410ddf..0000000
--- a/fe/src/main/java/com/cloudera/impala/hive/executor/ImpalaBigIntWritable.java
+++ /dev/null
@@ -1,38 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.hive.executor;
-
-import org.apache.hadoop.io.LongWritable;
-
-import com.cloudera.impala.util.UnsafeUtil;
-
-@SuppressWarnings("restriction")
-public class ImpalaBigIntWritable extends LongWritable {
-  // Ptr (to native heap) where the value should be read from and written to.
-  private final long ptr_;
-
-  public ImpalaBigIntWritable(long ptr) {
-    ptr_ = ptr;
-  }
-
-  @Override
-  public long get() { return UnsafeUtil.UNSAFE.getLong(ptr_); }
-
-  @Override
-  public void set(long v) { UnsafeUtil.UNSAFE.putLong(ptr_, v); }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/hive/executor/ImpalaBooleanWritable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/hive/executor/ImpalaBooleanWritable.java b/fe/src/main/java/com/cloudera/impala/hive/executor/ImpalaBooleanWritable.java
deleted file mode 100644
index f20f49e..0000000
--- a/fe/src/main/java/com/cloudera/impala/hive/executor/ImpalaBooleanWritable.java
+++ /dev/null
@@ -1,38 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.hive.executor;
-
-import org.apache.hadoop.io.BooleanWritable;
-
-import com.cloudera.impala.util.UnsafeUtil;
-
-@SuppressWarnings("restriction")
-public class ImpalaBooleanWritable extends BooleanWritable {
-  // Ptr (to native heap) where the value should be read from and written to.
-  private final long ptr_;
-
-  public ImpalaBooleanWritable(long ptr) {
-    ptr_ = ptr;
-  }
-
-  @Override
-  public boolean get() { return UnsafeUtil.UNSAFE.getByte(ptr_) != 0; }
-
-  @Override
-  public void set(boolean v) { UnsafeUtil.UNSAFE.putByte(ptr_, v ? (byte)1 : 0); }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/hive/executor/ImpalaBytesWritable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/hive/executor/ImpalaBytesWritable.java b/fe/src/main/java/com/cloudera/impala/hive/executor/ImpalaBytesWritable.java
deleted file mode 100644
index ce6fc0c..0000000
--- a/fe/src/main/java/com/cloudera/impala/hive/executor/ImpalaBytesWritable.java
+++ /dev/null
@@ -1,56 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.hive.executor;
-
-import org.apache.hadoop.io.BytesWritable;
-
-/**
- * Impala writable type that implements the BytesWritable interface. The data
- * marshalling is handled by the underlying {@link ImpalaStringWritable} object.
- */
-public class ImpalaBytesWritable extends BytesWritable {
-  private final ImpalaStringWritable string_;
-
-  public ImpalaBytesWritable(long ptr) {
-    string_ = new ImpalaStringWritable(ptr);
-  }
-
-  @Override
-  public byte[] copyBytes() {
-    byte[] src = getBytes();
-    return src.clone();
-  }
-
-  @Override
-  public byte[] get() { return getBytes(); }
-  @Override
-  public byte[] getBytes() { return string_.getBytes(); }
-  @Override
-  public int getCapacity() { return string_.getCapacity(); }
-  @Override
-  public int getLength() { return string_.getLength(); }
-
-  public ImpalaStringWritable getStringWritable() { return string_; }
-
-  @Override
-  public void set(byte[] v, int offset, int len) { string_.set(v, offset, len); }
-  @Override
-  public void setCapacity(int newCap) { string_.setCapacity(newCap); }
-  @Override
-  public void setSize(int size) { string_.setSize(size); }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/hive/executor/ImpalaDoubleWritable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/hive/executor/ImpalaDoubleWritable.java b/fe/src/main/java/com/cloudera/impala/hive/executor/ImpalaDoubleWritable.java
deleted file mode 100644
index cc96895..0000000
--- a/fe/src/main/java/com/cloudera/impala/hive/executor/ImpalaDoubleWritable.java
+++ /dev/null
@@ -1,38 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.hive.executor;
-
-import org.apache.hadoop.hive.serde2.io.DoubleWritable;
-
-import com.cloudera.impala.util.UnsafeUtil;
-
-@SuppressWarnings("restriction")
-public class ImpalaDoubleWritable extends DoubleWritable {
-  // Ptr (to native heap) where the value should be read from and written to.
-  private final long ptr_;
-
-  public ImpalaDoubleWritable(long ptr) {
-    ptr_ = ptr;
-  }
-
-  @Override
-  public double get() { return UnsafeUtil.UNSAFE.getDouble(ptr_); }
-
-  @Override
-  public void set(double v) { UnsafeUtil.UNSAFE.putDouble(ptr_, v); }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/hive/executor/ImpalaFloatWritable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/hive/executor/ImpalaFloatWritable.java b/fe/src/main/java/com/cloudera/impala/hive/executor/ImpalaFloatWritable.java
deleted file mode 100644
index b68ee69..0000000
--- a/fe/src/main/java/com/cloudera/impala/hive/executor/ImpalaFloatWritable.java
+++ /dev/null
@@ -1,38 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.hive.executor;
-
-import org.apache.hadoop.io.FloatWritable;
-
-import com.cloudera.impala.util.UnsafeUtil;
-
-@SuppressWarnings("restriction")
-public class ImpalaFloatWritable extends FloatWritable {
-  // Ptr (to native heap) where the value should be read from and written to.
-  private final long ptr_;
-
-  public ImpalaFloatWritable(long ptr) {
-    ptr_ = ptr;
-  }
-
-  @Override
-  public float get() { return UnsafeUtil.UNSAFE.getFloat(ptr_); }
-
-  @Override
-  public void set(float v) { UnsafeUtil.UNSAFE.putFloat(ptr_, v); }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/hive/executor/ImpalaIntWritable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/hive/executor/ImpalaIntWritable.java b/fe/src/main/java/com/cloudera/impala/hive/executor/ImpalaIntWritable.java
deleted file mode 100644
index afe59c4..0000000
--- a/fe/src/main/java/com/cloudera/impala/hive/executor/ImpalaIntWritable.java
+++ /dev/null
@@ -1,38 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.hive.executor;
-
-import org.apache.hadoop.io.IntWritable;
-
-import com.cloudera.impala.util.UnsafeUtil;
-
-@SuppressWarnings("restriction")
-public class ImpalaIntWritable extends IntWritable {
-  // Ptr (to native heap) where the value should be read from and written to.
-  private final long ptr_;
-
-  public ImpalaIntWritable(long ptr) {
-    ptr_ = ptr;
-  }
-
-  @Override
-  public int get() { return UnsafeUtil.UNSAFE.getInt(ptr_); }
-
-  @Override
-  public void set(int v) { UnsafeUtil.UNSAFE.putInt(ptr_, v); }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/hive/executor/ImpalaSmallIntWritable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/hive/executor/ImpalaSmallIntWritable.java b/fe/src/main/java/com/cloudera/impala/hive/executor/ImpalaSmallIntWritable.java
deleted file mode 100644
index 5a8e346..0000000
--- a/fe/src/main/java/com/cloudera/impala/hive/executor/ImpalaSmallIntWritable.java
+++ /dev/null
@@ -1,38 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.hive.executor;
-
-import org.apache.hadoop.hive.serde2.io.ShortWritable;
-
-import com.cloudera.impala.util.UnsafeUtil;
-
-@SuppressWarnings("restriction")
-public class ImpalaSmallIntWritable extends ShortWritable {
-  // Ptr (to native heap) where the value should be read from and written to.
-  private final long ptr_;
-
-  public ImpalaSmallIntWritable(long ptr) {
-    ptr_ = ptr;
-  }
-
-  @Override
-  public short get() { return UnsafeUtil.UNSAFE.getShort(ptr_); }
-
-  @Override
-  public void set(short v) { UnsafeUtil.UNSAFE.putShort(ptr_, v); }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/hive/executor/ImpalaStringWritable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/hive/executor/ImpalaStringWritable.java b/fe/src/main/java/com/cloudera/impala/hive/executor/ImpalaStringWritable.java
deleted file mode 100644
index a9dca74..0000000
--- a/fe/src/main/java/com/cloudera/impala/hive/executor/ImpalaStringWritable.java
+++ /dev/null
@@ -1,117 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.hive.executor;
-
-import java.nio.ByteBuffer;
-
-import com.cloudera.impala.util.UnsafeUtil;
-
-@SuppressWarnings("restriction")
-/**
- * Underlying class for Text and Bytes writable. This class understands marshalling
- * values that map to StringValue in the BE.
- * StringValue is replicated here:
- * struct StringValue {
- *   char* ptr;
- *   int len;
- * };
- */
-public class ImpalaStringWritable {
-  // The length is 8 bytes into the struct.
-  static public final int STRING_VALUE_LEN_OFFSET = 8;
-
-  // Ptr (to native heap) where the value should be read from and written to.
-  // This needs to be ABI compatible with the BE StringValue class
-  private final long stringValPtr_;
-
-  // Array object to convert between native and java heap (i.e. byte[]).
-  private ByteBuffer array_;
-
-  // Set if this object had to allocate from the native heap on the java side. If this
-  // is set, it will always be stringValPtr_->ptr
-  // We only need to allocate from the java side if we are trying to set the
-  // StringValue to a bigger size than what the native side allocated.
-  // If this object is used as a read-only input argument, this value will stay
-  // 0.
-  private long bufferPtr_;
-
-  // Allocation size of stringValPtr_'s ptr.
-  private int bufferCapacity_;
-
-  // Creates a string writable backed by a StringValue object. Ptr must be a valid
-  // StringValue (in the native heap).
-  public ImpalaStringWritable(long ptr) {
-    stringValPtr_ = ptr;
-    bufferPtr_= 0;
-    bufferCapacity_ = getLength();
-    array_ = ByteBuffer.allocate(0);
-  }
-
-  /*
-   * Implement finalize() to clean up any allocations from the native heap.
-   */
-  @Override
-  protected void finalize() throws Throwable {
-    UnsafeUtil.UNSAFE.freeMemory(bufferPtr_);
-    super.finalize();
-  }
-
-  // Returns the underlying bytes as a byte[]
-  public byte[] getBytes() {
-    int len = getLength();
-    // TODO: reuse this array.
-    array_ = ByteBuffer.allocate(len);
-    byte[] buffer = array_.array();
-
-    long srcPtr = UnsafeUtil.UNSAFE.getLong(stringValPtr_);
-    UnsafeUtil.Copy(buffer, 0, srcPtr, len);
-    return buffer;
-  }
-
-  // Returns the capacity of the underlying array
-  public int getCapacity() {
-    return bufferCapacity_;
-  }
-
-  // Updates the new capacity. No-op if the new capacity is smaller.
-  public void setCapacity(int newCap) {
-    if (newCap <= bufferCapacity_) return;
-    bufferPtr_ = UnsafeUtil.UNSAFE.reallocateMemory(bufferPtr_, newCap);
-    UnsafeUtil.UNSAFE.putLong(stringValPtr_, bufferPtr_);
-    bufferCapacity_ = newCap;
-  }
-
-  // Returns the length of the string
-  public int getLength() {
-    return UnsafeUtil.UNSAFE.getInt(stringValPtr_ + STRING_VALUE_LEN_OFFSET);
-  }
-
-  // Updates the length of the string. If the new length is bigger,
-  // the additional bytes are undefined.
-  public void setSize(int s) {
-    setCapacity(s);
-    UnsafeUtil.UNSAFE.putInt(stringValPtr_ + 8, s);
-  }
-
-  // Sets (v[offset], len) to the underlying buffer, growing it as necessary.
-  public void set(byte[] v, int offset, int len) {
-    setSize(len);
-    long strPtr = UnsafeUtil.UNSAFE.getLong(stringValPtr_);
-    UnsafeUtil.Copy(strPtr, v, offset, len);
-  }
-}
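
The STRING_VALUE_LEN_OFFSET of 8 above follows directly from the StringValue layout quoted in the class comment: on a 64-bit backend the char* ptr occupies bytes 0-7, so the int len field begins at byte 8. A sketch of the write path (the native pointer is supplied by the backend; the value here is hypothetical):

  // nativeStringValPtr: address of a backend StringValue struct.
  ImpalaStringWritable sv = new ImpalaStringWritable(nativeStringValPtr);
  byte[] value = "hello impala".getBytes();
  sv.set(value, 0, value.length);
  // If value.length exceeds the buffer the backend allocated, setCapacity() reallocates
  // on the native heap and stores the new pointer back into the struct; setSize() then
  // writes the length at offset 8, and the bytes are copied to the struct's ptr.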

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/hive/executor/ImpalaTextWritable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/hive/executor/ImpalaTextWritable.java b/fe/src/main/java/com/cloudera/impala/hive/executor/ImpalaTextWritable.java
deleted file mode 100644
index e17a4df..0000000
--- a/fe/src/main/java/com/cloudera/impala/hive/executor/ImpalaTextWritable.java
+++ /dev/null
@@ -1,47 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.hive.executor;
-
-import org.apache.hadoop.io.Text;
-
-/**
- * Impala writable type that implements the Text interface. The data marshalling is
- * handled by the underlying {@link ImpalaStringWritable} object.
- */
-public class ImpalaTextWritable extends Text {
-  private final ImpalaStringWritable string_;
-
-  public ImpalaTextWritable(long ptr) {
-    string_ = new ImpalaStringWritable(ptr);
-  }
-
-  @Override
-  public String toString() { return new String(getBytes()); }
-  @Override
-  public byte[] getBytes() { return string_.getBytes(); }
-  @Override
-  public int getLength() { return string_.getLength(); }
-
-  public ImpalaStringWritable getStringWritable() { return string_; }
-
-  @Override
-  public void set(byte[] v, int offset, int len) {
-    string_.set(v, offset, len);
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/hive/executor/ImpalaTinyIntWritable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/hive/executor/ImpalaTinyIntWritable.java b/fe/src/main/java/com/cloudera/impala/hive/executor/ImpalaTinyIntWritable.java
deleted file mode 100644
index 17a7b14..0000000
--- a/fe/src/main/java/com/cloudera/impala/hive/executor/ImpalaTinyIntWritable.java
+++ /dev/null
@@ -1,38 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.hive.executor;
-
-import org.apache.hadoop.hive.serde2.io.ByteWritable;
-
-import com.cloudera.impala.util.UnsafeUtil;
-
-@SuppressWarnings("restriction")
-public class ImpalaTinyIntWritable extends ByteWritable {
-  // Ptr (to native heap) where the value should be read from and written to.
-  private final long ptr_;
-
-  public ImpalaTinyIntWritable(long ptr) {
-    ptr_ = ptr;
-  }
-
-  @Override
-  public byte get() { return UnsafeUtil.UNSAFE.getByte(ptr_); }
-
-  @Override
-  public void set(byte v) { UnsafeUtil.UNSAFE.putByte(ptr_, v); }
-}


http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/util/AvroSchemaParser.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/util/AvroSchemaParser.java b/fe/src/main/java/com/cloudera/impala/util/AvroSchemaParser.java
deleted file mode 100644
index 60b0c7a..0000000
--- a/fe/src/main/java/com/cloudera/impala/util/AvroSchemaParser.java
+++ /dev/null
@@ -1,204 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.util;
-
-import static org.apache.avro.Schema.Type.BOOLEAN;
-import static org.apache.avro.Schema.Type.DOUBLE;
-import static org.apache.avro.Schema.Type.FLOAT;
-import static org.apache.avro.Schema.Type.INT;
-import static org.apache.avro.Schema.Type.LONG;
-import static org.apache.avro.Schema.Type.STRING;
-
-import java.util.Collections;
-import java.util.Hashtable;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.avro.Schema;
-import org.apache.avro.SchemaParseException;
-import org.codehaus.jackson.JsonNode;
-
-import com.cloudera.impala.analysis.ColumnDef;
-import com.cloudera.impala.analysis.TypeDef;
-import com.cloudera.impala.catalog.ArrayType;
-import com.cloudera.impala.catalog.MapType;
-import com.cloudera.impala.catalog.ScalarType;
-import com.cloudera.impala.catalog.StructField;
-import com.cloudera.impala.catalog.StructType;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-/**
- * Utility class used to parse Avro schema. Checks that the schema is valid
- * and performs mapping of Avro types to Impala types.
- * Note: This code is loosely based on the parsing code in the Hive AvroSerDe.
- */
-public class AvroSchemaParser {
-  // Map of Avro to Impala primitive types.
-  private static final Map<Schema.Type, Type> avroToImpalaPrimitiveTypeMap_;
-  static {
-    Map<Schema.Type, Type> typeMap = new Hashtable<Schema.Type, Type>();
-    typeMap.put(STRING, Type.STRING);
-    typeMap.put(INT, Type.INT);
-    typeMap.put(BOOLEAN, Type.BOOLEAN);
-    typeMap.put(LONG, Type.BIGINT);
-    typeMap.put(FLOAT, Type.FLOAT);
-    typeMap.put(DOUBLE, Type.DOUBLE);
-    avroToImpalaPrimitiveTypeMap_ = Collections.unmodifiableMap(typeMap);
-  }
-
-  /**
-   * Parses the Avro schema string literal, mapping the Avro types to Impala types.
-   * Returns a list of ColumnDef objects with their name and type info set.
-   * Throws an AnalysisException if the Avro type maps to a type that Impala
-   * does not yet support.
-   * Throws a SchemaParseException if the Avro schema was invalid.
-   */
-  public static List<ColumnDef> parse(String schemaStr)
-      throws SchemaParseException, AnalysisException {
-    Schema.Parser avroSchemaParser = new Schema.Parser();
-    Schema schema = avroSchemaParser.parse(schemaStr);
-    if (!schema.getType().equals(Schema.Type.RECORD)) {
-      throw new UnsupportedOperationException("Schema for table must be of type " +
-          "RECORD. Received type: " + schema.getType());
-    }
-    List<ColumnDef> colDefs = Lists.newArrayListWithCapacity(schema.getFields().size());
-    for (Schema.Field field: schema.getFields()) {
-      ColumnDef colDef = new ColumnDef(field.name(),
-          new TypeDef(getTypeInfo(field.schema(), field.name())), field.doc());
-      colDef.analyze();
-      colDefs.add(colDef);
-    }
-    return colDefs;
-  }
-
-  /**
-   * Parses the given Avro schema and returns the matching Impala type
-   * for this field. Handles primitive and complex types.
-   */
-  private static Type getTypeInfo(Schema schema, String colName)
-      throws AnalysisException {
-    // Avro requires NULLable types to be defined as unions of some type T
-    // and NULL.  This is annoying and we're going to hide it from the user.
-    if (isNullableType(schema)) {
-      return getTypeInfo(getColumnType(schema), colName);
-    }
-
-    Schema.Type type = schema.getType();
-    if (avroToImpalaPrimitiveTypeMap_.containsKey(type)) {
-      return avroToImpalaPrimitiveTypeMap_.get(type);
-    }
-
-    switch(type) {
-      case ARRAY:
-        Type itemType = getTypeInfo(schema.getElementType(), colName);
-        return new ArrayType(itemType);
-      case MAP:
-        Type valueType = getTypeInfo(schema.getValueType(), colName);
-        return new MapType(Type.STRING, valueType);
-      case RECORD:
-        StructType structType = new StructType();
-        for (Schema.Field field: schema.getFields()) {
-          Type fieldType = getTypeInfo(field.schema(), colName);
-          structType.addField(new StructField(field.name(), fieldType, field.doc()));
-        }
-        return structType;
-      case BYTES:
-      // Decimal is stored in Avro as BYTES.
-        Type decimalType = getDecimalType(schema);
-        if (decimalType != null) return decimalType;
-      // TODO: Add support for stored Avro UNIONs by exposing them as STRUCTs in Impala.
-      case UNION:
-      case ENUM:
-      case FIXED:
-      case NULL:
-      default: {
-        throw new AnalysisException(String.format(
-            "Unsupported type '%s' of column '%s'", type.getName(), colName));
-      }
-    }
-  }
-
-  /**
-   * Returns true if this is a nullable type (a Union[T, Null]), false otherwise.
-   */
-  private static boolean isNullableType(Schema schema) {
-    // [null, null] not allowed, so this check is ok.
-    return schema.getType().equals(Schema.Type.UNION) && schema.getTypes().size() == 2 &&
-        (schema.getTypes().get(0).getType().equals(Schema.Type.NULL) ||
-         schema.getTypes().get(1).getType().equals(Schema.Type.NULL));
-  }
-
-  /**
-   * If a nullable type, get the schema for the non-nullable type which will
-   * provide Impala column type information.
-   */
-  private static Schema getColumnType(Schema schema) {
-    List<Schema> types = schema.getTypes();
-    return types.get(0).getType().equals(Schema.Type.NULL) ? types.get(1) : types.get(0);
-  }
-
-  /**
-   * Attempts to parse decimal type information from the Avro schema, returning
-   * a decimal ColumnType if successful or null if this schema does not map
-   * to a decimal type.
-   * Decimal is defined in Avro as a BYTE type with the logicalType property
-   * set to "decimal" and a specified scale/precision.
-   * Throws a SchemaParseException if logicalType=decimal, but scale/precision
-   * is not specified or is in an incorrect format.
-   */
-  private static Type getDecimalType(Schema schema) {
-    Preconditions.checkState(schema.getType() == Schema.Type.BYTES);
-    String logicalType = schema.getProp("logicalType");
-    if (logicalType != null && logicalType.equalsIgnoreCase("decimal")) {
-      // Parse the scale/precision of the decimal type.
-      Integer scale = getDecimalProp(schema, "scale");
-      // The Avro spec states that scale should default to zero if not set.
-      if (scale == null) scale = 0;
-
-      // Precision is a required property according to the Avro spec.
-      Integer precision = getDecimalProp(schema, "precision");
-      if (precision == null) {
-        throw new SchemaParseException(
-            "No 'precision' property specified for 'decimal' logicalType");
-      }
-      return ScalarType.createDecimalType(precision, scale);
-    }
-    return null;
-  }
-
-  /**
-   * Parses a decimal property and returns the value as an integer, or null
-   * if the property isn't set. Used to parse decimal scale/precision.
-   * Throws a SchemaParseException if the property doesn't parse to a
-   * natural number.
-   */
-  private static Integer getDecimalProp(Schema schema, String propName)
-      throws SchemaParseException {
-    JsonNode node = schema.getJsonProp(propName);
-    if (node == null) return null;
-    int propValue = node.getValueAsInt(-1);
-    if (propValue < 0) {
-      throw new SchemaParseException(String.format("Invalid decimal '%s' " +
-          "property value: %s", propName, node.getValueAsText()));
-    }
-    return propValue;
-  }
-}
\ No newline at end of file
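
A minimal usage sketch of the parser removed above (the class keeps the same API after the rename to org.apache.impala). The schema literal is hypothetical; it exercises the nullable-union unwrapping and the BYTES-with-logicalType=decimal mapping documented in the comments:

    import java.util.List;

    import com.cloudera.impala.analysis.ColumnDef;
    import com.cloudera.impala.util.AvroSchemaParser;

    public class AvroSchemaParserExample {
      public static void main(String[] args) throws Exception {
        // Record with a nullable INT (union with null) and a decimal stored
        // as BYTES carrying the "decimal" logicalType.
        String schemaStr =
            "{\"type\": \"record\", \"name\": \"rec\", \"fields\": ["
            + " {\"name\": \"id\", \"type\": [\"null\", \"int\"]},"
            + " {\"name\": \"price\", \"type\": {\"type\": \"bytes\","
            + "  \"logicalType\": \"decimal\", \"precision\": 10, \"scale\": 2}}"
            + "]}";
        // parse() unwraps the nullable union and maps the logical decimal to
        // DECIMAL(10,2); unsupported Avro types raise an AnalysisException.
        List<ColumnDef> cols = AvroSchemaParser.parse(schemaStr);
        for (ColumnDef col : cols) {
          System.out.println(col.getColName() + " -> " + col.getType().toSql());
        }
      }
    }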

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/util/AvroSchemaUtils.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/util/AvroSchemaUtils.java b/fe/src/main/java/com/cloudera/impala/util/AvroSchemaUtils.java
deleted file mode 100644
index f86c347..0000000
--- a/fe/src/main/java/com/cloudera/impala/util/AvroSchemaUtils.java
+++ /dev/null
@@ -1,189 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.util;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.net.URL;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.commons.io.IOUtils;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.serde2.avro.AvroSerdeUtils;
-
-import com.cloudera.impala.analysis.ColumnDef;
-import com.cloudera.impala.catalog.PrimitiveType;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.FileSystemUtil;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Strings;
-import com.google.common.collect.Lists;
-
-/**
- * Contains utility functions for dealing with Avro schemas.
- */
-public class AvroSchemaUtils {
-
-  /**
-   * Gets an Avro table's JSON schema from the list of given table property search
-   * locations. The schema may be specified as a string literal or provided as a
-   * Hadoop FileSystem or http URL that points to the schema. Apart from ensuring
-   * that the JSON schema is not SCHEMA_NONE, this function does not perform any
-   * additional validation on the returned string (e.g., it may not be a valid
-   * schema). Returns the Avro schema or null if none was specified in the search
-   * locations. Throws an AnalysisException if a schema was specified, but could not
-   * be retrieved, e.g., because of an invalid URL.
-   */
-  public static String getAvroSchema(List<Map<String, String>> schemaSearchLocations)
-      throws AnalysisException {
-    String url = null;
-    // Search all locations and break out on the first valid schema found.
-    for (Map<String, String> schemaLocation: schemaSearchLocations) {
-      if (schemaLocation == null) continue;
-
-      String literal =
-          schemaLocation.get(
-              AvroSerdeUtils.AvroTableProperties.SCHEMA_LITERAL.getPropName());
-      if (literal != null && !literal.equals(AvroSerdeUtils.SCHEMA_NONE)) return literal;
-
-      url = schemaLocation.get(
-          AvroSerdeUtils.AvroTableProperties.SCHEMA_URL.getPropName());
-      if (url != null && !url.equals(AvroSerdeUtils.SCHEMA_NONE)) {
-        url = url.trim();
-        break;
-      }
-    }
-    if (url == null) return null;
-
-    String schema = null;
-    InputStream urlStream = null;
-    try {
-      // TODO: Add support for https:// here.
-      if (url.toLowerCase().startsWith("http://")) {
-        urlStream = new URL(url).openStream();
-        schema = IOUtils.toString(urlStream);
-      } else {
-        Path path = new Path(url);
-        FileSystem fs = null;
-        fs = path.getFileSystem(FileSystemUtil.getConfiguration());
-        if (!fs.exists(path)) {
-          throw new AnalysisException(String.format(
-              "Invalid avro.schema.url: %s. Path does not exist.", url));
-        }
-        schema = FileSystemUtil.readFile(path);
-      }
-    } catch (AnalysisException e) {
-      throw e;
-    } catch (IOException e) {
-      throw new AnalysisException(String.format(
-          "Failed to read Avro schema at: %s. %s ", url, e.getMessage()));
-    } catch (Exception e) {
-      throw new AnalysisException(String.format(
-          "Invalid avro.schema.url: %s. %s", url, e.getMessage()));
-    } finally {
-      if (urlStream != null) IOUtils.closeQuietly(urlStream);
-    }
-    return schema;
-  }
-
-  /**
-   * Reconciles differences in names/types between the given list of column definitions
-   * and the column definitions corresponding to an Avro Schema. Populates 'warning'
-   * if there are inconsistencies between the column definitions and the Avro schema.
-   * Returns the reconciled column definitions according to the following conflict
-   * resolution policy:
-   *
-   * Mismatched number of columns -> Prefer Avro columns.
-   * Always prefer Avro schema except for column type CHAR/VARCHAR/STRING:
-   *   A CHAR/VARCHAR/STRING column definition maps to an Avro STRING. The reconciled
-   *   column will preserve the type in the column definition but use the column name
-   *   and comment from the Avro schema.
-   */
-  public static List<ColumnDef> reconcileSchemas(
-      List<ColumnDef> colDefs, List<ColumnDef> avroCols, StringBuilder warning) {
-    if (colDefs.size() != avroCols.size()) {
-      warning.append(String.format(
-          "Ignoring column definitions in favor of Avro schema.\n" +
-          "The Avro schema has %s column(s) but %s column definition(s) were given.",
-           avroCols.size(), colDefs.size()));
-      return avroCols;
-    }
-
-    List<ColumnDef> result = Lists.newArrayListWithCapacity(colDefs.size());
-    for (int i = 0; i < avroCols.size(); ++i) {
-      ColumnDef colDef = colDefs.get(i);
-      ColumnDef avroCol = avroCols.get(i);
-      Preconditions.checkNotNull(colDef.getType());
-      Preconditions.checkNotNull(avroCol.getType());
-
-      // A CHAR/VARCHAR/STRING column definition maps to an Avro STRING, and is preserved
-      // as a CHAR/VARCHAR/STRING in the reconciled schema. Column name and comment
-      // are taken from the Avro schema.
-      if ((colDef.getType().isStringType() && avroCol.getType().isStringType())) {
-        Preconditions.checkState(
-            avroCol.getType().getPrimitiveType() == PrimitiveType.STRING);
-        ColumnDef reconciledColDef = new ColumnDef(
-            avroCol.getColName(), colDef.getTypeDef(), avroCol.getComment());
-        try {
-          reconciledColDef.analyze();
-        } catch (AnalysisException e) {
-          Preconditions.checkNotNull(
-              null, "reconciledColDef.analyze() should never throw.");
-        }
-        result.add(reconciledColDef);
-      } else {
-        result.add(avroCol);
-      }
-
-      // Populate warning string if there are name and/or type inconsistencies.
-      if (!colDef.getColName().equals(avroCol.getColName()) ||
-          !colDef.getType().equals(avroCol.getType())) {
-        if (warning.length() == 0) {
-          // Add warning preamble for the first mismatch.
-          warning.append("Resolved the following name and/or type inconsistencies " +
-              "between the column definitions and the Avro schema.\n");
-        }
-        warning.append(String.format("Column definition at position %s:  %s %s\n",
-            i, colDefs.get(i).getColName(), colDefs.get(i).getType().toSql()));
-        warning.append(String.format("Avro schema column at position %s: %s %s\n",
-            i, avroCols.get(i).getColName(), avroCols.get(i).getType().toSql()));
-        warning.append(String.format("Resolution at position %s: %s %s\n",
-            i, result.get(i).getColName(), result.get(i).getType().toSql()));
-      }
-    }
-    Preconditions.checkState(result.size() == avroCols.size());
-    Preconditions.checkState(result.size() == colDefs.size());
-    return result;
-  }
-
-  /**
-   * Sets the comment of each column definition to 'from deserializer' if not already
-   * set. The purpose of this function is to provide behavioral consistency with
-   * Hive ('deserializer' is not applicable to Impala) with respect to column comments
-   * set for Avro tables.
-   */
-  public static void setFromSerdeComment(List<ColumnDef> colDefs) {
-    for (ColumnDef colDef: colDefs) {
-      if (Strings.isNullOrEmpty(colDef.getComment())) {
-        colDef.setComment("from deserializer");
-      }
-    }
-  }
-}
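
A short sketch of the schema lookup helper above, assuming the standard Hive property names (avro.schema.literal, avro.schema.url) and a hypothetical HDFS URL. The first search location that defines either property wins, with a literal taking precedence within a location:

    import java.util.List;
    import java.util.Map;

    import com.cloudera.impala.util.AvroSchemaUtils;
    import com.google.common.collect.Lists;
    import com.google.common.collect.Maps;

    public class AvroSchemaLookupExample {
      public static void main(String[] args) throws Exception {
        // Table properties are searched before serde properties here; the
        // ordering of this list is what defines the search priority.
        Map<String, String> tblProps = Maps.newHashMap();
        tblProps.put("avro.schema.url", "hdfs://nn:8020/schemas/alltypes.json");
        Map<String, String> serdeProps = Maps.newHashMap();

        List<Map<String, String>> searchLocations = Lists.newArrayList();
        searchLocations.add(tblProps);
        searchLocations.add(serdeProps);

        // Returns the schema text, or null if neither property is set; throws
        // an AnalysisException if the URL cannot be read.
        String schemaJson = AvroSchemaUtils.getAvroSchema(searchLocations);
        System.out.println(schemaJson);
      }
    }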

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/util/DisjointSet.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/util/DisjointSet.java b/fe/src/main/java/com/cloudera/impala/util/DisjointSet.java
deleted file mode 100644
index bce214e..0000000
--- a/fe/src/main/java/com/cloudera/impala/util/DisjointSet.java
+++ /dev/null
@@ -1,142 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.util;
-
-import java.util.Collection;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.Set;
-
-import com.google.common.collect.Maps;
-import com.google.common.collect.Sets;
-
-/**
- * Basic implementation of the disjoint-set data structure.
- * Stores a set of disjoint item sets and provides efficient implementations of mainly
- * two operations:
- * 1. Find the item set corresponding to a given member item (get() function)
- * 2. Compute the union of two item sets (union() function)
- */
-public class DisjointSet<T> {
-  // Maps from an item to its item set.
-  private final Map<T, Set<T>> itemSets_ = Maps.newHashMap();
-  private final Set<Set<T>> uniqueSets_ = Sets.newHashSet();
-
-  /**
-   * Returns the item set corresponding to the given item or null if it
-   * doesn't exist.
-   */
-  public Set<T> get(T item) { return itemSets_.get(item); }
-
-  public Set<Set<T>> getSets() { return uniqueSets_; }
-
-  /**
-   * Registers a new item set with a single item. Returns the new item set.
-   * Throws if such an item set already exists.
-   */
-  public Set<T> makeSet(T item) {
-    if (itemSets_.containsKey(item)) {
-      throw new IllegalStateException(
-          "Item set for item already exists: " + item.toString());
-    }
-    Set<T> s = Sets.newHashSet(item);
-    itemSets_.put(item, s);
-    uniqueSets_.add(s);
-    return s;
-  }
-
-  /**
-   * Merges the two item sets belonging to the members a and b. The merged set contains
-   * at least a and b even if a or b did not have an associated item set.
-   * Returns false if the item sets of a and b are non-empty and already identical,
-   * true otherwise.
-   */
-  public boolean union(T a, T b) {
-    Set<T> aItems = itemSets_.get(a);
-    Set<T> bItems = itemSets_.get(b);
-    // check if the sets are already identical
-    if (aItems != null && bItems != null && aItems == bItems) return false;
-
-    // union(x, x) is equivalent to makeSet(x)
-    if (a.equals(b) && aItems == null) {
-      makeSet(a);
-      return true;
-    }
-
-    // create sets for a or b if not present already
-    if (aItems == null) aItems = makeSet(a);
-    if (bItems == null) bItems = makeSet(b);
-
-    // will contain the union of aItems and bItems
-    Set<T> mergedItems = aItems;
-    // always the smaller of the two sets to be merged
-    Set<T> updateItems = bItems;
-    if (bItems.size() > aItems.size()) {
-      mergedItems = bItems;
-      updateItems = aItems;
-    }
-    for (T item: updateItems) {
-      mergedItems.add(item);
-      itemSets_.put(item, mergedItems);
-    }
-    uniqueSets_.remove(updateItems);
-    return true;
-  }
-
-  /**
-   * Merges all the item sets corresponding to the given items. Returns true if any item
-   * sets were merged or created, false otherwise (item sets are already identical).
-   */
-  public boolean bulkUnion(Collection<T> items) {
-    if (items.isEmpty()) return false;
-    Iterator<T> it = items.iterator();
-    T head = it.next();
-    // bulkUnion(x) is equivalent to makeSet(x)
-    if (!it.hasNext()) {
-      if (get(head) != null) return false;
-      makeSet(head);
-      return true;
-    }
-    boolean result = false;
-    while(it.hasNext()) {
-      boolean changed = union(head, it.next());
-      result = result || changed;
-    }
-    return result;
-  }
-
-  /**
-   * Checks the internal consistency of this data structure.
-   * Throws an IllegalStateException if an inconsistency is detected.
-   */
-  public void checkConsistency() {
-    Set<Set<T>> validatedSets = Sets.newHashSet();
-    for (Set<T> itemSet: itemSets_.values()) {
-      // Avoid checking the same item set multiple times.
-      if (validatedSets.contains(itemSet)) continue;
-      // Validate that all items in this set are properly mapped to
-      // the set itself.
-      for (T item: itemSet) {
-        if (itemSet != itemSets_.get(item)) {
-          throw new IllegalStateException("DisjointSet is in an inconsistent state.");
-        }
-      }
-      validatedSets.add(itemSet);
-    }
-  }
-}
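
A small usage sketch of the disjoint-set operations documented above; the integer members are arbitrary:

    import java.util.Set;

    import com.cloudera.impala.util.DisjointSet;
    import com.google.common.collect.Lists;

    public class DisjointSetExample {
      public static void main(String[] args) {
        DisjointSet<Integer> ds = new DisjointSet<Integer>();
        ds.makeSet(1);                              // {1}
        ds.union(2, 3);                             // creates and merges {2}, {3}
        ds.union(1, 3);                             // {1, 2, 3}
        ds.bulkUnion(Lists.newArrayList(4, 5, 6));  // {4, 5, 6}

        Set<Integer> members = ds.get(2);           // the set containing 2: {1, 2, 3}
        System.out.println(members);
        System.out.println(ds.getSets().size());    // 2 disjoint sets remain
        ds.checkConsistency();                      // throws if the mapping is broken
      }
    }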

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/util/EventSequence.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/util/EventSequence.java b/fe/src/main/java/com/cloudera/impala/util/EventSequence.java
deleted file mode 100644
index 6b12c2e..0000000
--- a/fe/src/main/java/com/cloudera/impala/util/EventSequence.java
+++ /dev/null
@@ -1,58 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.util;
-
-import java.util.List;
-
-import com.cloudera.impala.thrift.TEventSequence;
-
-import com.google.common.collect.Lists;
-
-/**
- * Wrapper around TEventSequence so that we can mark events with a single method call.
- * Events are 'marked' as they happen (so in order, with no time-travel backwards).
- */
-public class EventSequence {
-  private final List<Long> timestamps_ = Lists.newArrayList();
-  private final List<String> labels_ = Lists.newArrayList();
-
-  private final long startTime_;
-  private final String name_;
-
-  public EventSequence(String name) {
-    name_ = name;
-    startTime_ = System.nanoTime();
-  }
-
-  /**
-   * Saves an event at the current time with the given label.
-   */
-  public void markEvent(String label) {
-    // Timestamps should be in ns resolution
-    timestamps_.add(System.nanoTime() - startTime_);
-    labels_.add(label);
-  }
-
-  public TEventSequence toThrift() {
-    TEventSequence ret = new TEventSequence();
-    ret.timestamps = timestamps_;
-    ret.labels = labels_;
-    ret.name = name_;
-    return ret;
-  }
-}
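
A brief sketch of how a timeline is built up and handed off as Thrift; the sleeps and labels are illustrative only:

    import com.cloudera.impala.thrift.TEventSequence;
    import com.cloudera.impala.util.EventSequence;

    public class EventSequenceExample {
      public static void main(String[] args) throws InterruptedException {
        // Timing starts at construction; each markEvent() records the elapsed
        // nanoseconds since then together with its label.
        EventSequence timeline = new EventSequence("Query Compilation");
        Thread.sleep(5);
        timeline.markEvent("Analysis finished");
        Thread.sleep(5);
        timeline.markEvent("Planning finished");

        // Convert to Thrift, e.g. for attaching to a runtime profile.
        TEventSequence t = timeline.toThrift();
        System.out.println(t.name + ": " + t.labels.size() + " events");
      }
    }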

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/util/FileWatchService.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/util/FileWatchService.java b/fe/src/main/java/com/cloudera/impala/util/FileWatchService.java
deleted file mode 100644
index 88a456d..0000000
--- a/fe/src/main/java/com/cloudera/impala/util/FileWatchService.java
+++ /dev/null
@@ -1,140 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.util;
-
-import java.io.File;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.ScheduledFuture;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-
-/**
- * Service to watch a file for changes. A thread periodically checks the file
- * modification time and uses the provided {@link FileChangeListener} to notify
- * a consumer.
- */
-public class FileWatchService {
-  final static Logger LOG = LoggerFactory.getLogger(FileWatchService.class);
-
-  // Default time to wait between checking the file.
-  static final long DEFAULT_CHECK_INTERVAL_MS = 10 * 1000;
-
-  // Time between checking for changes. Mutable for unit tests.
-  private long checkIntervalMs_ = DEFAULT_CHECK_INTERVAL_MS;
-
-  // Future returned by scheduleAtFixedRate(), needed to stop the checking thread.
-  private ScheduledFuture<?> fileCheckFuture_;
-
-  private final AtomicBoolean running_;
-  private final FileChangeListener changeListener_; // Used to notify when changes occur.
-  private final File file_; // The file to check for changes.
-  private boolean alreadyWarned_; // Avoid repeatedly warning if the file is missing
-  private long prevChange_; // Time of the last observed change
-
-  /**
-   * Listener used to notify of file changes.
-   */
-  public interface FileChangeListener {
-
-    /**
-     * Called when the file changes.
-     */
-    void onFileChange();
-  }
-
-  public FileWatchService(File file, FileChangeListener listener) {
-    Preconditions.checkNotNull(file);
-    Preconditions.checkNotNull(listener);
-    Preconditions.checkArgument(file.exists());
-    running_ = new AtomicBoolean(false);
-    file_ = file;
-    changeListener_ = listener;
-    prevChange_ = 0L;
-    alreadyWarned_ = false;
-  }
-
-  /**
-   * Set the time (in milliseconds) to wait between checking the file for changes.
-   * Only used in tests.
-   */
-  @VisibleForTesting
-  public void setCheckIntervalMs(long checkIntervalMs) {
-    checkIntervalMs_ = checkIntervalMs;
-  }
-
-  /**
-   * Checks if the file has changed since the last observed change and if so,
-   * notifies the listener.
-   */
-  private void checkFile() {
-    if (file_.exists()) {
-      long lastChange = file_.lastModified();
-      if (lastChange > prevChange_) {
-        changeListener_.onFileChange();
-        prevChange_ = lastChange;
-        alreadyWarned_ = false;
-      }
-    } else {
-      if (!alreadyWarned_) {
-        LOG.warn("File does not exist: {}", file_.getPath());
-        alreadyWarned_ = true;
-      }
-    }
-  }
-
-  /**
-   * Starts the thread to check for file changes. Continues checking for file changes
-   * every 'checkIntervalMs_' milliseconds until stop() is called.
-   */
-  public synchronized void start() {
-    Preconditions.checkState(!running_.get());
-    running_.set(true);
-
-    ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor(
-        new ThreadFactoryBuilder()
-        .setDaemon(true)
-        .setNameFormat("FileWatchThread(" + file_.getPath() + ")-%d")
-        .build());
-    fileCheckFuture_ = executor.scheduleAtFixedRate(new Runnable() {
-      public void run() {
-        try {
-          checkFile();
-        } catch (SecurityException e) {
-          LOG.warn("Not allowed to check read file existence: " + file_.getPath(), e);
-        }
-      }
-    }, 0L, checkIntervalMs_, TimeUnit.MILLISECONDS);
-  }
-
-  /**
-   * Stops the file watching thread.
-   */
-  public synchronized void stop() {
-    Preconditions.checkState(running_.get());
-    running_.set(false);
-    fileCheckFuture_.cancel(false);
-  }
-}
\ No newline at end of file
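
A usage sketch of the watcher, assuming a hypothetical policy file path. The file must exist when the service is constructed, and the listener fires on the polling thread whenever the modification time advances:

    import java.io.File;

    import com.cloudera.impala.util.FileWatchService;
    import com.cloudera.impala.util.FileWatchService.FileChangeListener;

    public class FileWatchExample {
      public static void main(String[] args) throws Exception {
        final File policyFile = new File("/tmp/authz-policy.ini");  // hypothetical
        FileWatchService watcher = new FileWatchService(policyFile,
            new FileChangeListener() {
              @Override
              public void onFileChange() {
                // Called once per polling interval in which the file changed.
                System.out.println("Reloading " + policyFile.getPath());
              }
            });
        watcher.start();
        Thread.sleep(60 * 1000);  // ...do real work here...
        watcher.stop();
      }
    }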

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/util/FsPermissionChecker.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/util/FsPermissionChecker.java b/fe/src/main/java/com/cloudera/impala/util/FsPermissionChecker.java
deleted file mode 100644
index 7523cc8..0000000
--- a/fe/src/main/java/com/cloudera/impala/util/FsPermissionChecker.java
+++ /dev/null
@@ -1,301 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.util;
-
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.Map;
-import java.util.List;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.permission.AclEntry;
-import org.apache.hadoop.fs.permission.AclEntryType;
-import org.apache.hadoop.fs.permission.AclStatus;
-import org.apache.hadoop.fs.permission.AclEntryScope;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.hdfs.protocol.AclException;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Maps;
-import com.google.common.collect.Lists;
-
-/**
- * Singleton class that can check whether the current user has permission to access paths
- * in a FileSystem.
- */
-public class FsPermissionChecker {
-  private final static Logger LOG = LoggerFactory.getLogger(FsPermissionChecker.class);
-  private final static FsPermissionChecker instance_;
-  private final static Configuration CONF;
-  protected final String user_;
-  private final Set<String> groups_ = new HashSet<String>();
-  private final String supergroup_;
-
-  static {
-    CONF = new Configuration();
-    try {
-      instance_ = new FsPermissionChecker();
-    } catch (IOException e) {
-      throw new RuntimeException(
-          "Error initializing FsPermissionChecker: " + e.getMessage(), e);
-    }
-  }
-
-  private FsPermissionChecker() throws IOException {
-    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
-    groups_.addAll(Arrays.asList(ugi.getGroupNames()));
-    supergroup_ = CONF.get(DFS_PERMISSIONS_SUPERUSERGROUP_KEY,
-        DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT);
-    user_ = ugi.getShortUserName();
-  }
-
-  private boolean isSuperUser() { return groups_.contains(supergroup_); }
-
-  private static List<AclEntryType> ACL_TYPE_PRIORITY =
-      ImmutableList.of(AclEntryType.USER, AclEntryType.GROUP, AclEntryType.OTHER);
-
-  /**
-   * Allows checking different access permissions of a file without repeatedly accessing
-   * the underlying filesystem by caching the results of a status call at construction.
-   */
-  public class Permissions {
-    private final FileStatus fileStatus_;
-    private final FsPermission permissions_;
-    private final AclStatus aclStatus_;
-    private Map<AclEntryType, List<AclEntry>> entriesByTypes_ = Maps.newHashMap();
-    private AclEntry mask_;
-
-    /**
-     * If aclStatus is null, ACL permissions are not checked.
-     */
-    protected Permissions(FileStatus fileStatus, AclStatus aclStatus) {
-      Preconditions.checkNotNull(fileStatus);
-      fileStatus_ = fileStatus;
-      permissions_ = fileStatus.getPermission();
-      aclStatus_ = aclStatus;
-      if (aclStatus_ == null) return;
-
-      // Group the ACLs by type, so that we can apply them in correct priority order. Not
-      // clear from documentation whether aclStatus_.getEntries() guarantees this
-      // ordering, so this is defensive.
-      for (AclEntryType t: ACL_TYPE_PRIORITY) {
-        entriesByTypes_.put(t, Lists.<AclEntry>newArrayList());
-      }
-
-      List<AclEntry> fullAclList =
-          getAclFromPermAndEntries(permissions_, aclStatus_.getEntries());
-      for (AclEntry e: fullAclList) {
-        if (e.getType() == AclEntryType.MASK && e.getScope() != AclEntryScope.DEFAULT) {
-          mask_ = e;
-        } else if (isApplicableAcl(e)) {
-          entriesByTypes_.get(e.getType()).add(e);
-        }
-      }
-    }
-
-    /**
-     * Returns true if the mask should apply. The mask ACL applies only to named user
-     * ACLs (e.g. user:joe:r-x), and to all group ACLs.
-     */
-    private boolean shouldApplyMask(AclEntry acl) {
-      if (mask_ == null) return false;
-
-      switch (acl.getType()) {
-        case USER:
-          return acl.getName() != null;
-        case GROUP:
-          return true;
-      }
-      return false;
-    }
-
-    /**
-     * Returns true if this ACL applies to the current user and / or group
-     */
-    private boolean isApplicableAcl(AclEntry e) {
-      // Default ACLs are not used for permission checking, but instead control the
-      // permissions received by child directories
-      if (e.getScope() == AclEntryScope.DEFAULT) return false;
-
-      switch (e.getType()) {
-        case USER:
-          String aclUser = e.getName() == null ? aclStatus_.getOwner() : e.getName();
-          return FsPermissionChecker.this.user_.equals(aclUser);
-        case GROUP:
-          String aclGroup = e.getName() == null ? aclStatus_.getGroup() : e.getName();
-          return FsPermissionChecker.this.groups_.contains(aclGroup);
-        case OTHER:
-          return true;
-        case MASK:
-          return false;
-        default:
-          LOG.warn("Unknown Acl type: " + e.getType());
-          return false;
-      }
-    }
-
-    /**
-     * Returns true if ACLs allow 'action', false if they explicitly disallow 'action',
-     * and 'null' if no ACLs are available.
-     * See http://users.suse.com/~agruen/acl/linux-acls/online for more details about
-     * acl access check algorithm.
-     */
-    private Boolean checkAcls(FsAction action) {
-      // ACLs may not be enabled, so we need this ternary logic. If no ACLs are available,
-      // returning null causes us to fall back to standard ugo permissions.
-      if (aclStatus_ == null) return null;
-
-      // Remember if there is an applicable ACL entry, including owner user, named user,
-      // owning group, named group.
-      boolean foundMatch = false;
-      for (AclEntryType t: ACL_TYPE_PRIORITY) {
-        for (AclEntry e: entriesByTypes_.get(t)) {
-          if (t == AclEntryType.OTHER) {
-            // Processed all ACL entries except the OTHER entry.
-            // If found applicable ACL entries but none of them contain requested
-            // permission, deny access. Otherwise check OTHER entry.
-            return foundMatch ? false : e.getPermission().implies(action);
-          }
-          // If there is an applicable mask, 'action' is allowed iff both the mask and
-          // the underlying ACL permit it.
-          if (e.getPermission().implies(action)) {
-            if (shouldApplyMask(e)) {
-              if (mask_.getPermission().implies(action)) return true;
-            } else {
-              return true;
-            }
-          }
-          // User ACL entries have priority, so there is no need to continue checking.
-          if (t == AclEntryType.USER) return false;
-
-          foundMatch = true;
-        }
-      }
-      return false;
-    }
-
-    /**
-     * Returns true if the current user can perform the given action given these
-     * permissions.
-     */
-    public boolean checkPermissions(FsAction action) {
-      if (FsPermissionChecker.this.isSuperUser()) return true;
-      Boolean aclPerms = checkAcls(action);
-      if (aclPerms != null) return aclPerms;
-
-      // Check user, group and then 'other' permissions in turn.
-      if (FsPermissionChecker.this.user_.equals(fileStatus_.getOwner())) {
-        // If the user matches, we must return the user's access rights without checking
-        // the group, whether or not those rights allow access. This is counter-intuitive
-        // when the user cannot access the file but the group permissions would allow it,
-        // yet it is consistent with UNIX behaviour.
-        return permissions_.getUserAction().implies(action);
-      }
-
-      if (FsPermissionChecker.this.groups_.contains(fileStatus_.getGroup())) {
-        return permissions_.getGroupAction().implies(action);
-      }
-      return permissions_.getOtherAction().implies(action);
-    }
-
-    public boolean canRead() { return checkPermissions(FsAction.READ); }
-    public boolean canWrite() { return checkPermissions(FsAction.WRITE); }
-    public boolean canReadAndWrite() { return canRead() && canWrite(); }
-
-    // This was originally lifted from Hadoop. Won't need it if HDFS-7177 is resolved.
-    // getAclStatus() returns just the extended ACL entries; the default file permissions
-    // like "user::,group::,other::" are not included. We need to combine the two to get
-    // the full logical ACL list.
-    private List<AclEntry> getAclFromPermAndEntries(FsPermission perm,
-        List<AclEntry> entries) {
-      // File permission always have 3 items.
-      List<AclEntry> aclEntries = Lists.newArrayListWithCapacity(entries.size() + 3);
-
-      // Owner entry implied by owner permission bits.
-      aclEntries.add(new AclEntry.Builder()
-          .setScope(AclEntryScope.ACCESS)
-          .setType(AclEntryType.USER)
-          .setPermission(perm.getUserAction())
-          .build());
-
-      // All extended access ACL entries added by "-setfacl", other than the default
-      // file permission bits.
-      boolean hasAccessAcl = false;
-      for (AclEntry entry: entries) {
-        // The AclEntry list should be ordered: all ACCESS entries come first, followed
-        // by DEFAULT entries, so there is no need to continue past the first DEFAULT.
-        if (entry.getScope() == AclEntryScope.DEFAULT) break;
-        hasAccessAcl = true;
-        aclEntries.add(entry);
-      }
-
-      // Mask entry implied by group permission bits, or group entry if there is
-      // no access ACL (only default ACL).
-      aclEntries.add(new AclEntry.Builder()
-          .setScope(AclEntryScope.ACCESS)
-          .setType(hasAccessAcl ? AclEntryType.MASK : AclEntryType.GROUP)
-          .setPermission(perm.getGroupAction())
-          .build());
-
-      // Other entry implied by other bits.
-      aclEntries.add(new AclEntry.Builder()
-          .setScope(AclEntryScope.ACCESS)
-          .setType(AclEntryType.OTHER)
-          .setPermission(perm.getOtherAction())
-          .build());
-
-      return aclEntries;
-    }
-  }
-
-  /**
-   * Returns a Permissions object that can answer all access permission queries for the
-   * given path.
-   */
-  public Permissions getPermissions(FileSystem fs, Path path) throws IOException {
-    Preconditions.checkNotNull(fs);
-    Preconditions.checkNotNull(path);
-    AclStatus aclStatus = null;
-    try {
-      aclStatus = fs.getAclStatus(path);
-    } catch (AclException ex) {
-      LOG.trace("No ACLs retrieved, skipping ACLs check (HDFS will enforce ACLs)", ex);
-    } catch (UnsupportedOperationException ex) {
-      LOG.trace("No ACLs retrieved, unsupported", ex);
-    }
-    return new Permissions(fs.getFileStatus(path), aclStatus);
-  }
-
-  /**
-   * Returns the FsPermissionChecker singleton.
-   */
-  public static FsPermissionChecker getInstance() { return instance_; }
-}
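
A sketch of querying permissions for a path, assuming a hypothetical warehouse directory. A single getPermissions() call caches the file status and ACLs, so the individual checks below do not hit the NameNode again:

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsAction;

    import com.cloudera.impala.common.FileSystemUtil;
    import com.cloudera.impala.util.FsPermissionChecker;

    public class PermissionCheckExample {
      public static void main(String[] args) throws Exception {
        Path tableDir = new Path("/user/hive/warehouse/t");  // hypothetical
        FileSystem fs = tableDir.getFileSystem(FileSystemUtil.getConfiguration());

        FsPermissionChecker.Permissions perms =
            FsPermissionChecker.getInstance().getPermissions(fs, tableDir);
        System.out.println("read:       " + perms.canRead());
        System.out.println("write:      " + perms.canWrite());
        System.out.println("read+write: " + perms.canReadAndWrite());
        System.out.println("execute:    " + perms.checkPermissions(FsAction.EXECUTE));
      }
    }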

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/util/GlogAppender.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/util/GlogAppender.java b/fe/src/main/java/com/cloudera/impala/util/GlogAppender.java
deleted file mode 100644
index a5e1eb1..0000000
--- a/fe/src/main/java/com/cloudera/impala/util/GlogAppender.java
+++ /dev/null
@@ -1,129 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.util;
-
-import java.util.Properties;
-
-import org.apache.log4j.AppenderSkeleton;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
-import org.apache.log4j.PropertyConfigurator;
-import org.apache.log4j.spi.LoggingEvent;
-
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.thrift.TLogLevel;
-import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
-
-/**
- * log4j appender which calls into C++ code to log messages at their correct severities
- * via glog.
- */
-public class GlogAppender extends AppenderSkeleton {
-  // GLOG takes care of formatting, so we don't require a layout
-  public boolean requiresLayout() { return false; }
-
-  // Required implementation by superclass.
-  public void ActivateOptions() { }
-
-  // Required implementation by superclass
-  public void close() { }
-
-  private TLogLevel levelToSeverity(Level level) {
-    Preconditions.checkState(!level.equals(Level.OFF));
-    // TODO: Level does not work well in a HashMap or switch statement due to some
-    // strangeness with equality testing.
-    if (level.equals(Level.TRACE)) return TLogLevel.VLOG_3;
-    if (level.equals(Level.ALL)) return TLogLevel.VLOG_3;
-    if (level.equals(Level.DEBUG)) return TLogLevel.VLOG;
-    if (level.equals(Level.ERROR)) return TLogLevel.ERROR;
-    if (level.equals(Level.FATAL)) return TLogLevel.FATAL;
-    if (level.equals(Level.INFO)) return TLogLevel.INFO;
-    if (level.equals(Level.WARN)) return TLogLevel.WARN;
-
-    throw new IllegalStateException("Unknown log level: " + level.toString());
-  }
-
-  @Override
-  public void append(LoggingEvent event) {
-    Level level = event.getLevel();
-    if (level.equals(Level.OFF)) return;
-
-    String msg = event.getRenderedMessage();
-    if (event.getThrowableInformation() != null) {
-      msg = msg + "\nJava exception follows:\n" +
-          Joiner.on("\n").join(event.getThrowableStrRep());
-    }
-    int lineNumber = Integer.parseInt(event.getLocationInformation().getLineNumber());
-    String fileName = event.getLocationInformation().getFileName();
-    NativeLogger.LogToGlog(
-        levelToSeverity(level).getValue(), msg, fileName, lineNumber);
-  }
-
-  /**
-   * Returns a log4j level string corresponding to the Glog log level
-   */
-  private static String log4jLevelForTLogLevel(TLogLevel logLevel)
-      throws InternalException {
-    switch (logLevel) {
-      case INFO: return "INFO";
-      case WARN: return "WARN";
-      case ERROR: return "ERROR";
-      case FATAL: return "FATAL";
-      case VLOG:
-      case VLOG_2: return "DEBUG";
-      case VLOG_3: return "TRACE";
-      default: throw new InternalException("Unknown log level:" + logLevel);
-    }
-  }
-
-  /**
-   * Manually override Log4j root logger configuration. Any values in log4j.properties
-   * not overridden (that is, anything but the root logger and its default level) will
-   * continue to have effect.
-   *  - impalaLogLevel - the maximum log level for com.cloudera.impala.* classes
-   *  - otherLogLevel - the maximum log level for all other classes
-   */
-  public static void Install(TLogLevel impalaLogLevel, TLogLevel otherLogLevel)
-      throws InternalException {
-    Properties properties = new Properties();
-    properties.setProperty("log4j.appender.glog", GlogAppender.class.getName());
-
-    // These settings are relatively subtle. log4j provides many ways to filter log
-    // messages, and configuring them in the right order is a bit of black magic.
-    //
-    // The 'Threshold' property supersedes everything, so it must be set to its most
-    // permissive value and applies to any message sent to the glog appender.
-    //
-    // The 'rootLogger' property controls the default maximum logging level (where more
-    // verbose->larger logging level) for the entire space of classes. This will apply to
-    // all non-Impala classes, so is set to otherLogLevel.
-    //
-    // Finally we can configure per-package logging which overrides the rootLogger
-    // setting. In order to control Impala's logging independently of the rest of the
-    // world, we set the log level for com.cloudera.impala.
-    properties.setProperty("log4j.rootLogger",
-        log4jLevelForTLogLevel(otherLogLevel) + ",glog");
-    properties.setProperty("log4j.appender.glog.Threshold", "TRACE");
-    properties.setProperty("log4j.logger.com.cloudera.impala",
-        log4jLevelForTLogLevel(impalaLogLevel));
-    PropertyConfigurator.configure(properties);
-    Logger.getLogger(GlogAppender.class).info(String.format("Logging initialized. " +
-        "Impala: %s, All other: %s", impalaLogLevel, otherLogLevel));
-  }
-};
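
Installation is a single call, shown below with a hypothetical choice of levels; it assumes the Impala native library is already loaded, since every message routed through the appender ends up in NativeLogger.LogToGlog():

    import com.cloudera.impala.thrift.TLogLevel;
    import com.cloudera.impala.util.GlogAppender;

    public class GlogSetupExample {
      public static void main(String[] args) throws Exception {
        // com.cloudera.impala.* classes log at DEBUG (VLOG), everything else
        // at WARN; glog applies its own severity handling on the C++ side.
        GlogAppender.Install(TLogLevel.VLOG, TLogLevel.WARN);
      }
    }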

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/util/HdfsCachingUtil.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/util/HdfsCachingUtil.java b/fe/src/main/java/com/cloudera/impala/util/HdfsCachingUtil.java
deleted file mode 100644
index a3a1fa0..0000000
--- a/fe/src/main/java/com/cloudera/impala/util/HdfsCachingUtil.java
+++ /dev/null
@@ -1,515 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.util;
-
-import java.io.IOException;
-import java.util.Map;
-
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.RemoteIterator;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
-import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
-import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo.Expiration;
-import org.apache.hadoop.ipc.RemoteException;
-import org.apache.log4j.Logger;
-
-import com.cloudera.impala.analysis.TableName;
-import com.cloudera.impala.catalog.HdfsPartition;
-import com.cloudera.impala.common.FileSystemUtil;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.common.ImpalaRuntimeException;
-import com.cloudera.impala.thrift.JniCatalogConstants;
-import com.cloudera.impala.thrift.THdfsCachingOp;
-import com.google.common.base.Preconditions;
-
-/**
- * Utility class for submitting and dropping HDFS cache requests.
- */
-public class HdfsCachingUtil {
-  private static final Logger LOG = Logger.getLogger(HdfsCachingUtil.class);
-
-  // The key name used to save cache directive IDs in table/partition properties.
-  public final static String CACHE_DIR_ID_PROP_NAME = "cache_directive_id";
-
-  // The key name used to store the replication factor for cached files
-  public final static String CACHE_DIR_REPLICATION_PROP_NAME = "cache_replication";
-
-  // The number of caching refresh intervals that can go by when waiting for data to
-  // become cached before assuming no more progress is being made.
-  private final static int MAX_UNCHANGED_CACHING_REFRESH_INTERVALS = 5;
-
-  private static DistributedFileSystem dfs = null;
-
-  /**
-   * Returns the dfs singleton object.
-   */
-  private static DistributedFileSystem getDfs() throws ImpalaRuntimeException {
-    if (dfs == null) {
-      try {
-        dfs = FileSystemUtil.getDistributedFileSystem();
-      } catch (IOException e) {
-        throw new ImpalaRuntimeException("HdfsCachingUtil failed to initialize the " +
-            "DistributedFileSystem: ", e);
-      }
-    }
-    return dfs;
-  }
-
-  /**
-   * Caches the location of the given Hive Metastore Table and updates the
-   * table's properties with the submitted cache directive ID. The caller is
-   * responsible for not caching the same table twice, as HDFS will create a second
-   * cache directive even if it is similar to an already existing one.
-   *
-   * Returns the ID of the submitted cache directive and throws if there is an error
-   * submitting.
-   */
-  public static long submitCacheTblDirective(
-      org.apache.hadoop.hive.metastore.api.Table table,
-      String poolName, short replication) throws ImpalaRuntimeException {
-    long id = HdfsCachingUtil.submitDirective(new Path(table.getSd().getLocation()),
-        poolName, replication);
-    table.putToParameters(CACHE_DIR_ID_PROP_NAME, Long.toString(id));
-    table.putToParameters(CACHE_DIR_REPLICATION_PROP_NAME, Long.toString(replication));
-    return id;
-  }
-
-  /**
-   * Caches the location of the given partition and updates the
-   * partitions's properties with the submitted cache directive ID. The caller is
-   * responsible for not caching the same partition twice, as HDFS will create a second
-   * cache directive even if it is similar to an already existing one.
-   *
-   * Returns the ID of the submitted cache directive and throws if there is an error
-   * submitting the directive.
-   */
-  public static long submitCachePartitionDirective(HdfsPartition part,
-      String poolName, short replication) throws ImpalaRuntimeException {
-    long id = HdfsCachingUtil.submitDirective(new Path(part.getLocation()),
-        poolName, replication);
-    part.putToParameters(CACHE_DIR_ID_PROP_NAME, Long.toString(id));
-    part.putToParameters(CACHE_DIR_REPLICATION_PROP_NAME, Long.toString(replication));
-    return id;
-  }
-
-  /**
-   * Convenience method for working directly on a metastore partition. See
-   * submitCachePartitionDirective(HdfsPartition, String, short) for more details.
-   */
-  public static long submitCachePartitionDirective(
-      org.apache.hadoop.hive.metastore.api.Partition part,
-      String poolName, short replication) throws ImpalaRuntimeException {
-    long id = HdfsCachingUtil.submitDirective(new Path(part.getSd().getLocation()),
-        poolName, replication);
-    part.putToParameters(CACHE_DIR_ID_PROP_NAME, Long.toString(id));
-    part.putToParameters(CACHE_DIR_REPLICATION_PROP_NAME, Long.toString(replication));
-    return id;
-  }
-
-  /**
-   * Removes the cache directive associated with the table from HDFS, uncaching all
-   * data. Also updates the table's metadata. No-op if the table is not cached.
-   */
-  public static void uncacheTbl(org.apache.hadoop.hive.metastore.api.Table table)
-      throws ImpalaRuntimeException {
-    Preconditions.checkNotNull(table);
-    LOG.debug("Uncaching table: " + table.getDbName() + "." + table.getTableName());
-    Long id = getCacheDirectiveId(table.getParameters());
-    if (id == null) return;
-    HdfsCachingUtil.removeDirective(id);
-    table.getParameters().remove(CACHE_DIR_ID_PROP_NAME);
-    table.getParameters().remove(CACHE_DIR_REPLICATION_PROP_NAME);
-  }
-
-  /**
-   * Removes the cache directive associated with the partition from HDFS, uncaching all
-   * data. Also updates the partition's metadata to remove the cache directive ID.
-   * No-op if the table is not cached.
-   */
-  public static void uncachePartition(HdfsPartition part) throws ImpalaException {
-    Preconditions.checkNotNull(part);
-    Long id = getCacheDirectiveId(part.getParameters());
-    if (id == null) return;
-    HdfsCachingUtil.removeDirective(id);
-    part.getParameters().remove(CACHE_DIR_ID_PROP_NAME);
-    part.getParameters().remove(CACHE_DIR_REPLICATION_PROP_NAME);
-  }
-
-  /**
-   * Convenience method for working directly on a metastore partition. See
-   * uncachePartition(HdfsPartition) for more details.
-   */
-  public static void uncachePartition(
-    org.apache.hadoop.hive.metastore.api.Partition part) throws ImpalaException {
-    Preconditions.checkNotNull(part);
-    Long id = getCacheDirectiveId(part.getParameters());
-    if (id == null) return;
-    HdfsCachingUtil.removeDirective(id);
-    part.getParameters().remove(CACHE_DIR_ID_PROP_NAME);
-    part.getParameters().remove(CACHE_DIR_REPLICATION_PROP_NAME);
-  }
-
-  /**
-   * Returns the cache directive ID from the given table/partition parameter
-   * map. Returns null if the CACHE_DIR_ID_PROP_NAME key was not set or if
-   * there was an error parsing the associated ID.
-   */
-  public static Long getCacheDirectiveId(Map<String, String> params) {
-    if (params == null) return null;
-    String idStr = params.get(CACHE_DIR_ID_PROP_NAME);
-    if (idStr == null) return null;
-    try {
-      return Long.parseLong(idStr);
-    } catch (NumberFormatException e) {
-      return null;
-    }
-  }
-
-  /**
-   * Given a cache directive ID, returns the pool the directive is cached in.
-   * Returns null if no outstanding cache directive matches this ID.
-   */
-  public static String getCachePool(long directiveId)
-      throws ImpalaRuntimeException {
-    CacheDirectiveEntry entry = getDirective(directiveId);
-    return entry == null ? null : entry.getInfo().getPool();
-  }
-
-  /**
-   * Given a cache directive ID, returns the replication factor for the directive.
-   * Returns null if no outstanding cache directives match this ID.
-   */
-  public static Short getCacheReplication(long directiveId)
-      throws ImpalaRuntimeException {
-    CacheDirectiveEntry entry = getDirective(directiveId);
-    return entry != null ? entry.getInfo().getReplication() : null;
-  }
-
-  /**
-   * Returns the cache replication value from the parameters map. We assume this is
-   * only called for cached table parameters; if the property is missing or cannot be
-   * parsed, the default replication factor is returned.
-   */
-  public static Short getCachedCacheReplication(Map<String, String> params) {
-    Preconditions.checkNotNull(params);
-    String replication = params.get(CACHE_DIR_REPLICATION_PROP_NAME);
-    if (replication == null) {
-      return JniCatalogConstants.HDFS_DEFAULT_CACHE_REPLICATION_FACTOR;
-    }
-    try {
-      return Short.parseShort(replication);
-    } catch (NumberFormatException e) {
-      return JniCatalogConstants.HDFS_DEFAULT_CACHE_REPLICATION_FACTOR;
-    }
-  }
-
-  /**
-   * Waits on a cache directive to either complete or stop making progress. Progress is
-   * checked by polling the HDFS caching stats every
-   * DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS. We verify the request's
-   * "currentBytesCached" is increasing compared to "bytesNeeded".
-   * If "currentBytesCached" == "bytesNeeded" or if no progress is made for a
-   * MAX_UNCHANGED_CACHING_REFRESH_INTERVALS, this function returns.
-   */
-  public static void waitForDirective(long directiveId)
-      throws ImpalaRuntimeException  {
-    long bytesNeeded = 0L;
-    long currentBytesCached = 0L;
-    CacheDirectiveEntry cacheDir = getDirective(directiveId);
-    if (cacheDir == null) return;
-
-    bytesNeeded = cacheDir.getStats().getBytesNeeded();
-    currentBytesCached = cacheDir.getStats().getBytesCached();
-    LOG.debug(String.format("Waiting on cache directive id: %d. Bytes " +
-        "cached (%d) / needed (%d)", directiveId, currentBytesCached, bytesNeeded));
-    // All the bytes are cached, just return.
-    if (bytesNeeded == currentBytesCached) return;
-
-    // The refresh interval is how often HDFS will update cache directive stats. We use
-    // this value to determine how frequently we should poll for changes.
-    long hdfsRefreshIntervalMs = getDfs().getConf().getLong(
-        DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS,
-        DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS_DEFAULT);
-    Preconditions.checkState(hdfsRefreshIntervalMs > 0);
-
-    // Loop until either MAX_UNCHANGED_CACHING_REFRESH_INTERVALS have passed with no
-    // changes or all required data is cached.
-    int unchangedCounter = 0;
-    while (unchangedCounter < MAX_UNCHANGED_CACHING_REFRESH_INTERVALS) {
-      long previousBytesCached = currentBytesCached;
-      cacheDir = getDirective(directiveId);
-      if (cacheDir == null) return;
-      currentBytesCached = cacheDir.getStats().getBytesCached();
-      bytesNeeded = cacheDir.getStats().getBytesNeeded();
-      if (currentBytesCached == bytesNeeded) {
-        LOG.debug(String.format("Cache directive id: %d has completed." +
-            "Bytes cached (%d) / needed (%d)", directiveId, currentBytesCached,
-            bytesNeeded));
-        return;
-      }
-
-      if (currentBytesCached == previousBytesCached) {
-        ++unchangedCounter;
-      } else {
-        unchangedCounter = 0;
-      }
-      try {
-        // Sleep for the refresh interval + a little bit more to ensure a full interval
-        // has completed. A value of 25% of the refresh interval was arbitrarily chosen.
-        Thread.sleep((long) (hdfsRefreshIntervalMs * 1.25));
-      } catch (InterruptedException e) { /* ignore */ }
-    }
-    LOG.warn(String.format("No changes in cached bytes in: %d(ms). All data may not " +
-        "be cached. Final stats for cache directive id: %d. Bytes cached (%d)/needed " +
-        "(%d)", hdfsRefreshIntervalMs * MAX_UNCHANGED_CACHING_REFRESH_INTERVALS,
-        directiveId, currentBytesCached, bytesNeeded));
-  }
-
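The waitForDirective() method above is an instance of a general "poll until complete
or stalled" pattern. A self-contained sketch of that pattern in plain Java, independent
of HDFS (all names here are illustrative, not Impala or Hadoop APIs):

    import java.util.function.LongSupplier;

    public final class ProgressPoller {
      /**
       * Polls currentProgress until it reaches target, or until it has not changed for
       * maxUnchangedPolls consecutive polls. Returns true iff the target was reached.
       */
      public static boolean pollUntilDoneOrStalled(LongSupplier currentProgress,
          long target, long pollIntervalMs, int maxUnchangedPolls)
          throws InterruptedException {
        long previous = -1;
        int unchanged = 0;
        while (unchanged < maxUnchangedPolls) {
          long current = currentProgress.getAsLong();
          if (current >= target) return true;  // All work is done.
          if (current == previous) {
            ++unchanged;       // No progress since the last poll.
          } else {
            unchanged = 0;     // Progress was made; reset the stall counter.
          }
          previous = current;
          Thread.sleep(pollIntervalMs);  // Wait a full refresh interval before re-checking.
        }
        return false;  // Stalled without reaching the target.
      }
    }
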
-  /**
-   * Submits a new caching directive for the specified cache pool name, path and
-   * replication. Returns the directive ID if the submission was successful, or throws
-   * an ImpalaRuntimeException if the submission fails.
-   */
-  private static long submitDirective(Path path, String poolName, short replication)
-      throws ImpalaRuntimeException {
-    Preconditions.checkNotNull(path);
-    Preconditions.checkState(poolName != null && !poolName.isEmpty());
-    CacheDirectiveInfo info = new CacheDirectiveInfo.Builder()
-        .setExpiration(Expiration.NEVER)
-        .setPool(poolName)
-        .setReplication(replication)
-        .setPath(path).build();
-    LOG.debug("Submitting cache directive: " + info.toString());
-    try {
-      return getDfs().addCacheDirective(info);
-    } catch (IOException e) {
-      throw new ImpalaRuntimeException(e.getMessage(), e);
-    }
-  }
-
-  /**
-   * Updates the cache directive for a table and updates the metastore parameters.
-   * Returns the cache directive ID.
-   */
-  public static long modifyCacheDirective(Long id,
-      org.apache.hadoop.hive.metastore.api.Table table,
-      String poolName, short replication) throws ImpalaRuntimeException {
-    Preconditions.checkNotNull(id);
-    HdfsCachingUtil.modifyCacheDirective(id, new Path(table.getSd().getLocation()),
-        poolName, replication);
-    table.putToParameters(CACHE_DIR_ID_PROP_NAME, Long.toString(id));
-    table.putToParameters(CACHE_DIR_REPLICATION_PROP_NAME, Long.toString(replication));
-    return id;
-  }
-
-  /**
-   * Updates the cache directive for a partition and updates the metastore parameters.
-   * Returns the cache directive ID.
-   */
-  public static long modifyCacheDirective(Long id, HdfsPartition part, String poolName,
-      short replication) throws ImpalaRuntimeException {
-    Preconditions.checkNotNull(id);
-    HdfsCachingUtil.modifyCacheDirective(id, new Path(part.getLocation()),
-        poolName, replication);
-    part.putToParameters(CACHE_DIR_ID_PROP_NAME, Long.toString(id));
-    part.putToParameters(CACHE_DIR_REPLICATION_PROP_NAME, Long.toString(replication));
-    return id;
-  }
-
-  /**
-   * Updates an existing cache directive in place to avoid creating a second directive
-   * for the same entry.
-   */
-  private static void modifyCacheDirective(Long id, Path path, String poolName,
-      short replication) throws ImpalaRuntimeException {
-    Preconditions.checkNotNull(path);
-    Preconditions.checkNotNull(id);
-    Preconditions.checkState(poolName != null && !poolName.isEmpty());
-    CacheDirectiveInfo info = new CacheDirectiveInfo.Builder()
-        .setId(id)
-        .setExpiration(Expiration.NEVER)
-        .setPool(poolName)
-        .setReplication(replication)
-        .setPath(path).build();
-    LOG.debug("Modifying cache directive: " + info.toString());
-    try {
-      getDfs().modifyCacheDirective(info);
-    } catch (IOException e) {
-      throw new ImpalaRuntimeException(e.getMessage(), e);
-    }
-  }
-
-  /**
-   * Removes the given cache directive if it exists, uncaching the data. If the
-   * cache request does not exist in HDFS no error is returned.
-   * Throws an ImpalaRuntimeException if there was any problem removing the
-   * directive.
-   */
-  private static void removeDirective(long directiveId) throws ImpalaRuntimeException {
-    LOG.debug("Removing cache directive id: " + directiveId);
-    try {
-      getDfs().removeCacheDirective(directiveId);
-    } catch (IOException e) {
-      // There is no special exception type for the case where a directive ID does not
-      // exist so we must inspect the error message.
-      if (e.getMessage().contains("No directive with ID")) return;
-      throw new ImpalaRuntimeException(e.getMessage(), e);
-    }
-  }
-
-  /**
-   * Gets the cache directive matching the given ID. Throws an ImpalaRuntimeException
-   * if the lookup fails or no matching directive is found.
-   */
-  private static CacheDirectiveEntry getDirective(long directiveId)
-      throws ImpalaRuntimeException {
-    LOG.trace("Getting cache directive id: " + directiveId);
-    CacheDirectiveInfo filter = new CacheDirectiveInfo.Builder()
-        .setId(directiveId)
-        .build();
-    try {
-      RemoteIterator<CacheDirectiveEntry> itr = getDfs().listCacheDirectives(filter);
-      if (itr.hasNext()) return itr.next();
-    } catch (IOException e) {
-      // Handle connection issues with e.g. HDFS and possible not found errors
-      throw new ImpalaRuntimeException(e.getMessage(), e);
-    }
-    throw new ImpalaRuntimeException(
-        "HDFS cache directive filter returned empty result. This must not happen");
-  }
-
-  /**
-   * Checks whether poolName matches the pool of the cache directive
-   * identified by directiveId.
-   */
-  public static boolean isSamePool(String poolName, Long directiveId)
-      throws ImpalaRuntimeException {
-    return poolName.equals(getCachePool(directiveId));
-  }
-
-  /**
-   * Helper method for frequent lookup of replication factor in the thrift caching
-   * structure.
-   */
-  public static short getReplicationOrDefault(THdfsCachingOp op) {
-    return op.isSetReplication() ? op.getReplication() :
-      JniCatalogConstants.HDFS_DEFAULT_CACHE_REPLICATION_FACTOR;
-  }
-
-  /**
-   * Returns a boolean indicating if the given thrift caching operation would perform an
-   * update on an already existing cache directive.
-   */
-  public static boolean isUpdateOp(THdfsCachingOp op, Map<String, String> params)
-      throws ImpalaRuntimeException {
-
-    Long directiveId = Long.parseLong(params.get(CACHE_DIR_ID_PROP_NAME));
-    CacheDirectiveEntry entry = getDirective(directiveId);
-    Preconditions.checkNotNull(entry);
-
-    // Verify cache pool
-    if (!op.getCache_pool_name().equals(entry.getInfo().getPool())) {
-      return false;
-    }
-
-    // Check cache replication factor
-    if ((op.isSetReplication() && op.getReplication() !=
-        entry.getInfo().getReplication()) || ( !op.isSetReplication() &&
-        entry.getInfo().getReplication() !=
-        JniCatalogConstants.HDFS_DEFAULT_CACHE_REPLICATION_FACTOR)) {
-      return true;
-    }
-    return false;
-  }
-
-  /**
-   * Validates the properties of the chosen cache pool. Throws on error.
-   */
-  public static void validateCachePool(THdfsCachingOp op, Long directiveId,
-      TableName table, HdfsPartition partition)
-      throws ImpalaRuntimeException {
-
-    CacheDirectiveEntry entry = getDirective(directiveId);
-    Preconditions.checkNotNull(entry);
-
-    if (!op.getCache_pool_name().equals(entry.getInfo().getPool())) {
-      throw new ImpalaRuntimeException(String.format("Cannot cache partition in " +
-          "pool '%s' because it is already cached in '%s'. To change the cache " +
-          "pool for this partition, first uncache using: ALTER TABLE %s.%s " +
-          "%sSET UNCACHED", op.getCache_pool_name(),
-          entry.getInfo().getPool(), table.getDb(), table,
-          // Insert partition string if partition non null
-          partition != null ? String.format(" PARTITION(%s) ",
-          partition.getPartitionName().replaceAll("/", ", ")) : ""));
-    }
-  }
-
-  /**
-   * Validates the properties of the chosen cache pool. Throws on error.
-   */
-  public static void validateCachePool(THdfsCachingOp op, Long directiveId,
-      TableName table) throws ImpalaRuntimeException {
-    validateCachePool(op, directiveId, table, null);
-  }
-
-  /**
-   * Returns true if the parameter map contains a cache directive ID that still exists
-   * on the NameNode, false otherwise. If the cache directive ID does not exist, we
-   * remove the value from the parameter map, issue a log message and return false. As
-   * the value is not written back to the Hive MS from this method, the result is only
-   * valid until the next metadata fetch. Lastly, we update the cache replication
-   * factor in the parameters with the value read from HDFS.
-   */
-  public static boolean validateCacheParams(Map<String, String> params) {
-    Long directiveId = getCacheDirectiveId(params);
-    if (directiveId == null) return false;
-
-    CacheDirectiveEntry entry = null;
-    try {
-      entry = getDirective(directiveId);
-    } catch (ImpalaRuntimeException e) {
-      if (e.getCause() != null && e.getCause() instanceof RemoteException) {
-        // This exception signals that the cache directive no longer exists.
-        LOG.error("Cache directive does not exist", e);
-        params.remove(CACHE_DIR_ID_PROP_NAME);
-        params.remove(CACHE_DIR_REPLICATION_PROP_NAME);
-      } else {
-        // This exception signals that there was a connection problem with HDFS.
-        LOG.error("IO Exception, possible connectivity issues with HDFS", e);
-      }
-      return false;
-    }
-    Preconditions.checkNotNull(entry);
-
-    // On the upgrade path the property might not exist. If it exists and differs from
-    // the replication factor reported by HDFS, issue a warning.
-    String replicationFactor = params.get(CACHE_DIR_REPLICATION_PROP_NAME);
-    if (replicationFactor != null &&
-        Short.parseShort(replicationFactor) != entry.getInfo().getReplication()) {
-      LOG.info("Replication factor for entry in HDFS differs from value in Hive MS: " +
-          entry.getInfo().getPath().toString() + " " +
-          entry.getInfo().getReplication().toString() + " != " +
-          params.get(CACHE_DIR_REPLICATION_PROP_NAME));
-    }
-    params.put(CACHE_DIR_REPLICATION_PROP_NAME,
-        String.valueOf(entry.getInfo().getReplication()));
-    return true;
-  }
-}
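
For illustration, a sketch of how a metadata-loading path might use
validateCacheParams() and getCachedCacheReplication() from the class above (the
caller and the msTable variable are hypothetical):

    // Hypothetical caller during table load: verify the recorded directive still
    // exists on the NameNode and pick up the replication factor reported by HDFS.
    Map<String, String> params = msTable.getParameters();
    if (HdfsCachingUtil.validateCacheParams(params)) {
      short replication = HdfsCachingUtil.getCachedCacheReplication(params);
      // ... record that the table is HDFS-cached with this replication factor ...
    } else {
      // Either the table is not cached or its directive no longer exists in HDFS.
    }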

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/util/KuduUtil.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/util/KuduUtil.java b/fe/src/main/java/com/cloudera/impala/util/KuduUtil.java
deleted file mode 100644
index 4f627d8..0000000
--- a/fe/src/main/java/com/cloudera/impala/util/KuduUtil.java
+++ /dev/null
@@ -1,268 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.util;
-
-import java.io.StringReader;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import javax.json.Json;
-import javax.json.JsonArray;
-import javax.json.JsonReader;
-
-import com.cloudera.impala.catalog.ScalarType;
-import com.cloudera.impala.common.ImpalaRuntimeException;
-import com.cloudera.impala.thrift.TDistributeByRangeParam;
-import com.cloudera.impala.thrift.TRangeLiteral;
-import com.cloudera.impala.thrift.TRangeLiteralList;
-import com.google.common.base.Splitter;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import com.google.common.collect.Sets;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.kudu.ColumnSchema;
-import org.apache.kudu.Schema;
-import org.apache.kudu.Type;
-import org.apache.kudu.client.KuduTable;
-import org.apache.kudu.client.PartialRow;
-
-import static com.cloudera.impala.catalog.Type.parseColumnType;
-import static java.lang.String.format;
-
-public class KuduUtil {
-
-  private static final String SPLIT_KEYS_ERROR_MESSAGE = "Error parsing splits keys.";
-
-  /**
-   * Compares the schema of an HMS table and a Kudu table. Returns true if both tables
-   * have a matching schema.
-   */
-  public static boolean compareSchema(Table msTable, KuduTable kuduTable)
-      throws ImpalaRuntimeException {
-    List<FieldSchema> msFields = msTable.getSd().getCols();
-    List<ColumnSchema> kuduFields = kuduTable.getSchema().getColumns();
-    if (msFields.size() != kuduFields.size()) return false;
-
-    HashMap<String, ColumnSchema> kuduFieldMap = Maps.newHashMap();
-    for (ColumnSchema kuduField : kuduFields) {
-      kuduFieldMap.put(kuduField.getName().toUpperCase(), kuduField);
-    }
-
-    for (FieldSchema msField : msFields) {
-      ColumnSchema kuduField = kuduFieldMap.get(msField.getName().toUpperCase());
-      if (kuduField == null
-          || fromImpalaType(parseColumnType(msField.getType())) != kuduField.getType()) {
-        return false;
-      }
-    }
-
-    return true;
-  }
-
-  /**
-   * Parses split keys from statements.
-   *
-   * Split keys are expected to be in JSON, as an array of arrays, in the form:
-   * '[[value1_col1, value1_col2, ...], [value2_col1, value2_col2, ...], ...]'
-   *
-   * Each inner array corresponds to a split key and should have one matching entry for
-   * each key column specified in 'schema'.
-   */
-  public static List<PartialRow> parseSplits(Schema schema, String kuduSplits)
-      throws ImpalaRuntimeException {
-
-    // If there are no splits return early.
-    if (kuduSplits == null || kuduSplits.isEmpty()) return ImmutableList.of();
-
-    ImmutableList.Builder<PartialRow> splitRows = ImmutableList.builder();
-
-    // ...Otherwise parse the splits. We're expecting splits in the format of a list of
-    // lists of keys. We only support specifying splits for int and string keys
-    // (currently those are the only types of keys allowed in Kudu too).
-    try {
-      JsonReader jr = Json.createReader(new StringReader(kuduSplits));
-      JsonArray keysList = jr.readArray();
-      for (int i = 0; i < keysList.size(); i++) {
-        PartialRow splitRow = new PartialRow(schema);
-        JsonArray compoundKey = keysList.getJsonArray(i);
-        if (compoundKey.size() != schema.getPrimaryKeyColumnCount()) {
-          throw new ImpalaRuntimeException(SPLIT_KEYS_ERROR_MESSAGE +
-              " Wrong number of keys.");
-        }
-        for (int j = 0; j < compoundKey.size(); j++) {
-          setKey(schema.getColumnByIndex(j).getType(), compoundKey, j, splitRow);
-        }
-        splitRows.add(splitRow);
-      }
-    } catch (ImpalaRuntimeException e) {
-      throw e;
-    } catch (Exception e) {
-      throw new ImpalaRuntimeException(SPLIT_KEYS_ERROR_MESSAGE + " Problem parsing json"
-          + ": " + e.getMessage(), e);
-    }
-
-    return splitRows.build();
-  }
-
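For illustration, a small self-contained example of the split-keys JSON shape that
parseSplits() above expects, parsed with the same javax.json calls (assumes the
javax.json API and an implementation are on the classpath; the values are made up):

    import java.io.StringReader;
    import javax.json.Json;
    import javax.json.JsonArray;

    public final class SplitKeysExample {
      public static void main(String[] args) {
        // Two split keys for a table whose primary key is (INT, STRING).
        String kuduSplits = "[[1, \"a\"], [100, \"m\"]]";
        JsonArray keysList = Json.createReader(new StringReader(kuduSplits)).readArray();
        for (int i = 0; i < keysList.size(); i++) {
          JsonArray compoundKey = keysList.getJsonArray(i);
          System.out.println("split " + i + ": int=" + compoundKey.getInt(0)
              + ", string=" + compoundKey.getString(1));
        }
      }
    }
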
-  /**
-   * Given the TDistributeByRangeParam from the CREATE statement, creates the
-   * appropriate split rows.
-   */
-  public static List<PartialRow> parseSplits(Schema schema,
-      TDistributeByRangeParam param) throws ImpalaRuntimeException {
-    ImmutableList.Builder<PartialRow> splitRows = ImmutableList.builder();
-    for (TRangeLiteralList literals : param.getSplit_rows()) {
-      PartialRow splitRow = new PartialRow(schema);
-      List<TRangeLiteral> literalValues = literals.getValues();
-      for (int i = 0; i < literalValues.size(); ++i) {
-        String colName = param.getColumns().get(i);
-        ColumnSchema col = schema.getColumn(colName);
-        setKey(col.getType(), literalValues.get(i), schema.getColumnIndex(colName),
-            colName, splitRow);
-      }
-      splitRows.add(splitRow);
-    }
-    return splitRows.build();
-  }
-
-  /**
-   * Sets the value in 'key' at 'pos', given the json representation.
-   */
-  private static void setKey(Type type, JsonArray array, int pos, PartialRow key)
-      throws ImpalaRuntimeException {
-    switch (type) {
-      case BOOL: key.addBoolean(pos, array.getBoolean(pos)); break;
-      case INT8: key.addByte(pos, (byte) array.getInt(pos)); break;
-      case INT16: key.addShort(pos, (short) array.getInt(pos)); break;
-      case INT32: key.addInt(pos, array.getInt(pos)); break;
-      case INT64: key.addLong(pos, array.getJsonNumber(pos).longValue()); break;
-      case STRING: key.addString(pos, array.getString(pos)); break;
-      default:
-        throw new ImpalaRuntimeException("Key columns not supported for type: "
-            + type.toString());
-    }
-  }
-
-  /**
-   * Sets the value in 'key' at 'pos', given the range literal.
-   */
-  private static void setKey(Type type, TRangeLiteral literal, int pos, String colName,
-      PartialRow key) throws ImpalaRuntimeException {
-    switch (type) {
-      case BOOL:
-        checkCorrectType(literal.isSetBool_literal(), type, colName, literal);
-        key.addBoolean(pos, literal.isBool_literal());
-        break;
-      case INT8:
-        checkCorrectType(literal.isSetInt_literal(), type, colName, literal);
-        key.addByte(pos, (byte) literal.getInt_literal());
-        break;
-      case INT16:
-        checkCorrectType(literal.isSetInt_literal(), type, colName, literal);
-        key.addShort(pos, (short) literal.getInt_literal());
-        break;
-      case INT32:
-        checkCorrectType(literal.isSetInt_literal(), type, colName, literal);
-        key.addInt(pos, (int) literal.getInt_literal());
-        break;
-      case INT64:
-        checkCorrectType(literal.isSetInt_literal(), type, colName, literal);
-        key.addLong(pos, literal.getInt_literal());
-        break;
-      case STRING:
-        checkCorrectType(literal.isSetString_literal(), type, colName, literal);
-        key.addString(pos, literal.getString_literal());
-        break;
-      default:
-        throw new ImpalaRuntimeException("Key columns not supported for type: "
-            + type.toString());
-    }
-  }
-
-  /**
-   * If correctType is true, returns. Otherwise throws an exception with a formatted
-   * error message indicating a type mismatch for the given range literal.
-   */
-  private static void checkCorrectType(boolean correctType, Type t, String colName,
-      TRangeLiteral literal) throws ImpalaRuntimeException {
-    if (correctType) return;
-    throw new ImpalaRuntimeException(
-        format("Expected %s literal for column '%s' got '%s'", t.getName(), colName,
-            toString(literal)));
-  }
-
-  /**
-   * Parses a string of the form "a, b, c" and returns a set of values split by ',' and
-   * stripped of whitespace.
-   */
-  public static HashSet<String> parseKeyColumns(String cols) {
-    return Sets.newHashSet(Splitter.on(",").trimResults().split(cols.toLowerCase()));
-  }
-
-  public static List<String> parseKeyColumnsAsList(String cols) {
-    return Lists.newArrayList(Splitter.on(",").trimResults().split(cols.toLowerCase()));
-  }
-
-  /**
-   * Converts a given Impala catalog type to the Kudu type. Throws an exception if the
-   * type cannot be converted.
-   */
-  public static Type fromImpalaType(com.cloudera.impala.catalog.Type t)
-      throws ImpalaRuntimeException {
-    if (!t.isScalarType()) {
-      throw new ImpalaRuntimeException(format(
-          "Non-scalar type %s is not supported in Kudu", t.toSql()));
-    }
-    ScalarType s = (ScalarType) t;
-    switch (s.getPrimitiveType()) {
-      case TINYINT: return Type.INT8;
-      case SMALLINT: return Type.INT16;
-      case INT: return Type.INT32;
-      case BIGINT: return Type.INT64;
-      case BOOLEAN: return Type.BOOL;
-      case CHAR: return Type.STRING;
-      case STRING: return Type.STRING;
-      case VARCHAR: return Type.STRING;
-      case DOUBLE: return Type.DOUBLE;
-      case FLOAT: return Type.FLOAT;
-        /* Fall through below */
-      case INVALID_TYPE:
-      case NULL_TYPE:
-      case TIMESTAMP:
-      case BINARY:
-      case DATE:
-      case DATETIME:
-      case DECIMAL:
-      default:
-        throw new ImpalaRuntimeException(format(
-            "Type %s is not supported in Kudu", s.toSql()));
-    }
-  }
-
-  /**
-   * Returns the string value of the RANGE literal.
-   */
-  static String toString(TRangeLiteral l) throws ImpalaRuntimeException {
-    if (l.isSetBool_literal()) return String.valueOf(l.bool_literal);
-    if (l.isSetString_literal()) return String.valueOf(l.string_literal);
-    if (l.isSetInt_literal()) return String.valueOf(l.int_literal);
-    throw new ImpalaRuntimeException("Unsupported type for RANGE literal.");
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/util/ListMap.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/util/ListMap.java b/fe/src/main/java/com/cloudera/impala/util/ListMap.java
deleted file mode 100644
index 989a510..0000000
--- a/fe/src/main/java/com/cloudera/impala/util/ListMap.java
+++ /dev/null
@@ -1,77 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.util;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Maps;
-import com.google.common.collect.Lists;
-
-/**
- * Implementation of a bi-directional map between an index of type
- * Integer and an object of type T.  The indices are allocated on
- * demand when a reverse lookup occurs for an object not already in
- * the map.
- *
- * The forward mapping is implemented as a List<> so that it can be
- * directly used as a Thrift structure.
- */
-public class ListMap<T> {
-  // Maps from Integer to T.
-  private ArrayList<T> list_ = Lists.newArrayList();
-  // Maps from T to Integer.
-  private final Map<T, Integer> map_ = Maps.newHashMap();
-
-  public ArrayList<T> getList() { return list_; }
-  public int size() { return list_.size(); }
-
-  /**
-   * Map from Integer index to T object.
-   */
-  public T getEntry(int index) { return list_.get(index); }
-
-  /**
-   * Map from T t to Integer index. If the mapping from t doesn't
-   * exist, then create a new mapping from t to a unique index.
-   */
-  public int getIndex(T t) {
-    Integer index = map_.get(t);
-    if (index == null) {
-      // No match was found, add a new entry.
-      list_.add(t);
-      index = list_.size() - 1;
-      map_.put(t, index);
-    }
-    return index;
-  }
-
-  /**
-   * Populate the bi-map from the given list.  Does not perform a copy
-   * of the list.
-   */
-  public void populate(ArrayList<T> list) {
-    Preconditions.checkState(list_.isEmpty() && map_.isEmpty());
-    list_ = list;
-    for (int i = 0; i < list_.size(); ++i) {
-      map_.put(list_.get(i), i);
-    }
-  }
-}
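
A short, illustrative usage sketch of the bi-directional mapping above:

    ListMap<String> hosts = new ListMap<String>();
    int i0 = hosts.getIndex("host-a");    // allocates index 0
    int i1 = hosts.getIndex("host-b");    // allocates index 1
    int again = hosts.getIndex("host-a"); // returns the existing index 0
    String name = hosts.getEntry(i1);     // "host-b"
    // hosts.getList() can be handed directly to a Thrift structure as a List<String>.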

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/util/MaxRowsProcessedVisitor.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/util/MaxRowsProcessedVisitor.java b/fe/src/main/java/com/cloudera/impala/util/MaxRowsProcessedVisitor.java
deleted file mode 100644
index ac85ff8..0000000
--- a/fe/src/main/java/com/cloudera/impala/util/MaxRowsProcessedVisitor.java
+++ /dev/null
@@ -1,66 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.util;
-
-import com.cloudera.impala.planner.NestedLoopJoinNode;
-import com.cloudera.impala.planner.HashJoinNode;
-import com.cloudera.impala.planner.PlanNode;
-import com.cloudera.impala.planner.ScanNode;
-
-/**
- * Returns the maximum number of rows processed by any node in a given plan tree.
- */
-public class MaxRowsProcessedVisitor implements Visitor<PlanNode> {
-
-  private boolean abort_ = false;
-  private long result_ = -1L;
-
-  @Override
-  public void visit(PlanNode caller) {
-    if (abort_) return;
-
-    if (caller instanceof ScanNode) {
-      long tmp = caller.getInputCardinality();
-      ScanNode scan = (ScanNode) caller;
-      boolean missingStats = scan.isTableMissingStats() || scan.hasCorruptTableStats();
-      // In the absence of collection stats, treat scans on collections as if they
-      // have no limit.
-      if (scan.isAccessingCollectionType() || (missingStats && !scan.hasLimit())) {
-        abort_ = true;
-        return;
-      }
-      result_ = Math.max(result_, tmp);
-    } else if (caller instanceof HashJoinNode || caller instanceof NestedLoopJoinNode) {
-      // Revisit when multiple scan nodes can be executed in a single fragment, IMPALA-561
-      abort_ = true;
-      return;
-    } else {
-      long in = caller.getInputCardinality();
-      long out = caller.getCardinality();
-      if ((in == -1) || (out == -1)) {
-        abort_ = true;
-        return;
-      }
-      result_ = Math.max(result_, Math.max(in, out));
-    }
-  }
-
-  public long get() {
-    return abort_ ? -1 : result_;
-  }
-}
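
The visitor above accumulates a single maximum (or aborts) while a plan tree is
walked. A generic, self-contained sketch of the same accumulate-or-abort idea,
deliberately not using Impala's Visitor/PlanNode types:

    import java.util.List;

    interface TreeVisitor<T> { void visit(T node); }

    final class Node {
      final long rows;                 // -1 means the cardinality is unknown.
      final List<Node> children;
      Node(long rows, List<Node> children) { this.rows = rows; this.children = children; }

      // Post-order traversal: visit children first, then this node.
      void accept(TreeVisitor<Node> v) {
        for (Node c : children) c.accept(v);
        v.visit(this);
      }
    }

    final class MaxRowsVisitor implements TreeVisitor<Node> {
      private boolean abort = false;
      private long max = -1;

      @Override
      public void visit(Node n) {
        if (abort) return;
        if (n.rows < 0) { abort = true; return; }  // Unknown cardinality: give up.
        max = Math.max(max, n.rows);
      }

      long get() { return abort ? -1 : max; }
    }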



[31/61] [partial] incubator-impala git commit: IMPALA-3786: Replace "cloudera" with "apache" (part 1)

Posted by kw...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/catalog/CatalogServiceCatalog.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/catalog/CatalogServiceCatalog.java b/fe/src/main/java/com/cloudera/impala/catalog/CatalogServiceCatalog.java
deleted file mode 100644
index 27d25e1..0000000
--- a/fe/src/main/java/com/cloudera/impala/catalog/CatalogServiceCatalog.java
+++ /dev/null
@@ -1,1268 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.catalog;
-
-import java.io.IOException;
-import java.lang.reflect.Method;
-import java.net.URL;
-import java.net.URLClassLoader;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-import java.util.UUID;
-
-import org.apache.commons.codec.binary.Base64;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.RemoteIterator;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
-import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.FunctionType;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.hadoop.hive.metastore.api.ResourceType;
-import org.apache.hadoop.hive.metastore.api.ResourceUri;
-import org.apache.hadoop.hive.metastore.api.UnknownDBException;
-import org.apache.hadoop.hive.ql.exec.FunctionUtils;
-import org.apache.log4j.Logger;
-import org.apache.thrift.protocol.TCompactProtocol;
-import org.apache.thrift.TException;
-
-import com.cloudera.impala.analysis.TableName;
-import com.cloudera.impala.authorization.SentryConfig;
-import com.cloudera.impala.catalog.MetaStoreClientPool.MetaStoreClient;
-import com.cloudera.impala.common.FileSystemUtil;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.common.ImpalaRuntimeException;
-import com.cloudera.impala.common.JniUtil;
-import com.cloudera.impala.common.Pair;
-import com.cloudera.impala.hive.executor.UdfExecutor;
-import com.cloudera.impala.thrift.TCatalog;
-import com.cloudera.impala.thrift.TCatalogObject;
-import com.cloudera.impala.thrift.TCatalogObjectType;
-import com.cloudera.impala.thrift.TFunction;
-import com.cloudera.impala.thrift.TFunctionBinaryType;
-import com.cloudera.impala.thrift.TGetAllCatalogObjectsResponse;
-import com.cloudera.impala.thrift.TPartitionKeyValue;
-import com.cloudera.impala.thrift.TPrivilege;
-import com.cloudera.impala.thrift.TTable;
-import com.cloudera.impala.thrift.TTableName;
-import com.cloudera.impala.thrift.TUniqueId;
-import com.cloudera.impala.util.PatternMatcher;
-import com.cloudera.impala.util.SentryProxy;
-import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import com.google.common.collect.Sets;
-import com.google.common.io.Files;
-
-/**
- * Specialized Catalog that implements the CatalogService specific Catalog
- * APIs. The CatalogServiceCatalog manages loading of all the catalog metadata
- * and processing of DDL requests. For each DDL request, the CatalogServiceCatalog
- * will return the catalog version that the update will show up in. The client
- * can then wait until the statestore sends an update that contains that catalog
- * version.
- * The CatalogServiceCatalog also manages a global "catalog version". The version
- * is incremented and assigned to a CatalogObject whenever it is
- * added/modified/removed from the catalog. This means each CatalogObject will have a
- * unique version and assigned versions are strictly increasing.
- *
- * Table metadata for IncompleteTables (not fully loaded tables) is loaded in the
- * background by the TableLoadingMgr; tables can be prioritized for loading by calling
- * prioritizeLoad(). Background loading can also be enabled for the catalog, in which
- * case missing tables (tables that are not yet loaded) are submitted to the
- * TableLoadingMgr whenever table metadata is invalidated and on startup. The metadata
- * of fully loaded tables (e.g. HdfsTable, HBaseTable, etc.) is updated in-place and
- * doesn't trigger a background metadata load through the TableLoadingMgr. Accessing a
- * table that is not yet loaded (via getTable()) loads the table's metadata on demand,
- * out-of-band of the table loading thread pool.
- *
- * See the class comments in CatalogOpExecutor for a description of the locking protocol
- * that should be employed if both the catalog lock and table locks need to be held at
- * the same time.
- *
- * TODO: Consider removing on-demand loading and have everything go through the table
- * loading thread pool.
- */
-public class CatalogServiceCatalog extends Catalog {
-  private static final Logger LOG = Logger.getLogger(CatalogServiceCatalog.class);
-
-  private final TUniqueId catalogServiceId_;
-
-  // Fair lock used to synchronize reads/writes of catalogVersion_. Because this lock
-  // protects catalogVersion_, it can be used to perform atomic bulk catalog operations
-  // since catalogVersion_ cannot change externally while the lock is being held.
-  // In addition to protecting catalogVersion_, it is currently used for the
-  // following bulk operations:
-  // * Building a delta update to send to the statestore in getCatalogObjects(),
-  //   so a snapshot of the catalog can be taken without any version changes.
-  // * During a catalog invalidation (call to reset()), which re-reads all dbs and tables
-  //   from the metastore.
-  // * During renameTable(), because a table must be removed and added to the catalog
-  //   atomically (potentially in a different database).
-  private final ReentrantReadWriteLock catalogLock_ = new ReentrantReadWriteLock(true);
-
-  // Last assigned catalog version. Starts at INITIAL_CATALOG_VERSION and is incremented
-  // with each update to the Catalog. Continued across the lifetime of the object.
-  // Protected by catalogLock_.
-  // TODO: Handle overflow of catalogVersion_ and nextTableId_.
-  // TODO: The name of this variable is misleading and can be interpreted as a property
-  // of the catalog server. Rename into something that indicates its role as a global
-  // sequence number assigned to catalog objects.
-  private long catalogVersion_ = INITIAL_CATALOG_VERSION;
-
-  protected final AtomicInteger nextTableId_ = new AtomicInteger(0);
-
-  // Manages the scheduling of background table loading.
-  private final TableLoadingMgr tableLoadingMgr_;
-
-  private final boolean loadInBackground_;
-
-  // Periodically polls HDFS to get the latest set of known cache pools.
-  private final ScheduledExecutorService cachePoolReader_ =
-      Executors.newScheduledThreadPool(1);
-
-  // Proxy to access the Sentry Service and also periodically refreshes the
-  // policy metadata. Null if Sentry Service is not enabled.
-  private final SentryProxy sentryProxy_;
-
-  // Local temporary directory to copy UDF Jars.
-  private static final String LOCAL_LIBRARY_PATH =
-      "file://" + System.getProperty("java.io.tmpdir");
-
-  /**
-   * Initializes the CatalogServiceCatalog. If loadInBackground is true, table metadata
-   * will be loaded in the background.
-   */
-  public CatalogServiceCatalog(boolean loadInBackground, int numLoadingThreads,
-      SentryConfig sentryConfig, TUniqueId catalogServiceId, String kerberosPrincipal) {
-    super(true);
-    catalogServiceId_ = catalogServiceId;
-    tableLoadingMgr_ = new TableLoadingMgr(this, numLoadingThreads);
-    loadInBackground_ = loadInBackground;
-    try {
-      // We want only 'true' HDFS filesystems to poll the HDFS cache (i.e. not S3,
-      // local, etc.)
-      if (FileSystemUtil.getDefaultFileSystem() instanceof DistributedFileSystem) {
-        cachePoolReader_.scheduleAtFixedRate(
-            new CachePoolReader(), 0, 1, TimeUnit.MINUTES);
-      }
-    } catch (IOException e) {
-      LOG.error("Couldn't identify the default FS. Cache Pool reader will be disabled.");
-    }
-    if (sentryConfig != null) {
-      sentryProxy_ = new SentryProxy(sentryConfig, this, kerberosPrincipal);
-    } else {
-      sentryProxy_ = null;
-    }
-  }
-
-  /**
-   * Reads the current set of cache pools from HDFS and updates the catalog.
-   * Called periodically by the cachePoolReader_.
-   */
-  protected class CachePoolReader implements Runnable {
-
-    /**
-     * This constructor allows the reader to be instantiated and run directly,
-     * outside the scheduled executor thread.
-     */
-    public CachePoolReader() {
-      super();
-    }
-
-    public void run() {
-      LOG.trace("Reloading cache pool names from HDFS");
-      // Map of cache pool name to CachePoolInfo. Stored in a map to allow Set operations
-      // to be performed on the keys.
-      Map<String, CachePoolInfo> currentCachePools = Maps.newHashMap();
-      try {
-        DistributedFileSystem dfs = FileSystemUtil.getDistributedFileSystem();
-        RemoteIterator<CachePoolEntry> itr = dfs.listCachePools();
-        while (itr.hasNext()) {
-          CachePoolInfo cachePoolInfo = itr.next().getInfo();
-          currentCachePools.put(cachePoolInfo.getPoolName(), cachePoolInfo);
-        }
-      } catch (Exception e) {
-        LOG.error("Error loading cache pools: ", e);
-        return;
-      }
-
-      catalogLock_.writeLock().lock();
-      try {
-        // Determine what has changed relative to what we have cached.
-        Set<String> droppedCachePoolNames = Sets.difference(
-            hdfsCachePools_.keySet(), currentCachePools.keySet());
-        Set<String> createdCachePoolNames = Sets.difference(
-            currentCachePools.keySet(), hdfsCachePools_.keySet());
-        // Add all new cache pools.
-        for (String createdCachePool: createdCachePoolNames) {
-          HdfsCachePool cachePool = new HdfsCachePool(
-              currentCachePools.get(createdCachePool));
-          cachePool.setCatalogVersion(
-              CatalogServiceCatalog.this.incrementAndGetCatalogVersion());
-          hdfsCachePools_.add(cachePool);
-        }
-        // Remove dropped cache pools.
-        for (String cachePoolName: droppedCachePoolNames) {
-          hdfsCachePools_.remove(cachePoolName);
-          CatalogServiceCatalog.this.incrementAndGetCatalogVersion();
-        }
-      } finally {
-        catalogLock_.writeLock().unlock();
-      }
-    }
-  }
-
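CachePoolReader.run() above reconciles the cached pool names with the set currently
reported by HDFS via two set differences. A minimal, self-contained sketch of that
reconciliation step using Guava (the pool names are made up):

    import java.util.Set;
    import com.google.common.collect.ImmutableSet;
    import com.google.common.collect.Sets;

    public final class SetReconcileExample {
      public static void main(String[] args) {
        Set<String> cached = ImmutableSet.of("pool1", "pool2");
        Set<String> current = ImmutableSet.of("pool2", "pool3");
        // In the cache but no longer reported by HDFS -> drop these.
        Set<String> dropped = Sets.difference(cached, current);  // {pool1}
        // Reported by HDFS but not yet cached -> add these.
        Set<String> created = Sets.difference(current, cached);  // {pool3}
        System.out.println("dropped=" + dropped + " created=" + created);
      }
    }
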
-  /**
-   * Adds a list of cache directive IDs for the given table name. Asynchronously
-   * refreshes the table metadata once all cache directives complete.
-   */
-  public void watchCacheDirs(List<Long> dirIds, TTableName tblName) {
-    tableLoadingMgr_.watchCacheDirs(dirIds, tblName);
-  }
-
-  /**
-   * Prioritizes the loading of the given list of TCatalogObjects. Currently we only
-   * support loading Table/View metadata since Db and Function metadata is not loaded
-   * lazily.
-   */
-  public void prioritizeLoad(List<TCatalogObject> objectDescs) {
-    for (TCatalogObject catalogObject: objectDescs) {
-      Preconditions.checkState(catalogObject.isSetTable());
-      TTable table = catalogObject.getTable();
-      tableLoadingMgr_.prioritizeLoad(new TTableName(table.getDb_name().toLowerCase(),
-          table.getTbl_name().toLowerCase()));
-    }
-  }
-
-  /**
-   * Returns all known objects in the Catalog (Tables, Views, Databases, and
-   * Functions). Some metadata may be skipped for objects that have a catalog
-   * version < the specified "fromVersion". Takes a lock on the catalog to ensure this
-   * update contains a consistent snapshot of all items in the catalog. While holding the
-   * catalog lock, it locks each accessed table to protect against concurrent
-   * modifications.
-   */
-  public TGetAllCatalogObjectsResponse getCatalogObjects(long fromVersion) {
-    TGetAllCatalogObjectsResponse resp = new TGetAllCatalogObjectsResponse();
-    resp.setObjects(new ArrayList<TCatalogObject>());
-    resp.setMax_catalog_version(Catalog.INITIAL_CATALOG_VERSION);
-    catalogLock_.readLock().lock();
-    try {
-      for (Db db: getDbs(PatternMatcher.MATCHER_MATCH_ALL)) {
-        TCatalogObject catalogDb = new TCatalogObject(TCatalogObjectType.DATABASE,
-            db.getCatalogVersion());
-        catalogDb.setDb(db.toThrift());
-        resp.addToObjects(catalogDb);
-
-        for (String tblName: db.getAllTableNames()) {
-          TCatalogObject catalogTbl = new TCatalogObject(TCatalogObjectType.TABLE,
-              Catalog.INITIAL_CATALOG_VERSION);
-
-          Table tbl = db.getTable(tblName);
-          if (tbl == null) {
-            LOG.error("Table: " + tblName + " was expected to be in the catalog " +
-                "cache. Skipping table for this update.");
-            continue;
-          }
-
-          // Protect the table from concurrent modifications.
-          synchronized(tbl) {
-            // Only add the extended metadata if this table's version is >=
-            // the fromVersion.
-            if (tbl.getCatalogVersion() >= fromVersion) {
-              try {
-                catalogTbl.setTable(tbl.toThrift());
-              } catch (Exception e) {
-                LOG.debug(String.format("Error calling toThrift() on table %s.%s: %s",
-                    db.getName(), tblName, e.getMessage()), e);
-                continue;
-              }
-              catalogTbl.setCatalog_version(tbl.getCatalogVersion());
-            } else {
-              catalogTbl.setTable(new TTable(db.getName(), tblName));
-            }
-          }
-          resp.addToObjects(catalogTbl);
-        }
-
-        for (Function fn: db.getFunctions(null, new PatternMatcher())) {
-          TCatalogObject function = new TCatalogObject(TCatalogObjectType.FUNCTION,
-              fn.getCatalogVersion());
-          function.setFn(fn.toThrift());
-          resp.addToObjects(function);
-        }
-      }
-
-      for (DataSource dataSource: getDataSources()) {
-        TCatalogObject catalogObj = new TCatalogObject(TCatalogObjectType.DATA_SOURCE,
-            dataSource.getCatalogVersion());
-        catalogObj.setData_source(dataSource.toThrift());
-        resp.addToObjects(catalogObj);
-      }
-      for (HdfsCachePool cachePool: hdfsCachePools_) {
-        TCatalogObject pool = new TCatalogObject(TCatalogObjectType.HDFS_CACHE_POOL,
-            cachePool.getCatalogVersion());
-        pool.setCache_pool(cachePool.toThrift());
-        resp.addToObjects(pool);
-      }
-
-      // Get all roles
-      for (Role role: authPolicy_.getAllRoles()) {
-        TCatalogObject thriftRole = new TCatalogObject();
-        thriftRole.setRole(role.toThrift());
-        thriftRole.setCatalog_version(role.getCatalogVersion());
-        thriftRole.setType(role.getCatalogObjectType());
-        resp.addToObjects(thriftRole);
-
-        for (RolePrivilege p: role.getPrivileges()) {
-          TCatalogObject privilege = new TCatalogObject();
-          privilege.setPrivilege(p.toThrift());
-          privilege.setCatalog_version(p.getCatalogVersion());
-          privilege.setType(p.getCatalogObjectType());
-          resp.addToObjects(privilege);
-        }
-      }
-
-      // Each update should contain a single "TCatalog" object which is used to
-      // pass overall state on the catalog, such as the current version and the
-      // catalog service id.
-      TCatalogObject catalog = new TCatalogObject();
-      catalog.setType(TCatalogObjectType.CATALOG);
-      // By setting the catalog version to the latest catalog version at this point,
-      // we ensure impalads will always bump their versions, even in the case where
-      // an object has been dropped.
-      catalog.setCatalog_version(getCatalogVersion());
-      catalog.setCatalog(new TCatalog(catalogServiceId_));
-      resp.addToObjects(catalog);
-
-      // The max version is the max catalog version of all items in the update.
-      resp.setMax_catalog_version(getCatalogVersion());
-      return resp;
-    } finally {
-      catalogLock_.readLock().unlock();
-    }
-  }
-
-  /**
-   * Returns all user defined functions (aggregate and scalar) in the specified database.
-   * Functions are not returned in a defined order.
-   */
-  public List<Function> getFunctions(String dbName) throws DatabaseNotFoundException {
-    Db db = getDb(dbName);
-    if (db == null) {
-      throw new DatabaseNotFoundException("Database does not exist: " + dbName);
-    }
-
-    // Contains map of overloaded function names to all functions matching that name.
-    HashMap<String, List<Function>> dbFns = db.getAllFunctions();
-    List<Function> fns = new ArrayList<Function>(dbFns.size());
-    for (List<Function> fnOverloads: dbFns.values()) {
-      for (Function fn: fnOverloads) {
-        fns.add(fn);
-      }
-    }
-    return fns;
-  }
-
-  /**
-   * Checks if the Hive function 'fn' is Impala compatible. A function is Impala
-   * compatible iff
-   *
-   * 1. The function is JAVA based,
-   * 2. Has exactly one binary resource associated (We don't support loading
-   *    dependencies yet) and
-   * 3. The binary is of type JAR.
-   *
-   * Returns true if compatible and false otherwise. In case of incompatible
-   * functions 'incompatMsg' has the reason for the incompatibility.
-   */
-   public static boolean isFunctionCompatible(
-       org.apache.hadoop.hive.metastore.api.Function fn, StringBuilder incompatMsg) {
-    boolean isCompatible = true;
-    if (fn.getFunctionType() != FunctionType.JAVA) {
-      isCompatible = false;
-      incompatMsg.append("Function type: " + fn.getFunctionType().name()
-          + " is not supported. Only " + FunctionType.JAVA.name() + " functions "
-          + "are supported.");
-    } else if (fn.getResourceUrisSize() == 0) {
-      isCompatible = false;
-      incompatMsg.append("No executable binary resource (like a JAR file) is " +
-          "associated with this function. To fix this, recreate the function by " +
-          "specifying a 'location' in the function create statement.");
-    } else if (fn.getResourceUrisSize() != 1) {
-      isCompatible = false;
-      List<String> resourceUris = Lists.newArrayList();
-      for (ResourceUri resource: fn.getResourceUris()) {
-        resourceUris.add(resource.getUri());
-      }
-      incompatMsg.append("Impala does not support multiple Jars for dependencies."
-          + "(" + Joiner.on(",").join(resourceUris) + ") ");
-    } else if (fn.getResourceUris().get(0).getResourceType() != ResourceType.JAR) {
-      isCompatible = false;
-      incompatMsg.append("Function binary type: " +
-        fn.getResourceUris().get(0).getResourceType().name()
-        + " is not supported. Only " + ResourceType.JAR.name()
-        + " type is supported.");
-    }
-    return isCompatible;
-  }
-
-  /**
-   * Returns a list of Impala Functions, one per compatible "evaluate" method in the UDF
-   * class referred to by the given Java function. This method copies the UDF Jar
-   * referenced by "function" to a temporary file in "LOCAL_LIBRARY_PATH" and loads it
-   * into the JVM. Then we scan all the methods in the class using reflection and
-   * create a corresponding Impala function for each compatible one. Currently Impala
-   * supports only "JAR" files for symbols, and only a single Jar containing all the
-   * dependent classes rather than a set of Jar files.
-   */
-  public static List<Function> extractFunctions(String db,
-      org.apache.hadoop.hive.metastore.api.Function function)
-      throws ImpalaRuntimeException{
-    List<Function> result = Lists.newArrayList();
-    List<String> addedSignatures = Lists.newArrayList();
-    StringBuilder warnMessage = new StringBuilder();
-    if (!isFunctionCompatible(function, warnMessage)) {
-      LOG.warn("Skipping load of incompatible function: " +
-          function.getFunctionName() + ". " + warnMessage.toString());
-      return result;
-    }
-    String jarUri = function.getResourceUris().get(0).getUri();
-    Class<?> udfClass = null;
-    try {
-      Path localJarPath = new Path(LOCAL_LIBRARY_PATH,
-          UUID.randomUUID().toString() + ".jar");
-      try {
-        FileSystemUtil.copyToLocal(new Path(jarUri), localJarPath);
-      } catch (IOException e) {
-        String errorMsg = "Error loading Java function: " + db + "." +
-            function.getFunctionName() + ". Couldn't copy " + jarUri +
-            " to local path: " + localJarPath.toString();
-        LOG.error(errorMsg, e);
-        throw new ImpalaRuntimeException(errorMsg);
-      }
-      URL[] classLoaderUrls = new URL[] {new URL(localJarPath.toString())};
-      URLClassLoader urlClassLoader = new URLClassLoader(classLoaderUrls);
-      udfClass = urlClassLoader.loadClass(function.getClassName());
-      // Check if the class is of UDF type. Currently we don't support other functions
-      // TODO: Remove this once we support Java UDAF/UDTF
-      if (FunctionUtils.getUDFClassType(udfClass) != FunctionUtils.UDFClassType.UDF) {
-        LOG.warn("Ignoring load of incompatible Java function: " +
-            function.getFunctionName() + " as " + FunctionUtils.getUDFClassType(udfClass)
-            + " is not a supported type. Only UDFs are supported");
-        return result;
-      }
-      // Load each method in the UDF class and create the corresponding Impala Function
-      // object.
-      for (Method m: udfClass.getMethods()) {
-        if (!m.getName().equals(UdfExecutor.UDF_FUNCTION_NAME)) continue;
-        Function fn = ScalarFunction.fromHiveFunction(db,
-            function.getFunctionName(), function.getClassName(),
-            m.getParameterTypes(), m.getReturnType(), jarUri);
-        if (fn == null) {
-          LOG.warn("Ignoring incompatible method: " + m.toString() + " during load of " +
-             "Hive UDF:" + function.getFunctionName() + " from " + udfClass);
-          continue;
-        }
-        if (!addedSignatures.contains(fn.signatureString())) {
-          result.add(fn);
-          addedSignatures.add(fn.signatureString());
-        }
-      }
-    } catch (ClassNotFoundException c) {
-      String errorMsg = "Error loading Java function: " + db + "." +
-          function.getFunctionName() + ". Symbol class " + udfClass +
-          "not found in Jar: " + jarUri;
-      LOG.error(errorMsg);
-      throw new ImpalaRuntimeException(errorMsg, c);
-    } catch (Exception e) {
-      LOG.error("Skipping function load: " + function.getFunctionName(), e);
-      throw new ImpalaRuntimeException("Error extracting functions", e);
-    } catch (LinkageError e) {
-      String errorMsg = "Error resolving dependencies for Java function: " + db + "." +
-          function.getFunctionName();
-      LOG.error(errorMsg);
-      throw new ImpalaRuntimeException(errorMsg, e);
-    }
-    return result;
-  }
-
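extractFunctions() above boils down to loading a class from a local jar with a
URLClassLoader and reflecting over its public methods. A self-contained sketch of just
that mechanism in plain JDK code (the jar path, class name and method name are
placeholders):

    import java.io.File;
    import java.lang.reflect.Method;
    import java.net.URL;
    import java.net.URLClassLoader;

    public final class JarReflectionExample {
      public static void main(String[] args) throws Exception {
        File jar = new File("/tmp/my-udf.jar");     // placeholder path
        String className = "com.example.MyUdf";     // placeholder class name
        try (URLClassLoader loader =
                 new URLClassLoader(new URL[] { jar.toURI().toURL() })) {
          Class<?> udfClass = loader.loadClass(className);
          // Enumerate candidate methods, e.g. every public method named "evaluate".
          for (Method m : udfClass.getMethods()) {
            if (!m.getName().equals("evaluate")) continue;
            System.out.println("found: " + m);
          }
        }
      }
    }
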
- /**
-   * Extracts Impala functions stored in metastore db parameters and adds them to
-   * the catalog cache.
-   */
-  private void loadFunctionsFromDbParams(Db db,
-      org.apache.hadoop.hive.metastore.api.Database msDb) {
-    if (msDb == null || msDb.getParameters() == null) return;
-    LOG.info("Loading native functions for database: " + db.getName());
-    TCompactProtocol.Factory protocolFactory = new TCompactProtocol.Factory();
-    for (String key: msDb.getParameters().keySet()) {
-      if (!key.startsWith(Db.FUNCTION_INDEX_PREFIX)) continue;
-      try {
-        TFunction fn = new TFunction();
-        JniUtil.deserializeThrift(protocolFactory, fn,
-            Base64.decodeBase64(msDb.getParameters().get(key)));
-        Function addFn = Function.fromThrift(fn);
-        db.addFunction(addFn, false);
-        addFn.setCatalogVersion(incrementAndGetCatalogVersion());
-      } catch (ImpalaException e) {
-        LOG.error("Encountered an error during function load: key=" + key
-            + ",continuing", e);
-      }
-    }
-  }
-
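loadFunctionsFromDbParams() relies on each function having been serialized with the Thrift compact protocol and Base64-encoded into the database's parameter map. A generic sketch of that round-trip is below, assuming only libthrift and commons-codec; TFunction (or any other generated thrift struct) would be passed in as the TBase argument.

    import org.apache.commons.codec.binary.Base64;
    import org.apache.thrift.TBase;
    import org.apache.thrift.TDeserializer;
    import org.apache.thrift.TException;
    import org.apache.thrift.TSerializer;
    import org.apache.thrift.protocol.TCompactProtocol;

    // Sketch of the db-parameter persistence format: compact-protocol thrift bytes
    // wrapped in Base64 text, reversed on load.
    final class ThriftParamCodec {
      static String encode(TBase<?, ?> obj) throws TException {
        TSerializer serializer = new TSerializer(new TCompactProtocol.Factory());
        return Base64.encodeBase64String(serializer.serialize(obj));
      }

      static void decode(String encoded, TBase<?, ?> into) throws TException {
        TDeserializer deserializer = new TDeserializer(new TCompactProtocol.Factory());
        deserializer.deserialize(into, Base64.decodeBase64(encoded));
      }
    }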
-  /**
-   * Loads Java functions into the catalog. For each function in "functions",
-   * we extract all Impala compatible evaluate() signatures and load them
-   * as separate functions in the catalog.
-   */
-  private void loadJavaFunctions(Db db,
-      List<org.apache.hadoop.hive.metastore.api.Function> functions) {
-    Preconditions.checkNotNull(functions);
-    LOG.info("Loading Java functions for database: " + db.getName());
-    for (org.apache.hadoop.hive.metastore.api.Function function: functions) {
-      try {
-        for (Function fn: extractFunctions(db.getName(), function)) {
-          db.addFunction(fn);
-          fn.setCatalogVersion(incrementAndGetCatalogVersion());
-        }
-      } catch (Exception e) {
-        LOG.error("Skipping function load: " + function.getFunctionName(), e);
-      }
-    }
-  }
-
-  /**
-   * Invalidates the database 'db'. This method is subject to races with concurrent
-   * external changes to the Hive metastore; conflicting changes surface as exceptions
-   * from the HMS calls, which are handled appropriately. Returns the invalidated 'Db'
-   * object along with the list of tables to be loaded by the TableLoadingMgr, or null
-   * if an exception is encountered during invalidation.
-   */
-  private Pair<Db, List<TTableName>> invalidateDb(
-      MetaStoreClient msClient, String dbName, Db existingDb) {
-    try {
-      List<org.apache.hadoop.hive.metastore.api.Function> javaFns =
-          Lists.newArrayList();
-      for (String javaFn: msClient.getHiveClient().getFunctions(dbName, "*")) {
-        javaFns.add(msClient.getHiveClient().getFunction(dbName, javaFn));
-      }
-      org.apache.hadoop.hive.metastore.api.Database msDb =
-          msClient.getHiveClient().getDatabase(dbName);
-      Db newDb = new Db(dbName, this, msDb);
-      // existingDb is usually null when the Catalog loads for the first time.
-      // In that case we needn't restore any transient functions.
-      if (existingDb != null) {
-        // Restore UDFs that aren't persisted. They are only cleaned up on
-        // Catalog restart.
-        for (Function fn: existingDb.getTransientFunctions()) {
-          newDb.addFunction(fn);
-          fn.setCatalogVersion(incrementAndGetCatalogVersion());
-        }
-      }
-      // Reload native UDFs.
-      loadFunctionsFromDbParams(newDb, msDb);
-      // Reload Java UDFs from HMS.
-      loadJavaFunctions(newDb, javaFns);
-      newDb.setCatalogVersion(incrementAndGetCatalogVersion());
-
-      List<TTableName> tblsToBackgroundLoad = Lists.newArrayList();
-      for (String tableName: msClient.getHiveClient().getAllTables(dbName)) {
-        Table incompleteTbl = IncompleteTable.createUninitializedTable(
-            getNextTableId(), newDb, tableName);
-        incompleteTbl.setCatalogVersion(incrementAndGetCatalogVersion());
-        newDb.addTable(incompleteTbl);
-        if (loadInBackground_) {
-          tblsToBackgroundLoad.add(new TTableName(dbName, tableName.toLowerCase()));
-        }
-      }
-      return Pair.create(newDb, tblsToBackgroundLoad);
-    } catch (Exception e) {
-      LOG.warn("Encountered an exception while invalidating database: " + dbName +
-          ". Ignoring further load of this db.", e);
-    }
-    return null;
-  }
-
-  /**
-   * Resets this catalog instance by clearing all cached table and database metadata.
-   */
-  public void reset() throws CatalogException {
-    // First update the policy metadata.
-    if (sentryProxy_ != null) {
-      // Sentry Service is enabled.
-      try {
-        // Update the authorization policy, waiting for the result to complete.
-        sentryProxy_.refresh();
-      } catch (Exception e) {
-        throw new CatalogException("Error updating authorization policy: ", e);
-      }
-    }
-
-    catalogLock_.writeLock().lock();
-    try {
-      nextTableId_.set(0);
-
-      // Not all Java UDFs are persisted to the metastore. The ones which aren't
-      // should be restored once the catalog has been invalidated.
-      Map<String, Db> oldDbCache = dbCache_.get();
-
-      // Build a new DB cache, populate it, and replace the existing cache in one
-      // step.
-      ConcurrentHashMap<String, Db> newDbCache = new ConcurrentHashMap<String, Db>();
-      List<TTableName> tblsToBackgroundLoad = Lists.newArrayList();
-      try (MetaStoreClient msClient = getMetaStoreClient()) {
-        for (String dbName: msClient.getHiveClient().getAllDatabases()) {
-          dbName = dbName.toLowerCase();
-          Db oldDb = oldDbCache.get(dbName);
-          Pair<Db, List<TTableName>> invalidatedDb = invalidateDb(msClient,
-              dbName, oldDb);
-          if (invalidatedDb == null) continue;
-          newDbCache.put(dbName, invalidatedDb.first);
-          tblsToBackgroundLoad.addAll(invalidatedDb.second);
-        }
-      }
-      dbCache_.set(newDbCache);
-      // Submit tables for background loading.
-      for (TTableName tblName: tblsToBackgroundLoad) {
-        tableLoadingMgr_.backgroundLoad(tblName);
-      }
-    } catch (Exception e) {
-      LOG.error(e);
-      throw new CatalogException("Error initializing Catalog. Catalog may be empty.", e);
-    } finally {
-      catalogLock_.writeLock().unlock();
-    }
-  }
-
-  /**
-   * Adds a database name to the metadata cache and returns the database's
-   * new Db object. Used by CREATE DATABASE statements.
-   */
-  public Db addDb(String dbName, org.apache.hadoop.hive.metastore.api.Database msDb)
-      throws ImpalaException {
-    Db newDb = new Db(dbName, this, msDb);
-    newDb.setCatalogVersion(incrementAndGetCatalogVersion());
-    addDb(newDb);
-    return newDb;
-  }
-
-  /**
-   * Removes a database from the metadata cache and returns the removed database,
-   * or null if the database did not exist in the cache.
-   * Used by DROP DATABASE statements.
-   */
-  @Override
-  public Db removeDb(String dbName) {
-    Db removedDb = super.removeDb(dbName);
-    if (removedDb != null) {
-      removedDb.setCatalogVersion(incrementAndGetCatalogVersion());
-    }
-    return removedDb;
-  }
-
-  /**
-   * Adds a table with the given name to the catalog and returns the new table,
-   * loading the metadata if needed.
-   */
-  public Table addTable(String dbName, String tblName) throws TableNotFoundException {
-    Db db = getDb(dbName);
-    if (db == null) return null;
-    Table incompleteTable =
-        IncompleteTable.createUninitializedTable(getNextTableId(), db, tblName);
-    incompleteTable.setCatalogVersion(incrementAndGetCatalogVersion());
-    db.addTable(incompleteTable);
-    return db.getTable(tblName);
-  }
-
-  /**
-   * Gets the table with the given name, loading it if needed (if the existing catalog
-   * object is not yet loaded). Returns the matching Table or null if no table with this
-   * name exists in the catalog.
-   * If the existing table is dropped or modified (indicated by the catalog version
-   * changing) while the load is in progress, the loaded value will be discarded
-   * and the current cached value will be returned. This may mean that a missing table
-   * (not yet loaded table) will be returned.
-   */
-  public Table getOrLoadTable(String dbName, String tblName)
-      throws CatalogException {
-    TTableName tableName = new TTableName(dbName.toLowerCase(), tblName.toLowerCase());
-    TableLoadingMgr.LoadRequest loadReq;
-
-    long previousCatalogVersion;
-    // Return the table if it is already loaded or submit a new load request.
-    catalogLock_.readLock().lock();
-    try {
-      Table tbl = getTable(dbName, tblName);
-      if (tbl == null || tbl.isLoaded()) return tbl;
-      previousCatalogVersion = tbl.getCatalogVersion();
-      loadReq = tableLoadingMgr_.loadAsync(tableName);
-    } finally {
-      catalogLock_.readLock().unlock();
-    }
-    Preconditions.checkNotNull(loadReq);
-    try {
-      // The table may have been dropped/modified while the load was in progress, so only
-      // apply the update if the existing table hasn't changed.
-      return replaceTableIfUnchanged(loadReq.get(), previousCatalogVersion);
-    } finally {
-      loadReq.close();
-    }
-  }
-
-  /**
-   * Replaces an existing Table with a new value if it exists and has not changed
-   * (has the same catalog version as 'expectedCatalogVersion').
-   */
-  private Table replaceTableIfUnchanged(Table updatedTbl, long expectedCatalogVersion)
-      throws DatabaseNotFoundException {
-    catalogLock_.writeLock().lock();
-    try {
-      Db db = getDb(updatedTbl.getDb().getName());
-      if (db == null) {
-        throw new DatabaseNotFoundException(
-            "Database does not exist: " + updatedTbl.getDb().getName());
-      }
-
-      Table existingTbl = db.getTable(updatedTbl.getName());
-      // The table no longer exists in the catalog or has been modified. Instead of
-      // adding the loaded value, return the current entry (possibly null).
-      if (existingTbl == null ||
-          existingTbl.getCatalogVersion() != expectedCatalogVersion) return existingTbl;
-
-      updatedTbl.setCatalogVersion(incrementAndGetCatalogVersion());
-      db.addTable(updatedTbl);
-      return updatedTbl;
-    } finally {
-      catalogLock_.writeLock().unlock();
-    }
-  }
-
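getOrLoadTable() and replaceTableIfUnchanged() together implement an optimistic scheme: record the catalog version under a read lock, perform the slow metastore load with no lock held, then apply the result only if the version is unchanged. A stripped-down sketch of the same idea, using a ConcurrentHashMap and an AtomicLong in place of the catalog structures, follows; Entry and loadFromSource() are placeholders, not Impala classes.

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.atomic.AtomicLong;

    // Sketch of "load outside the lock, apply only if unchanged".
    final class VersionedCache {
      static final class Entry {
        final String payload;
        final long version;
        Entry(String payload, long version) { this.payload = payload; this.version = version; }
      }

      private final AtomicLong catalogVersion = new AtomicLong();
      private final ConcurrentHashMap<String, Entry> cache = new ConcurrentHashMap<>();

      Entry getOrLoad(String key) {
        Entry cached = cache.get(key);
        if (cached == null) return null;                // unknown entry: nothing to load
        String loaded = loadFromSource(key);            // slow load, no lock held
        Entry fresh = new Entry(loaded, catalogVersion.incrementAndGet());
        // Apply the update only if the entry we originally saw is still current;
        // otherwise a concurrent drop/modify wins and the cached value is returned.
        return cache.replace(key, cached, fresh) ? fresh : cache.get(key);
      }

      private String loadFromSource(String key) { return "metadata-for-" + key; }
    }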
-  /**
-   * Removes a table from the catalog and increments the catalog version.
-   * Returns the removed Table, or null if the table or db does not exist.
-   */
-  public Table removeTable(String dbName, String tblName) {
-    Db parentDb = getDb(dbName);
-    if (parentDb == null) return null;
-
-    Table removedTable = parentDb.removeTable(tblName);
-    if (removedTable != null) {
-      removedTable.setCatalogVersion(incrementAndGetCatalogVersion());
-    }
-    return removedTable;
-  }
-
-  /**
-   * Removes a function from the catalog. Increments the catalog version and returns
-   * the Function object that was removed. If the function did not exist, null will
-   * be returned.
-   */
-  @Override
-  public Function removeFunction(Function desc) {
-    Function removedFn = super.removeFunction(desc);
-    if (removedFn != null) {
-      removedFn.setCatalogVersion(incrementAndGetCatalogVersion());
-    }
-    return removedFn;
-  }
-
-  /**
-   * Adds a function to the catalog, incrementing the catalog version. Returns true if
-   * the add was successful, false otherwise.
-   */
-  @Override
-  public boolean addFunction(Function fn) {
-    Db db = getDb(fn.getFunctionName().getDb());
-    if (db == null) return false;
-    if (db.addFunction(fn)) {
-      fn.setCatalogVersion(incrementAndGetCatalogVersion());
-      return true;
-    }
-    return false;
-  }
-
-  /**
-   * Adds a data source to the catalog, incrementing the catalog version. Returns true
-   * if the add was successful, false otherwise.
-   */
-  @Override
-  public boolean addDataSource(DataSource dataSource) {
-    if (dataSources_.add(dataSource)) {
-      dataSource.setCatalogVersion(incrementAndGetCatalogVersion());
-      return true;
-    }
-    return false;
-  }
-
-  @Override
-  public DataSource removeDataSource(String dataSourceName) {
-    DataSource dataSource = dataSources_.remove(dataSourceName);
-    if (dataSource != null) {
-      dataSource.setCatalogVersion(incrementAndGetCatalogVersion());
-    }
-    return dataSource;
-  }
-
-  /**
-   * Returns the table parameter 'transient_lastDdlTime', or -1 if it's not set.
-   * TODO: move this to a metastore helper class.
-   */
-  public static long getLastDdlTime(org.apache.hadoop.hive.metastore.api.Table msTbl) {
-    Preconditions.checkNotNull(msTbl);
-    Map<String, String> params = msTbl.getParameters();
-    String lastDdlTimeStr = params.get("transient_lastDdlTime");
-    if (lastDdlTimeStr != null) {
-      try {
-        return Long.parseLong(lastDdlTimeStr);
-      } catch (NumberFormatException e) {}
-    }
-    return -1;
-  }
-
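The TODO in getLastDdlTime() suggests moving the lookup into a metastore helper class. A minimal sketch of what such a helper could look like, operating on the bare parameter map and preserving the lenient -1 fallback, is below; the class and method names are illustrative only.

    import java.util.Map;

    // Sketch of a lenient lastDdlTime lookup: a missing or non-numeric
    // "transient_lastDdlTime" parameter yields -1, matching the method above.
    final class MetastoreParams {
      static long lastDdlTime(Map<String, String> tableParams) {
        String raw = tableParams == null ? null : tableParams.get("transient_lastDdlTime");
        if (raw == null) return -1;
        try {
          return Long.parseLong(raw);
        } catch (NumberFormatException e) {
          return -1;   // malformed value: treat as unknown
        }
      }
    }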
-  /**
-   * Updates the cached lastDdlTime for the given table. The lastDdlTime is used during
-   * the metadata refresh() operations to determine if there have been any external
-   * (outside of Impala) modifications to the table.
-   */
-  public void updateLastDdlTime(TTableName tblName, long ddlTime) {
-    Db db = getDb(tblName.getDb_name());
-    if (db == null) return;
-    Table tbl = db.getTable(tblName.getTable_name());
-    if (tbl == null) return;
-    tbl.updateLastDdlTime(ddlTime);
-  }
-
-  /**
-   * Renames a table. Equivalent to an atomic drop + add of the table. Returns
-   * the new Table object with an incremented catalog version, or null if the operation
-   * was not successful.
-   */
-  public Table renameTable(TTableName oldTableName, TTableName newTableName)
-      throws CatalogException {
-    // Remove the old table name from the cache and add the new table.
-    Db db = getDb(oldTableName.getDb_name());
-    if (db != null) db.removeTable(oldTableName.getTable_name());
-    return addTable(newTableName.getDb_name(), newTableName.getTable_name());
-  }
-
-  /**
-   * Reloads metadata for table 'tbl'. If 'tbl' is an IncompleteTable, it makes an
-   * asynchronous request to the table loading manager to create a proper table instance
-   * and load the metadata from Hive Metastore. Otherwise, it updates table metadata
-   * in-place by calling the load() function on the specified table. Returns 'tbl', if it
-   * is a fully loaded table (e.g. HdfsTable, HBaseTable, etc). Otherwise, returns a
-   * newly constructed fully loaded table. Applies proper synchronization to protect the
-   * metadata load from concurrent table modifications and assigns a new catalog version.
-   * Throws a CatalogException if there is an error loading table metadata.
-   */
-  public Table reloadTable(Table tbl) throws CatalogException {
-    LOG.debug(String.format("Refreshing table metadata: %s", tbl.getFullName()));
-    TTableName tblName = new TTableName(tbl.getDb().getName().toLowerCase(),
-        tbl.getName().toLowerCase());
-    Db db = tbl.getDb();
-    if (tbl instanceof IncompleteTable) {
-      TableLoadingMgr.LoadRequest loadReq;
-      long previousCatalogVersion;
-      // Return the table if it is already loaded or submit a new load request.
-      catalogLock_.readLock().lock();
-      try {
-        previousCatalogVersion = tbl.getCatalogVersion();
-        loadReq = tableLoadingMgr_.loadAsync(tblName);
-      } finally {
-        catalogLock_.readLock().unlock();
-      }
-      Preconditions.checkNotNull(loadReq);
-      try {
-        // The table may have been dropped/modified while the load was in progress, so
-        // only apply the update if the existing table hasn't changed.
-        return replaceTableIfUnchanged(loadReq.get(), previousCatalogVersion);
-      } finally {
-        loadReq.close();
-      }
-    }
-
-    catalogLock_.writeLock().lock();
-    synchronized(tbl) {
-      long newCatalogVersion = incrementAndGetCatalogVersion();
-      catalogLock_.writeLock().unlock();
-      try (MetaStoreClient msClient = getMetaStoreClient()) {
-        org.apache.hadoop.hive.metastore.api.Table msTbl = null;
-        try {
-          msTbl = msClient.getHiveClient().getTable(db.getName(),
-              tblName.getTable_name());
-        } catch (Exception e) {
-          throw new TableLoadingException("Error loading metadata for table: " +
-              db.getName() + "." + tblName.getTable_name(), e);
-        }
-        tbl.load(true, msClient.getHiveClient(), msTbl);
-      }
-      tbl.setCatalogVersion(newCatalogVersion);
-      return tbl;
-    }
-  }
-
-  /**
-   * Reloads the metadata of a table with name 'tableName'. Returns the table or null if
-   * the table does not exist.
-   */
-  public Table reloadTable(TTableName tableName) throws CatalogException {
-    Table table = getTable(tableName.getDb_name(), tableName.getTable_name());
-    if (table == null) return null;
-    return reloadTable(table);
-  }
-
-  /**
-   * Drops the partition specified in 'partitionSpec' from 'tbl'. Throws a
-   * CatalogException if 'tbl' is not an HdfsTable. If the partition having the given
-   * partition spec does not exist, null is returned. Otherwise, the modified table is
-   * returned.
-   */
-  public Table dropPartition(Table tbl, List<TPartitionKeyValue> partitionSpec)
-      throws CatalogException {
-    Preconditions.checkNotNull(tbl);
-    Preconditions.checkNotNull(partitionSpec);
-    Preconditions.checkState(Thread.holdsLock(tbl));
-    if (!(tbl instanceof HdfsTable)) {
-      throw new CatalogException("Table " + tbl.getFullName() + " is not an Hdfs table");
-    }
-    HdfsTable hdfsTable = (HdfsTable) tbl;
-    if (hdfsTable.dropPartition(partitionSpec) == null) return null;
-    return hdfsTable;
-  }
-
-  /**
-   * Adds a partition to its HdfsTable and returns the modified table.
-   */
-  public Table addPartition(HdfsPartition partition) throws CatalogException {
-    Preconditions.checkNotNull(partition);
-    HdfsTable hdfsTable = partition.getTable();
-    Db db = getDb(hdfsTable.getDb().getName());
-    hdfsTable.addPartition(partition);
-    return hdfsTable;
-  }
-
-  /**
-   * Invalidates the table in the catalog cache, potentially adding/removing the table
-   * from the cache based on whether it exists in the Hive Metastore.
-   * The invalidation logic is:
-   * - If the table exists in the metastore, add it to the catalog as an uninitialized
-   *   IncompleteTable (replacing any existing entry). The table metadata will be
-   *   loaded lazily, on the next access. If the parent database for this table does not
-   *   yet exist in Impala's cache it will also be added.
-   * - If the table does not exist in the metastore, remove it from the catalog cache.
-   * - If we are unable to determine whether the table exists in the metastore (there was
-   *   an exception thrown making the RPC), invalidate any existing Table by replacing
-   *   it with an uninitialized IncompleteTable.
-   *
-   * The parameter updatedObjects is a Pair that contains details on what catalog objects
-   * were modified as a result of the invalidateTable() call. The first item in the Pair
-   * is a Db which will only be set if a new database was added as a result of this call,
-   * otherwise it will be null. The second item in the Pair is the Table that was
-   * modified/added/removed.
-   * Returns a flag that indicates whether the items in updatedObjects were removed
-   * (returns true) or added/modified (returns false). Only Tables should ever be removed.
-   */
-  public boolean invalidateTable(TTableName tableName, Pair<Db, Table> updatedObjects) {
-    Preconditions.checkNotNull(updatedObjects);
-    updatedObjects.first = null;
-    updatedObjects.second = null;
-    LOG.debug(String.format("Invalidating table metadata: %s.%s",
-        tableName.getDb_name(), tableName.getTable_name()));
-    String dbName = tableName.getDb_name();
-    String tblName = tableName.getTable_name();
-
-    // Stores whether the table exists in the metastore. Can have three states:
-    // 1) true - Table exists in metastore.
-    // 2) false - Table does not exist in metastore.
-    // 3) unknown (null) - There was exception thrown by the metastore client.
-    Boolean tableExistsInMetaStore;
-    Db db = null;
-    try (MetaStoreClient msClient = getMetaStoreClient()) {
-      org.apache.hadoop.hive.metastore.api.Database msDb = null;
-      try {
-        tableExistsInMetaStore = msClient.getHiveClient().tableExists(dbName, tblName);
-      } catch (UnknownDBException e) {
-        // The parent database does not exist in the metastore. Treat this the same
-        // as if the table does not exist.
-        tableExistsInMetaStore = false;
-      } catch (TException e) {
-        LOG.error("Error executing tableExists() metastore call: " + tblName, e);
-        tableExistsInMetaStore = null;
-      }
-
-      if (tableExistsInMetaStore != null && !tableExistsInMetaStore) {
-        updatedObjects.second = removeTable(dbName, tblName);
-        return true;
-      }
-
-      db = getDb(dbName);
-      if ((db == null || !db.containsTable(tblName)) && tableExistsInMetaStore == null) {
-        // The table does not exist in our cache AND it is unknown whether the
-        // table exists in the metastore. Do nothing.
-        return false;
-      } else if (db == null && tableExistsInMetaStore) {
-        // The table exists in the metastore, but our cache does not contain the parent
-        // database. A new db will be added to the cache along with the new table. msDb
-        // must be valid since tableExistsInMetaStore is true.
-        try {
-          msDb = msClient.getHiveClient().getDatabase(dbName);
-          Preconditions.checkNotNull(msDb);
-          db = new Db(dbName, this, msDb);
-          db.setCatalogVersion(incrementAndGetCatalogVersion());
-          addDb(db);
-          updatedObjects.first = db;
-        } catch (TException e) {
-          // The metastore database could not be fetched. Log the error and return.
-          LOG.error("Error executing getDatabase() metastore call: " + dbName, e);
-          return false;
-        }
-      }
-    }
-
-    // Add a new uninitialized table to the table cache, effectively invalidating
-    // any existing entry. The metadata for the table will be loaded lazily, on the
-    // next access to the table.
-    Table newTable = IncompleteTable.createUninitializedTable(
-        getNextTableId(), db, tblName);
-    newTable.setCatalogVersion(incrementAndGetCatalogVersion());
-    db.addTable(newTable);
-    if (loadInBackground_) {
-      tableLoadingMgr_.backgroundLoad(new TTableName(dbName.toLowerCase(),
-          tblName.toLowerCase()));
-    }
-    updatedObjects.second = newTable;
-    return false;
-  }
-
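The core of invalidateTable() is a three-way decision on a nullable Boolean: the table definitely exists, definitely does not exist, or its existence is unknown because the metastore RPC failed. The sketch below isolates that decision with a plain map standing in for the catalog cache; all names are illustrative, not Impala APIs.

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    // Sketch of the three-way invalidation decision. 'existsInMetastore' is
    // deliberately a nullable Boolean: null means existence could not be determined.
    final class InvalidateSketch {
      private final Map<String, String> cache = new ConcurrentHashMap<>();

      /** Returns true if the entry was removed, false if it was replaced or left alone. */
      boolean invalidate(String tbl, Boolean existsInMetastore) {
        if (Boolean.FALSE.equals(existsInMetastore)) {
          cache.remove(tbl);                    // definitely dropped externally
          return true;
        }
        if (existsInMetastore == null && !cache.containsKey(tbl)) {
          return false;                         // unknown and not cached: do nothing
        }
        // Either the table exists, or we cannot tell but have a cached entry:
        // replace it with an uninitialized placeholder to force a lazy reload.
        cache.put(tbl, "UNINITIALIZED");
        return false;
      }
    }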
-  /**
-   * Adds a new role with the given name and grant groups to the AuthorizationPolicy.
-   * If a role with the same name already exists it will be overwritten.
-   */
-  public Role addRole(String roleName, Set<String> grantGroups) {
-    catalogLock_.writeLock().lock();
-    try {
-      Role role = new Role(roleName, grantGroups);
-      role.setCatalogVersion(incrementAndGetCatalogVersion());
-      authPolicy_.addRole(role);
-      return role;
-    } finally {
-      catalogLock_.writeLock().unlock();
-    }
-  }
-
-  /**
-   * Removes the role with the given name from the AuthorizationPolicy. Returns the
-   * removed role with an incremented catalog version, or null if no role with this name
-   * exists.
-   */
-  public Role removeRole(String roleName) {
-    catalogLock_.writeLock().lock();
-    try {
-      Role role = authPolicy_.removeRole(roleName);
-      if (role == null) return null;
-      role.setCatalogVersion(incrementAndGetCatalogVersion());
-      return role;
-    } finally {
-      catalogLock_.writeLock().unlock();
-    }
-  }
-
-  /**
-   * Adds a grant group to the given role name and returns the modified Role with
-   * an updated catalog version. If the role does not exist a CatalogException is thrown.
-   */
-  public Role addRoleGrantGroup(String roleName, String groupName)
-      throws CatalogException {
-    catalogLock_.writeLock().lock();
-    try {
-      Role role = authPolicy_.addGrantGroup(roleName, groupName);
-      Preconditions.checkNotNull(role);
-      role.setCatalogVersion(incrementAndGetCatalogVersion());
-      return role;
-    } finally {
-      catalogLock_.writeLock().unlock();
-    }
-  }
-
-  /**
-   * Removes a grant group from the given role name and returns the modified Role with
-   * an updated catalog version. If the role does not exist a CatalogException is thrown.
-   */
-  public Role removeRoleGrantGroup(String roleName, String groupName)
-      throws CatalogException {
-    catalogLock_.writeLock().lock();
-    try {
-      Role role = authPolicy_.removeGrantGroup(roleName, groupName);
-      Preconditions.checkNotNull(role);
-      role.setCatalogVersion(incrementAndGetCatalogVersion());
-      return role;
-    } finally {
-      catalogLock_.writeLock().unlock();
-    }
-  }
-
-  /**
-   * Adds a privilege to the given role name. Returns the new RolePrivilege and
-   * increments the catalog version. If the parent role does not exist a CatalogException
-   * is thrown.
-   */
-  public RolePrivilege addRolePrivilege(String roleName, TPrivilege thriftPriv)
-      throws CatalogException {
-    catalogLock_.writeLock().lock();
-    try {
-      Role role = authPolicy_.getRole(roleName);
-      if (role == null) throw new CatalogException("Role does not exist: " + roleName);
-      RolePrivilege priv = RolePrivilege.fromThrift(thriftPriv);
-      priv.setCatalogVersion(incrementAndGetCatalogVersion());
-      authPolicy_.addPrivilege(priv);
-      return priv;
-    } finally {
-      catalogLock_.writeLock().unlock();
-    }
-  }
-
-  /**
-   * Removes a RolePrivilege from the given role name. Returns the removed
-   * RolePrivilege with an incremented catalog version or null if no matching privilege
-   * was found. Throws a CatalogException if no role exists with this name.
-   */
-  public RolePrivilege removeRolePrivilege(String roleName, TPrivilege thriftPriv)
-      throws CatalogException {
-    catalogLock_.writeLock().lock();
-    try {
-      Role role = authPolicy_.getRole(roleName);
-      if (role == null) throw new CatalogException("Role does not exist: " + roleName);
-      RolePrivilege rolePrivilege =
-          role.removePrivilege(thriftPriv.getPrivilege_name());
-      if (rolePrivilege == null) return null;
-      rolePrivilege.setCatalogVersion(incrementAndGetCatalogVersion());
-      return rolePrivilege;
-    } finally {
-      catalogLock_.writeLock().unlock();
-    }
-  }
-
-  /**
-   * Gets a RolePrivilege from the given role name. Returns the privilege if it exists,
-   * or null if no privilege matching the privilege spec exist.
-   * Throws a CatalogException if the role does not exist.
-   */
-  public RolePrivilege getRolePrivilege(String roleName, TPrivilege privSpec)
-      throws CatalogException {
-    catalogLock_.readLock().lock();
-    try {
-      Role role = authPolicy_.getRole(roleName);
-      if (role == null) throw new CatalogException("Role does not exist: " + roleName);
-      return role.getPrivilege(privSpec.getPrivilege_name());
-    } finally {
-      catalogLock_.readLock().unlock();
-    }
-  }
-
-  /**
-   * Increments the current Catalog version and returns the new value.
-   */
-  public long incrementAndGetCatalogVersion() {
-    catalogLock_.writeLock().lock();
-    try {
-      return ++catalogVersion_;
-    } finally {
-      catalogLock_.writeLock().unlock();
-    }
-  }
-
-  /**
-   * Returns the current Catalog version.
-   */
-  public long getCatalogVersion() {
-    catalogLock_.readLock().lock();
-    try {
-      return catalogVersion_;
-    } finally {
-      catalogLock_.readLock().unlock();
-    }
-  }
-
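incrementAndGetCatalogVersion() and getCatalogVersion() follow the usual read/write-lock discipline for a monotonically increasing counter: increments take the write lock, reads take the read lock. A self-contained sketch of just that piece, with names that are placeholders rather than catalog classes:

    import java.util.concurrent.locks.ReentrantReadWriteLock;

    // Sketch of a version counter guarded by a read/write lock.
    final class VersionCounter {
      private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
      private long version = 0;

      long incrementAndGet() {
        lock.writeLock().lock();
        try {
          return ++version;
        } finally {
          lock.writeLock().unlock();
        }
      }

      long get() {
        lock.readLock().lock();
        try {
          return version;
        } finally {
          lock.readLock().unlock();
        }
      }
    }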
-  public ReentrantReadWriteLock getLock() { return catalogLock_; }
-
-  /**
-   * Gets the next table ID and increments the table ID counter.
-   */
-  public TableId getNextTableId() { return new TableId(nextTableId_.getAndIncrement()); }
-  public SentryProxy getSentryProxy() { return sentryProxy_; }
-  public AuthorizationPolicy getAuthPolicy() { return authPolicy_; }
-
-  /**
-   * Reloads metadata for the partition defined by the partition spec
-   * 'partitionSpec' in table 'tbl'. Returns the table object with partition
-   * metadata reloaded.
-   */
-  public Table reloadPartition(Table tbl, List<TPartitionKeyValue> partitionSpec)
-      throws CatalogException {
-    catalogLock_.writeLock().lock();
-    synchronized (tbl) {
-      long newCatalogVersion = incrementAndGetCatalogVersion();
-      catalogLock_.writeLock().unlock();
-      HdfsTable hdfsTable = (HdfsTable) tbl;
-      HdfsPartition hdfsPartition = hdfsTable
-          .getPartitionFromThriftPartitionSpec(partitionSpec);
-      // Retrieve partition name from existing partition or construct it from
-      // the partition spec
-      String partitionName = hdfsPartition == null
-          ? HdfsTable.constructPartitionName(partitionSpec)
-          : hdfsPartition.getPartitionName();
-      LOG.debug(String.format("Refreshing Partition metadata: %s %s",
-          hdfsTable.getFullName(), partitionName));
-      try (MetaStoreClient msClient = getMetaStoreClient()) {
-        org.apache.hadoop.hive.metastore.api.Partition hmsPartition = null;
-        try {
-          hmsPartition = msClient.getHiveClient().getPartition(
-              hdfsTable.getDb().getName(), hdfsTable.getName(), partitionName);
-        } catch (NoSuchObjectException e) {
-          // If partition does not exist in Hive Metastore, remove it from the
-          // catalog
-          if (hdfsPartition != null) {
-            hdfsTable.dropPartition(partitionSpec);
-            hdfsTable.setCatalogVersion(newCatalogVersion);
-          }
-          return hdfsTable;
-        } catch (Exception e) {
-          throw new CatalogException("Error loading metadata for partition: "
-              + hdfsTable.getFullName() + " " + partitionName, e);
-        }
-        hdfsTable.reloadPartition(hdfsPartition, hmsPartition);
-      }
-      hdfsTable.setCatalogVersion(newCatalogVersion);
-      return hdfsTable;
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/catalog/Column.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/catalog/Column.java b/fe/src/main/java/com/cloudera/impala/catalog/Column.java
deleted file mode 100644
index b2d7416..0000000
--- a/fe/src/main/java/com/cloudera/impala/catalog/Column.java
+++ /dev/null
@@ -1,132 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.catalog;
-
-import java.util.List;
-
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.thrift.TColumn;
-import com.cloudera.impala.thrift.TColumnStats;
-import com.google.common.base.Function;
-import com.google.common.base.Objects;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-/**
- * Internal representation of column-related metadata.
- * Owned by Catalog instance.
- */
-public class Column {
-  private final static Logger LOG = LoggerFactory.getLogger(Column.class);
-
-  protected final String name_;
-  protected final Type type_;
-  protected final String comment_;
-  protected int position_;  // in table
-
-  protected final ColumnStats stats_;
-
-  public Column(String name, Type type, int position) {
-    this(name, type, null, position);
-  }
-
-  public Column(String name, Type type, String comment, int position) {
-    name_ = name;
-    type_ = type;
-    comment_ = comment;
-    position_ = position;
-    stats_ = new ColumnStats(type);
-  }
-
-  public String getComment() { return comment_; }
-  public String getName() { return name_; }
-  public Type getType() { return type_; }
-  public int getPosition() { return position_; }
-  public void setPosition(int position) { this.position_ = position; }
-  public ColumnStats getStats() { return stats_; }
-
-  public boolean updateStats(ColumnStatisticsData statsData) {
-    boolean statsDataCompatibleWithColType = stats_.update(type_, statsData);
-    LOG.debug("col stats: " + name_ + " #distinct=" + stats_.getNumDistinctValues());
-    return statsDataCompatibleWithColType;
-  }
-
-  public void updateStats(TColumnStats statsData) {
-    stats_.update(type_, statsData);
-  }
-
-  @Override
-  public String toString() {
-    return Objects.toStringHelper(this.getClass())
-                  .add("name_", name_)
-                  .add("type_", type_)
-                  .add("comment_", comment_)
-                  .add("stats", stats_)
-                  .add("position_", position_).toString();
-  }
-
-  public static Column fromThrift(TColumn columnDesc) {
-    String comment = columnDesc.isSetComment() ? columnDesc.getComment() : null;
-    Preconditions.checkState(columnDesc.isSetPosition());
-    int position = columnDesc.getPosition();
-    Column col;
-    if (columnDesc.isIs_hbase_column()) {
-      // HBase table column. The HBase column qualifier (column name) is not set for
-      // the HBase row key, so its presence in the thrift struct is not a precondition.
-      Preconditions.checkState(columnDesc.isSetColumn_family());
-      Preconditions.checkState(columnDesc.isSetIs_binary());
-      col = new HBaseColumn(columnDesc.getColumnName(), columnDesc.getColumn_family(),
-          columnDesc.getColumn_qualifier(), columnDesc.isIs_binary(),
-          Type.fromThrift(columnDesc.getColumnType()), comment, position);
-    } else if (columnDesc.isIs_kudu_column()) {
-      Preconditions.checkState(columnDesc.isSetIs_key());
-      Preconditions.checkState(columnDesc.isSetIs_nullable());
-      col = new KuduColumn(columnDesc.getColumnName(), columnDesc.isIs_key(),
-          columnDesc.isIs_nullable(),
-          Type.fromThrift(columnDesc.getColumnType()), comment, position);
-    } else {
-      // Hdfs table column.
-      col = new Column(columnDesc.getColumnName(),
-          Type.fromThrift(columnDesc.getColumnType()), comment, position);
-    }
-    if (columnDesc.isSetCol_stats()) col.updateStats(columnDesc.getCol_stats());
-    return col;
-  }
-
-  public TColumn toThrift() {
-    TColumn colDesc = new TColumn(name_, type_.toThrift());
-    if (comment_ != null) colDesc.setComment(comment_);
-    colDesc.setPosition(position_);
-    colDesc.setCol_stats(getStats().toThrift());
-    return colDesc;
-  }
-
-  public static List<FieldSchema> toFieldSchemas(List<Column> columns) {
-    return Lists.transform(columns, new Function<Column, FieldSchema>() {
-      public FieldSchema apply(Column column) {
-        Preconditions.checkNotNull(column.getType());
-        return new FieldSchema(column.getName(), column.getType().toSql(),
-            column.getComment());
-      }
-    });
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/catalog/ColumnNotFoundException.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/catalog/ColumnNotFoundException.java b/fe/src/main/java/com/cloudera/impala/catalog/ColumnNotFoundException.java
deleted file mode 100644
index 4ea47c1..0000000
--- a/fe/src/main/java/com/cloudera/impala/catalog/ColumnNotFoundException.java
+++ /dev/null
@@ -1,31 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.catalog;
-
-
-/**
- * Thrown when a column cannot be found in the catalog.
- */
-public class ColumnNotFoundException extends CatalogException {
-  // Dummy serial UID to avoid Eclipse warnings
-  private static final long serialVersionUID = -2203080667446640542L;
-
-  public ColumnNotFoundException(String s) { super(s); }
-
-  public ColumnNotFoundException(String s, Exception cause) { super(s, cause); }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/catalog/ColumnStats.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/catalog/ColumnStats.java b/fe/src/main/java/com/cloudera/impala/catalog/ColumnStats.java
deleted file mode 100644
index 8f8e4b3..0000000
--- a/fe/src/main/java/com/cloudera/impala/catalog/ColumnStats.java
+++ /dev/null
@@ -1,334 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.catalog;
-
-import java.util.Set;
-
-import org.apache.hadoop.hive.metastore.api.BinaryColumnStatsData;
-import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
-import org.apache.hadoop.hive.metastore.api.DecimalColumnStatsData;
-import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData;
-import org.apache.hadoop.hive.metastore.api.LongColumnStatsData;
-import org.apache.hadoop.hive.metastore.api.StringColumnStatsData;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.analysis.SlotRef;
-import com.cloudera.impala.thrift.TColumnStats;
-import com.google.common.base.Objects;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Sets;
-
-/**
- * Statistics for a single column.
- */
-public class ColumnStats {
-  private final static Logger LOG = LoggerFactory.getLogger(ColumnStats.class);
-
-  // Set of the currently supported column stats column types.
-  private final static Set<PrimitiveType> SUPPORTED_COL_TYPES = Sets.newHashSet(
-      PrimitiveType.BIGINT, PrimitiveType.BINARY, PrimitiveType.BOOLEAN,
-      PrimitiveType.DOUBLE, PrimitiveType.FLOAT, PrimitiveType.INT,
-      PrimitiveType.SMALLINT, PrimitiveType.CHAR, PrimitiveType.VARCHAR,
-      PrimitiveType.STRING, PrimitiveType.TIMESTAMP, PrimitiveType.TINYINT,
-      PrimitiveType.DECIMAL);
-
-  public enum StatsKey {
-    NUM_DISTINCT_VALUES("numDVs"),
-    NUM_NULLS("numNulls"),
-    AVG_SIZE("avgSize"),
-    MAX_SIZE("maxSize");
-
-    private final String name_;
-
-    private StatsKey(String name) { name_ = name; }
-
-    /**
-     * Returns the StatsKey whose name matches 'key'. The comparison is
-     * case insensitive. Returns null if there is no matching StatsKey.
-     */
-    public static StatsKey fromString(String key) {
-      for (StatsKey k: values()) {
-        if (key.equalsIgnoreCase(k.name_)) return k;
-      }
-      return null;
-    }
-
-    @Override
-    public String toString() { return name_; }
-  }
-
-  // in bytes: excludes serialization overhead
-  private double avgSize_;
-  // in bytes; includes serialization overhead.
-  private double avgSerializedSize_;
-  private long maxSize_;  // in bytes
-  private long numDistinctValues_;
-  private long numNulls_;
-
-  public ColumnStats(Type colType) {
-    initColStats(colType);
-  }
-
-  /**
-   * C'tor for clone().
-   */
-  private ColumnStats(ColumnStats other) {
-    avgSize_ = other.avgSize_;
-    avgSerializedSize_ = other.avgSerializedSize_;
-    maxSize_ = other.maxSize_;
-    numDistinctValues_ = other.numDistinctValues_;
-    numNulls_ = other.numNulls_;
-  }
-
-  /**
-   * Initializes all column stats values as "unknown". For fixed-length types
-   * (those which don't need additional storage besides the slot they occupy),
-   * sets avgSize, avgSerializedSize, and maxSize to the slot size.
-   */
-  private void initColStats(Type colType) {
-    avgSize_ = -1;
-    avgSerializedSize_ = -1;
-    maxSize_ = -1;
-    numDistinctValues_ = -1;
-    numNulls_ = -1;
-    if (colType.isFixedLengthType()) {
-      avgSerializedSize_ = colType.getSlotSize();
-      avgSize_ = colType.getSlotSize();
-      maxSize_ = colType.getSlotSize();
-    }
-  }
-
-  /**
-   * Creates ColumnStats from the given expr. Sets numDistinctValues and, if the expr
-   * wraps a SlotRef, also copies the null count and size stats from the slot.
-   */
-  public static ColumnStats fromExpr(Expr expr) {
-    Preconditions.checkNotNull(expr);
-    Preconditions.checkState(expr.getType().isValid());
-    ColumnStats stats = new ColumnStats(expr.getType());
-    stats.setNumDistinctValues(expr.getNumDistinctValues());
-    SlotRef slotRef = expr.unwrapSlotRef(false);
-    if (slotRef == null) return stats;
-    ColumnStats slotStats = slotRef.getDesc().getStats();
-    if (slotStats == null) return stats;
-    stats.numNulls_ = slotStats.getNumNulls();
-    stats.avgSerializedSize_ = slotStats.getAvgSerializedSize();
-    stats.avgSize_ = slotStats.getAvgSize();
-    stats.maxSize_ = slotStats.getMaxSize();
-    return stats;
-  }
-
-  /**
-   * Adds other's numDistinctValues and numNulls to this ColumnStats.
-   * If this or other's stats are invalid, sets the corresponding stat to invalid.
-   * Returns this with the updated stats.
-   * This method is used to aggregate stats for slots that originate from multiple
-   * source slots, e.g., those produced by union queries.
-   */
-  public ColumnStats add(ColumnStats other) {
-    if (numDistinctValues_ == -1 || other.numDistinctValues_ == -1) {
-      numDistinctValues_ = -1;
-    } else {
-      numDistinctValues_ += other.numDistinctValues_;
-    }
-    if (numNulls_ == -1 || other.numNulls_ == -1) {
-      numNulls_ = -1;
-    } else {
-      numNulls_ += other.numNulls_;
-    }
-    return this;
-  }
-
-  public void setAvgSize(float avgSize) { avgSize_ = avgSize; }
-  public void setAvgSerializedSize(float avgSize) { avgSerializedSize_ = avgSize; }
-  public void setMaxSize(long maxSize) { maxSize_ = maxSize; }
-  public long getNumDistinctValues() { return numDistinctValues_; }
-  public void setNumDistinctValues(long numDistinctValues) {
-    this.numDistinctValues_ = numDistinctValues;
-  }
-  public void setNumNulls(long numNulls) { numNulls_ = numNulls; }
-  public double getAvgSerializedSize() { return avgSerializedSize_; }
-  public double getAvgSize() { return avgSize_; }
-  public long getMaxSize() { return maxSize_; }
-  public boolean hasNulls() { return numNulls_ > 0; }
-  public long getNumNulls() { return numNulls_; }
-  public boolean hasAvgSerializedSize() { return avgSerializedSize_ >= 0; }
-  public boolean hasMaxSize() { return maxSize_ >= 0; }
-  public boolean hasNumDistinctValues() { return numDistinctValues_ >= 0; }
-  public boolean hasStats() { return numNulls_ != -1 || numDistinctValues_ != -1; }
-
-  /**
-   * Updates the stats with the given ColumnStatisticsData. If the ColumnStatisticsData
-   * is not compatible with the given colType, all stats are initialized based on
-   * initColStats().
-   * Returns false if the ColumnStatisticsData was incompatible with the given
-   * column type, otherwise returns true.
-   */
-  public boolean update(Type colType, ColumnStatisticsData statsData) {
-    Preconditions.checkState(isSupportedColType(colType));
-    initColStats(colType);
-    boolean isCompatible = false;
-    switch (colType.getPrimitiveType()) {
-      case BOOLEAN:
-        isCompatible = statsData.isSetBooleanStats();
-        if (isCompatible) {
-          BooleanColumnStatsData boolStats = statsData.getBooleanStats();
-          numNulls_ = boolStats.getNumNulls();
-          numDistinctValues_ = (numNulls_ > 0) ? 3 : 2;
-        }
-        break;
-      case TINYINT:
-      case SMALLINT:
-      case INT:
-      case BIGINT:
-      case TIMESTAMP: // Hive and Impala use LongColumnStatsData for timestamps.
-        isCompatible = statsData.isSetLongStats();
-        if (isCompatible) {
-          LongColumnStatsData longStats = statsData.getLongStats();
-          numDistinctValues_ = longStats.getNumDVs();
-          numNulls_ = longStats.getNumNulls();
-        }
-        break;
-      case FLOAT:
-      case DOUBLE:
-        isCompatible = statsData.isSetDoubleStats();
-        if (isCompatible) {
-          DoubleColumnStatsData doubleStats = statsData.getDoubleStats();
-          numDistinctValues_ = doubleStats.getNumDVs();
-          numNulls_ = doubleStats.getNumNulls();
-        }
-        break;
-      case CHAR:
-      case VARCHAR:
-      case STRING:
-        isCompatible = statsData.isSetStringStats();
-        if (isCompatible) {
-          StringColumnStatsData stringStats = statsData.getStringStats();
-          numDistinctValues_ = stringStats.getNumDVs();
-          numNulls_ = stringStats.getNumNulls();
-          maxSize_ = stringStats.getMaxColLen();
-          avgSize_ = Double.valueOf(stringStats.getAvgColLen()).floatValue();
-          avgSerializedSize_ = avgSize_ + PrimitiveType.STRING.getSlotSize();
-        }
-        break;
-      case BINARY:
-        isCompatible = statsData.isSetBinaryStats();
-        if (isCompatible) {
-          BinaryColumnStatsData binaryStats = statsData.getBinaryStats();
-          numNulls_ = binaryStats.getNumNulls();
-          maxSize_ = binaryStats.getMaxColLen();
-          avgSize_ = Double.valueOf(binaryStats.getAvgColLen()).floatValue();
-          avgSerializedSize_ = avgSize_ + PrimitiveType.BINARY.getSlotSize();
-        }
-        break;
-      case DECIMAL:
-        isCompatible = statsData.isSetDecimalStats();
-        if (isCompatible) {
-          DecimalColumnStatsData decimalStats = statsData.getDecimalStats();
-          numNulls_ = decimalStats.getNumNulls();
-          numDistinctValues_ = decimalStats.getNumDVs();
-        }
-        break;
-      default:
-        Preconditions.checkState(false,
-            "Unexpected column type: " + colType.toString());
-        break;
-    }
-    return isCompatible;
-  }
-
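update(Type, ColumnStatisticsData) is essentially a per-type dispatch on which field of the HMS thrift union is set. The sketch below shows the integer-like case in isolation, assuming only the Hive metastore API classes; an unset longStats field is treated as incompatible and the counts stay at the -1 "unknown" sentinels used throughout this class.

    import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
    import org.apache.hadoop.hive.metastore.api.LongColumnStatsData;

    // Sketch of the integer-like branch: usable only if the thrift union carries
    // longStats; otherwise return the "unknown" sentinels.
    final class LongStatsMapper {
      /** Returns {numDistinctValues, numNulls}, or {-1, -1} if incompatible. */
      static long[] ndvAndNulls(ColumnStatisticsData statsData) {
        if (!statsData.isSetLongStats()) return new long[] {-1, -1};
        LongColumnStatsData longStats = statsData.getLongStats();
        return new long[] { longStats.getNumDVs(), longStats.getNumNulls() };
      }
    }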
-  /**
-   * Sets the member corresponding to the given stats key to 'value'.
-   * Requires that the given value is of a type appropriate for the
-   * member being set. Throws if that is not the case.
-   */
-  public void update(StatsKey key, Number value) {
-    Preconditions.checkNotNull(key);
-    Preconditions.checkNotNull(value);
-    if (key == StatsKey.AVG_SIZE) {
-      Preconditions.checkArgument(value instanceof Float);
-    } else {
-      Preconditions.checkArgument(value instanceof Long);
-    }
-    switch (key) {
-      case NUM_DISTINCT_VALUES: {
-        numDistinctValues_ = (Long) value;
-        break;
-      }
-      case NUM_NULLS: {
-        numNulls_ = (Long) value;
-        break;
-      }
-      case AVG_SIZE: {
-        avgSize_ = (Float) value;
-        break;
-      }
-      case MAX_SIZE: {
-        maxSize_ = (Long) value;
-        break;
-      }
-      default: Preconditions.checkState(false);
-    }
-  }
-
-  /**
-   * Returns true if the given column type supports column stats updates.
-   */
-  public static boolean isSupportedColType(Type colType) {
-    if (!colType.isScalarType()) return false;
-    ScalarType scalarType = (ScalarType) colType;
-    return SUPPORTED_COL_TYPES.contains(scalarType.getPrimitiveType());
-  }
-
-  public void update(Type colType, TColumnStats stats) {
-    initColStats(colType);
-    avgSize_ = Double.valueOf(stats.getAvg_size()).floatValue();
-    if (colType.getPrimitiveType() == PrimitiveType.STRING ||
-        colType.getPrimitiveType() == PrimitiveType.BINARY) {
-      avgSerializedSize_ = colType.getSlotSize() + avgSize_;
-    }
-    maxSize_ = stats.getMax_size();
-    numDistinctValues_ = stats.getNum_distinct_values();
-    numNulls_ = stats.getNum_nulls();
-  }
-
-  public TColumnStats toThrift() {
-    TColumnStats colStats = new TColumnStats();
-    colStats.setAvg_size(avgSize_);
-    colStats.setMax_size(maxSize_);
-    colStats.setNum_distinct_values(numDistinctValues_);
-    colStats.setNum_nulls(numNulls_);
-    return colStats;
-  }
-
-  @Override
-  public String toString() {
-    return Objects.toStringHelper(this.getClass())
-        .add("avgSerializedSize_", avgSerializedSize_)
-        .add("maxSize_", maxSize_)
-        .add("numDistinct_", numDistinctValues_)
-        .add("numNulls_", numNulls_)
-        .toString();
-  }
-
-  @Override
-  public ColumnStats clone() { return new ColumnStats(this); }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/catalog/DataSource.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/catalog/DataSource.java b/fe/src/main/java/com/cloudera/impala/catalog/DataSource.java
deleted file mode 100644
index ed0d9ee..0000000
--- a/fe/src/main/java/com/cloudera/impala/catalog/DataSource.java
+++ /dev/null
@@ -1,88 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.catalog;
-
-import org.apache.hadoop.fs.Path;
-
-import com.cloudera.impala.thrift.TCatalogObjectType;
-import com.cloudera.impala.thrift.TDataSource;
-import com.google.common.base.Objects;
-
-/**
- * Represents a data source in the catalog. Contains the data source name and all
- * information needed to locate and load the data source.
- */
-public class DataSource implements CatalogObject {
-  private final String dataSrcName_;
-  private final String className_;
-  private final String apiVersionString_;
-  // Qualified path to the data source.
-  private final String location_;
-  private long catalogVersion_ = Catalog.INITIAL_CATALOG_VERSION;
-
-  public DataSource(String dataSrcName, String location, String className,
-      String apiVersionString) {
-    dataSrcName_ = dataSrcName;
-    location_ = location;
-    className_ = className;
-    apiVersionString_ = apiVersionString;
-  }
-
-  public static DataSource fromThrift(TDataSource thrift) {
-    return new DataSource(thrift.getName(), thrift.getHdfs_location(),
-        thrift.getClass_name(), thrift.getApi_version());
-  }
-
-  @Override
-  public TCatalogObjectType getCatalogObjectType() {
-    return TCatalogObjectType.DATA_SOURCE;
-  }
-
-  @Override
-  public long getCatalogVersion() { return catalogVersion_; }
-
-  @Override
-  public void setCatalogVersion(long newVersion) { catalogVersion_ = newVersion; }
-
-  @Override
-  public String getName() { return dataSrcName_; }
-
-  @Override
-  public boolean isLoaded() { return true; }
-
-  public String getLocation() { return location_; }
-  public String getClassName() { return className_; }
-  public String getApiVersion() { return apiVersionString_; }
-
-  public TDataSource toThrift() {
-    return new TDataSource(getName(), location_, className_, apiVersionString_);
-  }
-
-  public String debugString() {
-    return Objects.toStringHelper(this)
-        .add("name", dataSrcName_)
-        .add("location", location_)
-        .add("className", className_)
-        .add("apiVersion", apiVersionString_)
-        .toString();
-  }
-
-  public static String debugString(TDataSource thrift) {
-    return fromThrift(thrift).debugString();
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/catalog/DataSourceTable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/catalog/DataSourceTable.java b/fe/src/main/java/com/cloudera/impala/catalog/DataSourceTable.java
deleted file mode 100644
index c42c804..0000000
--- a/fe/src/main/java/com/cloudera/impala/catalog/DataSourceTable.java
+++ /dev/null
@@ -1,259 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.catalog;
-
-import java.util.List;
-import java.util.Set;
-
-import org.apache.hadoop.hive.metastore.IMetaStoreClient;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.extdatasource.v1.ExternalDataSource;
-import com.cloudera.impala.thrift.TCatalogObjectType;
-import com.cloudera.impala.thrift.TColumn;
-import com.cloudera.impala.thrift.TDataSource;
-import com.cloudera.impala.thrift.TDataSourceTable;
-import com.cloudera.impala.thrift.TResultSet;
-import com.cloudera.impala.thrift.TResultSetMetadata;
-import com.cloudera.impala.thrift.TTable;
-import com.cloudera.impala.thrift.TTableDescriptor;
-import com.cloudera.impala.thrift.TTableType;
-import com.cloudera.impala.util.TResultRowBuilder;
-import com.google.common.base.Preconditions;
-
-/**
- * Represents a table backed by an external data source. All data source properties are
- * stored as table properties (persisted in the metastore) because the DataSource
- * catalog object itself is not persisted: it will not survive a catalog server restart,
- * and the table must remain scannable without it. Tables that contain the
- * TBL_PROP_DATA_SRC_NAME table parameter are assumed to be backed by an external
- * data source.
- */
-public class DataSourceTable extends Table {
-  private final static Logger LOG = LoggerFactory.getLogger(DataSourceTable.class);
-
-  /**
-   * Table property key for the data source name.
-   */
-  public static final String TBL_PROP_DATA_SRC_NAME = "__IMPALA_DATA_SOURCE_NAME";
-
-  /**
-   * Table property key for the table init string.
-   */
-  public static final String TBL_PROP_INIT_STRING = "__IMPALA_DATA_SOURCE_INIT_STRING";
-
-  /**
-   * Table property key for the data source library HDFS path.
-   */
-  public static final String TBL_PROP_LOCATION = "__IMPALA_DATA_SOURCE_LOCATION";
-
-  /**
-   * Table property key for the class implementing {@link ExternalDataSource}.
-   */
-  public static final String TBL_PROP_CLASS = "__IMPALA_DATA_SOURCE_CLASS";
-
-  /**
-   * Table property key for the API version implemented by the data source.
-   */
-  public static final String TBL_PROP_API_VER = "__IMPALA_DATA_SOURCE_API_VERSION";
-
-  private String initString_;
-  private TDataSource dataSource_;
-
-  protected DataSourceTable(
-      TableId id, org.apache.hadoop.hive.metastore.api.Table msTable,
-      Db db, String name, String owner) {
-    super(id, msTable, db, name, owner);
-  }
-
-  /**
-   * Gets the data source.
-   */
-  public TDataSource getDataSource() { return dataSource_; }
-
-  /**
-   * Gets the table init string passed to the data source.
-   */
-  public String getInitString() { return initString_; }
-
-  public int getNumNodes() { return 1; }
-
-  @Override
-  public TCatalogObjectType getCatalogObjectType() { return TCatalogObjectType.TABLE; }
-
-  /**
-   * Returns true if the column type is supported.
-   */
-  public static boolean isSupportedColumnType(Type colType) {
-    Preconditions.checkNotNull(colType);
-    return isSupportedPrimitiveType(colType.getPrimitiveType());
-  }
-
-  /**
-   * Returns true if the primitive type is supported.
-   */
-  public static boolean isSupportedPrimitiveType(PrimitiveType primitiveType) {
-    Preconditions.checkNotNull(primitiveType);
-    switch (primitiveType) {
-      case BIGINT:
-      case INT:
-      case SMALLINT:
-      case TINYINT:
-      case DOUBLE:
-      case FLOAT:
-      case BOOLEAN:
-      case STRING:
-      case TIMESTAMP:
-      case DECIMAL:
-        return true;
-      case BINARY:
-      case CHAR:
-      case DATE:
-      case DATETIME:
-      case INVALID_TYPE:
-      case NULL_TYPE:
-      default:
-        return false;
-    }
-  }
-
-  /**
-   * Create columns corresponding to fieldSchemas.
-   * Throws a TableLoadingException if the metadata is incompatible with what we
-   * support.
-   */
-  private void loadColumns(List<FieldSchema> fieldSchemas, IMetaStoreClient client)
-      throws TableLoadingException {
-    int pos = 0;
-    for (FieldSchema s: fieldSchemas) {
-      Column col = new Column(s.getName(), parseColumnType(s), s.getComment(), pos);
-      Preconditions.checkArgument(isSupportedColumnType(col.getType()));
-      addColumn(col);
-      ++pos;
-    }
-  }
-
-  @Override
-  protected void loadFromThrift(TTable thriftTable) throws TableLoadingException {
-    super.loadFromThrift(thriftTable);
-    TDataSourceTable dataSourceTable = thriftTable.getData_source_table();
-    initString_ = dataSourceTable.getInit_string();
-    dataSource_ = dataSourceTable.getData_source();
-  }
-
-  @Override
-  public void load(boolean reuseMetadata, IMetaStoreClient client,
-      org.apache.hadoop.hive.metastore.api.Table msTbl) throws TableLoadingException {
-    Preconditions.checkNotNull(msTbl);
-    msTable_ = msTbl;
-    clearColumns();
-    LOG.debug("load table: " + db_.getName() + "." + name_);
-    String dataSourceName = getRequiredTableProperty(msTbl, TBL_PROP_DATA_SRC_NAME, null);
-    String location = getRequiredTableProperty(msTbl, TBL_PROP_LOCATION, dataSourceName);
-    String className = getRequiredTableProperty(msTbl, TBL_PROP_CLASS, dataSourceName);
-    String apiVersionString = getRequiredTableProperty(msTbl, TBL_PROP_API_VER,
-        dataSourceName);
-    dataSource_ = new TDataSource(dataSourceName, location, className, apiVersionString);
-    initString_ = getRequiredTableProperty(msTbl, TBL_PROP_INIT_STRING, dataSourceName);
-
-    if (msTbl.getPartitionKeysSize() > 0) {
-      throw new TableLoadingException("Data source table cannot contain clustering " +
-          "columns: " + name_);
-    }
-    numClusteringCols_ = 0;
-
-    try {
-      // Create column objects.
-      List<FieldSchema> fieldSchemas = getMetaStoreTable().getSd().getCols();
-      loadColumns(fieldSchemas, client);
-
-      // Set table stats.
-      numRows_ = getRowCount(super.getMetaStoreTable().getParameters());
-    } catch (Exception e) {
-      throw new TableLoadingException("Failed to load metadata for data source table: " +
-          name_, e);
-    }
-  }
-
-  private String getRequiredTableProperty(
-      org.apache.hadoop.hive.metastore.api.Table msTbl, String key, String dataSourceName)
-      throws TableLoadingException {
-    String val = msTbl.getParameters().get(key);
-    if (val == null) {
-      throw new TableLoadingException(String.format("Failed to load table %s produced " +
-          "by external data source %s. Missing required metadata: %s", name_,
-          dataSourceName == null ? "<unknown>" : dataSourceName, key));
-    }
-    return val;
-  }
-
-  /**
-   * Returns statistics on this table as a tabular result set. Used for the
-   * SHOW TABLE STATS statement. The schema of the returned TResultSet is set
-   * inside this method.
-   */
-  public TResultSet getTableStats() {
-    TResultSet result = new TResultSet();
-    TResultSetMetadata resultSchema = new TResultSetMetadata();
-    resultSchema.addToColumns(new TColumn("#Rows", Type.BIGINT.toThrift()));
-    result.setSchema(resultSchema);
-    TResultRowBuilder rowBuilder = new TResultRowBuilder();
-    rowBuilder.add(numRows_);
-    result.addToRows(rowBuilder.get());
-    return result;
-  }
-
-  @Override
-  public TTableDescriptor toThriftDescriptor(Set<Long> referencedPartitions) {
-    TTableDescriptor tableDesc = new TTableDescriptor(id_.asInt(),
-        TTableType.DATA_SOURCE_TABLE, getTColumnDescriptors(), numClusteringCols_,
-        name_, db_.getName());
-    tableDesc.setDataSourceTable(getDataSourceTable());
-    return tableDesc;
-  }
-
-  /**
-   * Returns a thrift structure representing the table.
-   */
-  @Override
-  public TTable toThrift() {
-    TTable table = super.toThrift();
-    table.setTable_type(TTableType.DATA_SOURCE_TABLE);
-    table.setData_source_table(getDataSourceTable());
-    return table;
-  }
-
-  /**
-   * Returns a thrift {@link TDataSourceTable} structure for the data source table.
-   */
-  private TDataSourceTable getDataSourceTable() {
-    return new TDataSourceTable(dataSource_, initString_);
-  }
-
-  /**
-   * Returns true if the Hive {@link org.apache.hadoop.hive.metastore.api.Table} is a
-   * data source table, determined by checking for the existence of the
-   * TBL_PROP_DATA_SRC_NAME table property.
-   */
-  public static boolean isDataSourceTable(
-      org.apache.hadoop.hive.metastore.api.Table msTbl) {
-    return msTbl.getParameters().containsKey(TBL_PROP_DATA_SRC_NAME);
-  }
-}

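For readers following the rename: the class comment at the top of the deleted DataSourceTable.java above describes how an external-data-source table is recognized purely through metastore table properties (the TBL_PROP_* keys), with missing required keys treated as a load error. A minimal, self-contained sketch of that lookup pattern, assuming a plain java.util.Map in place of the Hive metastore Table's parameter map and using sample property values that are purely illustrative (the key strings themselves are the ones defined in the file above), looks like this:

import java.util.HashMap;
import java.util.Map;

public class DataSourcePropsSketch {
  // Property keys as defined in the deleted DataSourceTable.java above.
  static final String TBL_PROP_DATA_SRC_NAME = "__IMPALA_DATA_SOURCE_NAME";
  static final String TBL_PROP_LOCATION = "__IMPALA_DATA_SOURCE_LOCATION";
  static final String TBL_PROP_CLASS = "__IMPALA_DATA_SOURCE_CLASS";
  static final String TBL_PROP_API_VER = "__IMPALA_DATA_SOURCE_API_VERSION";
  static final String TBL_PROP_INIT_STRING = "__IMPALA_DATA_SOURCE_INIT_STRING";

  // A table is treated as data-source backed iff the marker property is present.
  static boolean isDataSourceTable(Map<String, String> tblProps) {
    return tblProps.containsKey(TBL_PROP_DATA_SRC_NAME);
  }

  // Mirrors getRequiredTableProperty(): a missing key is an error, never a default.
  static String requiredProp(Map<String, String> tblProps, String key) {
    String val = tblProps.get(key);
    if (val == null) {
      throw new IllegalStateException("Missing required metadata: " + key);
    }
    return val;
  }

  public static void main(String[] args) {
    // Hypothetical values, for illustration only.
    Map<String, String> props = new HashMap<>();
    props.put(TBL_PROP_DATA_SRC_NAME, "my_data_source");
    props.put(TBL_PROP_LOCATION, "/user/impala/datasources/libdatasource.so");
    props.put(TBL_PROP_CLASS, "org.apache.impala.extdatasource.SampleDataSource");
    props.put(TBL_PROP_API_VER, "V1");
    props.put(TBL_PROP_INIT_STRING, "init-config");

    if (isDataSourceTable(props)) {
      System.out.println("data source: " + requiredProp(props, TBL_PROP_DATA_SRC_NAME));
      System.out.println("class:       " + requiredProp(props, TBL_PROP_CLASS));
    }
  }
}

Because the marker property is the only signal, dropping it from the table properties is enough to make the same metastore table load as a regular table again.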


[53/61] incubator-impala git commit: IMPALA-3786: Replace "cloudera" with "apache" (part 2)

Posted by kw...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/util/HdfsCachingUtil.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/util/HdfsCachingUtil.java b/fe/src/main/java/org/apache/impala/util/HdfsCachingUtil.java
index a3a1fa0..0ee7d28 100644
--- a/fe/src/main/java/org/apache/impala/util/HdfsCachingUtil.java
+++ b/fe/src/main/java/org/apache/impala/util/HdfsCachingUtil.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.util;
+package org.apache.impala.util;
 
 import java.io.IOException;
 import java.util.Map;
@@ -30,13 +30,13 @@ import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo.Expiration;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.log4j.Logger;
 
-import com.cloudera.impala.analysis.TableName;
-import com.cloudera.impala.catalog.HdfsPartition;
-import com.cloudera.impala.common.FileSystemUtil;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.common.ImpalaRuntimeException;
-import com.cloudera.impala.thrift.JniCatalogConstants;
-import com.cloudera.impala.thrift.THdfsCachingOp;
+import org.apache.impala.analysis.TableName;
+import org.apache.impala.catalog.HdfsPartition;
+import org.apache.impala.common.FileSystemUtil;
+import org.apache.impala.common.ImpalaException;
+import org.apache.impala.common.ImpalaRuntimeException;
+import org.apache.impala.thrift.JniCatalogConstants;
+import org.apache.impala.thrift.THdfsCachingOp;
 import com.google.common.base.Preconditions;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/util/KuduUtil.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/util/KuduUtil.java b/fe/src/main/java/org/apache/impala/util/KuduUtil.java
index 4f627d8..b9f8653 100644
--- a/fe/src/main/java/org/apache/impala/util/KuduUtil.java
+++ b/fe/src/main/java/org/apache/impala/util/KuduUtil.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.util;
+package org.apache.impala.util;
 
 import java.io.StringReader;
 import java.util.HashMap;
@@ -25,11 +25,11 @@ import javax.json.Json;
 import javax.json.JsonArray;
 import javax.json.JsonReader;
 
-import com.cloudera.impala.catalog.ScalarType;
-import com.cloudera.impala.common.ImpalaRuntimeException;
-import com.cloudera.impala.thrift.TDistributeByRangeParam;
-import com.cloudera.impala.thrift.TRangeLiteral;
-import com.cloudera.impala.thrift.TRangeLiteralList;
+import org.apache.impala.catalog.ScalarType;
+import org.apache.impala.common.ImpalaRuntimeException;
+import org.apache.impala.thrift.TDistributeByRangeParam;
+import org.apache.impala.thrift.TRangeLiteral;
+import org.apache.impala.thrift.TRangeLiteralList;
 import com.google.common.base.Splitter;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Lists;
@@ -43,7 +43,7 @@ import org.apache.kudu.Type;
 import org.apache.kudu.client.KuduTable;
 import org.apache.kudu.client.PartialRow;
 
-import static com.cloudera.impala.catalog.Type.parseColumnType;
+import static org.apache.impala.catalog.Type.parseColumnType;
 import static java.lang.String.format;
 
 public class KuduUtil {
@@ -224,7 +224,7 @@ public class KuduUtil {
    * Converts a given Impala catalog type to the Kudu type. Throws an exception if the
    * type cannot be converted.
    */
-  public static Type fromImpalaType(com.cloudera.impala.catalog.Type t)
+  public static Type fromImpalaType(org.apache.impala.catalog.Type t)
       throws ImpalaRuntimeException {
     if (!t.isScalarType()) {
       throw new ImpalaRuntimeException(format(

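The javadoc in the KuduUtil hunk above describes fromImpalaType(), which converts an Impala catalog type to the corresponding Kudu type and throws when no conversion exists. A rough sketch of that contract, assuming plain strings in place of the real org.apache.impala.catalog.Type and org.apache.kudu.Type enums and an illustrative (not exhaustive or authoritative) mapping table, might look like this:

import java.util.HashMap;
import java.util.Map;

public class TypeMappingSketch {
  // Illustrative scalar pairings only; the authoritative table lives in KuduUtil itself.
  private static final Map<String, String> IMPALA_TO_KUDU = new HashMap<>();
  static {
    IMPALA_TO_KUDU.put("BOOLEAN", "BOOL");
    IMPALA_TO_KUDU.put("TINYINT", "INT8");
    IMPALA_TO_KUDU.put("SMALLINT", "INT16");
    IMPALA_TO_KUDU.put("INT", "INT32");
    IMPALA_TO_KUDU.put("BIGINT", "INT64");
    IMPALA_TO_KUDU.put("FLOAT", "FLOAT");
    IMPALA_TO_KUDU.put("DOUBLE", "DOUBLE");
    IMPALA_TO_KUDU.put("STRING", "STRING");
  }

  // Same shape as the documented contract: convert or fail loudly, never guess.
  static String fromImpalaType(String impalaType) {
    String kuduType = IMPALA_TO_KUDU.get(impalaType);
    if (kuduType == null) {
      throw new IllegalArgumentException(
          "Type " + impalaType + " is not supported in this sketch's mapping");
    }
    return kuduType;
  }

  public static void main(String[] args) {
    System.out.println(fromImpalaType("BIGINT"));  // prints INT64
    try {
      fromImpalaType("DECIMAL");                   // not in the illustrative table
    } catch (IllegalArgumentException e) {
      System.out.println(e.getMessage());
    }
  }
}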
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/util/ListMap.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/util/ListMap.java b/fe/src/main/java/org/apache/impala/util/ListMap.java
index 989a510..07de611 100644
--- a/fe/src/main/java/org/apache/impala/util/ListMap.java
+++ b/fe/src/main/java/org/apache/impala/util/ListMap.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.util;
+package org.apache.impala.util;
 
 import java.util.ArrayList;
 import java.util.List;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/util/MaxRowsProcessedVisitor.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/util/MaxRowsProcessedVisitor.java b/fe/src/main/java/org/apache/impala/util/MaxRowsProcessedVisitor.java
index ac85ff8..d4b3da9 100644
--- a/fe/src/main/java/org/apache/impala/util/MaxRowsProcessedVisitor.java
+++ b/fe/src/main/java/org/apache/impala/util/MaxRowsProcessedVisitor.java
@@ -15,12 +15,12 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.util;
+package org.apache.impala.util;
 
-import com.cloudera.impala.planner.NestedLoopJoinNode;
-import com.cloudera.impala.planner.HashJoinNode;
-import com.cloudera.impala.planner.PlanNode;
-import com.cloudera.impala.planner.ScanNode;
+import org.apache.impala.planner.NestedLoopJoinNode;
+import org.apache.impala.planner.HashJoinNode;
+import org.apache.impala.planner.PlanNode;
+import org.apache.impala.planner.ScanNode;
 
 /**
  * Returns the maximum number of rows processed by any node in a given plan tree

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/util/MembershipSnapshot.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/util/MembershipSnapshot.java b/fe/src/main/java/org/apache/impala/util/MembershipSnapshot.java
index 57574fe..24d1412 100644
--- a/fe/src/main/java/org/apache/impala/util/MembershipSnapshot.java
+++ b/fe/src/main/java/org/apache/impala/util/MembershipSnapshot.java
@@ -15,13 +15,13 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.util;
+package org.apache.impala.util;
 
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicReference;
 
-import com.cloudera.impala.thrift.TNetworkAddress;
-import com.cloudera.impala.thrift.TUpdateMembershipRequest;
+import org.apache.impala.thrift.TNetworkAddress;
+import org.apache.impala.thrift.TUpdateMembershipRequest;
 import com.google.common.collect.Sets;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/util/MetaStoreUtil.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/util/MetaStoreUtil.java b/fe/src/main/java/org/apache/impala/util/MetaStoreUtil.java
index 540c749..6968f33 100644
--- a/fe/src/main/java/org/apache/impala/util/MetaStoreUtil.java
+++ b/fe/src/main/java/org/apache/impala/util/MetaStoreUtil.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.util;
+package org.apache.impala.util;
 
 import java.util.List;
 import java.util.Map;
@@ -27,8 +27,8 @@ import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.log4j.Logger;
 import org.apache.thrift.TException;
 
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.common.AnalysisException;
+import org.apache.impala.catalog.HdfsTable;
+import org.apache.impala.common.AnalysisException;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/util/NativeLibUtil.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/util/NativeLibUtil.java b/fe/src/main/java/org/apache/impala/util/NativeLibUtil.java
index 08c929b..877b36b 100644
--- a/fe/src/main/java/org/apache/impala/util/NativeLibUtil.java
+++ b/fe/src/main/java/org/apache/impala/util/NativeLibUtil.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.util;
+package org.apache.impala.util;
 
 import java.io.File;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/util/NativeLogger.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/util/NativeLogger.java b/fe/src/main/java/org/apache/impala/util/NativeLogger.java
index 9956ca4..ebba884 100644
--- a/fe/src/main/java/org/apache/impala/util/NativeLogger.java
+++ b/fe/src/main/java/org/apache/impala/util/NativeLogger.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.util;
+package org.apache.impala.util;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/util/PatternMatcher.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/util/PatternMatcher.java b/fe/src/main/java/org/apache/impala/util/PatternMatcher.java
index bc85d18..37fa208 100644
--- a/fe/src/main/java/org/apache/impala/util/PatternMatcher.java
+++ b/fe/src/main/java/org/apache/impala/util/PatternMatcher.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.util;
+package org.apache.impala.util;
 
 import java.util.Arrays;
 import java.util.Collections;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/util/RequestPoolService.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/util/RequestPoolService.java b/fe/src/main/java/org/apache/impala/util/RequestPoolService.java
index c1c9bd7..c1e8224 100644
--- a/fe/src/main/java/org/apache/impala/util/RequestPoolService.java
+++ b/fe/src/main/java/org/apache/impala/util/RequestPoolService.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.util;
+package org.apache.impala.util;
 
 import java.io.File;
 import java.io.IOException;
@@ -37,18 +37,18 @@ import org.apache.thrift.protocol.TBinaryProtocol;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.authorization.User;
-import com.cloudera.impala.common.ByteUnits;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.common.JniUtil;
-import com.cloudera.impala.thrift.TErrorCode;
-import com.cloudera.impala.thrift.TPoolConfigParams;
-import com.cloudera.impala.thrift.TPoolConfig;
-import com.cloudera.impala.thrift.TResolveRequestPoolParams;
-import com.cloudera.impala.thrift.TResolveRequestPoolResult;
-import com.cloudera.impala.thrift.TStatus;
-import com.cloudera.impala.util.FileWatchService.FileChangeListener;
+import org.apache.impala.authorization.User;
+import org.apache.impala.common.ByteUnits;
+import org.apache.impala.common.ImpalaException;
+import org.apache.impala.common.InternalException;
+import org.apache.impala.common.JniUtil;
+import org.apache.impala.thrift.TErrorCode;
+import org.apache.impala.thrift.TPoolConfigParams;
+import org.apache.impala.thrift.TPoolConfig;
+import org.apache.impala.thrift.TResolveRequestPoolParams;
+import org.apache.impala.thrift.TResolveRequestPoolResult;
+import org.apache.impala.thrift.TStatus;
+import org.apache.impala.util.FileWatchService.FileChangeListener;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Strings;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/util/SentryPolicyService.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/util/SentryPolicyService.java b/fe/src/main/java/org/apache/impala/util/SentryPolicyService.java
index 4f39b3c..083ad48 100644
--- a/fe/src/main/java/org/apache/impala/util/SentryPolicyService.java
+++ b/fe/src/main/java/org/apache/impala/util/SentryPolicyService.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.util;
+package org.apache.impala.util;
 
 import java.util.List;
 
@@ -30,16 +30,16 @@ import org.apache.sentry.service.thrift.SentryServiceClientFactory;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.analysis.PrivilegeSpec;
-import com.cloudera.impala.authorization.SentryConfig;
-import com.cloudera.impala.authorization.User;
-import com.cloudera.impala.catalog.AuthorizationException;
-import com.cloudera.impala.catalog.RolePrivilege;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.thrift.TPrivilege;
-import com.cloudera.impala.thrift.TPrivilegeLevel;
-import com.cloudera.impala.thrift.TPrivilegeScope;
+import org.apache.impala.analysis.PrivilegeSpec;
+import org.apache.impala.authorization.SentryConfig;
+import org.apache.impala.authorization.User;
+import org.apache.impala.catalog.AuthorizationException;
+import org.apache.impala.catalog.RolePrivilege;
+import org.apache.impala.common.ImpalaException;
+import org.apache.impala.common.InternalException;
+import org.apache.impala.thrift.TPrivilege;
+import org.apache.impala.thrift.TPrivilegeLevel;
+import org.apache.impala.thrift.TPrivilegeScope;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/util/SentryProxy.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/util/SentryProxy.java b/fe/src/main/java/org/apache/impala/util/SentryProxy.java
index 76e4931..07669ec 100644
--- a/fe/src/main/java/org/apache/impala/util/SentryProxy.java
+++ b/fe/src/main/java/org/apache/impala/util/SentryProxy.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.util;
+package org.apache.impala.util;
 
 import java.util.List;
 import java.util.Set;
@@ -28,16 +28,16 @@ import org.apache.sentry.provider.db.service.thrift.TSentryGroup;
 import org.apache.sentry.provider.db.service.thrift.TSentryPrivilege;
 import org.apache.sentry.provider.db.service.thrift.TSentryRole;
 
-import com.cloudera.impala.authorization.SentryConfig;
-import com.cloudera.impala.authorization.User;
-import com.cloudera.impala.catalog.AuthorizationException;
-import com.cloudera.impala.catalog.CatalogException;
-import com.cloudera.impala.catalog.CatalogServiceCatalog;
-import com.cloudera.impala.catalog.Role;
-import com.cloudera.impala.catalog.RolePrivilege;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.common.ImpalaRuntimeException;
-import com.cloudera.impala.thrift.TPrivilege;
+import org.apache.impala.authorization.SentryConfig;
+import org.apache.impala.authorization.User;
+import org.apache.impala.catalog.AuthorizationException;
+import org.apache.impala.catalog.CatalogException;
+import org.apache.impala.catalog.CatalogServiceCatalog;
+import org.apache.impala.catalog.Role;
+import org.apache.impala.catalog.RolePrivilege;
+import org.apache.impala.common.ImpalaException;
+import org.apache.impala.common.ImpalaRuntimeException;
+import org.apache.impala.thrift.TPrivilege;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Strings;
 import com.google.common.collect.Lists;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/util/StatsHelper.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/util/StatsHelper.java b/fe/src/main/java/org/apache/impala/util/StatsHelper.java
index 9956c79..cb110ac 100644
--- a/fe/src/main/java/org/apache/impala/util/StatsHelper.java
+++ b/fe/src/main/java/org/apache/impala/util/StatsHelper.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.util;
+package org.apache.impala.util;
 
 /**
  *  This is a utility class to incrementally calculate average, variance

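The comment above introduces StatsHelper, a utility class that incrementally calculates average and variance. One standard way to maintain those statistics without buffering samples is Welford's online algorithm; the sketch below illustrates the idea under that assumption and is not the literal StatsHelper implementation, which this commit only moves into the org.apache.impala.util package:

public class IncrementalStatsSketch {
  private long count_ = 0;
  private double mean_ = 0.0;
  private double m2_ = 0.0;  // running sum of squared deviations from the mean

  // Welford's update: fold one new sample into the running statistics.
  public void addSample(double x) {
    ++count_;
    double delta = x - mean_;
    mean_ += delta / count_;
    m2_ += delta * (x - mean_);
  }

  public long count() { return count_; }
  public double mean() { return mean_; }

  // Population variance; returns 0 until at least one sample has been seen.
  public double variance() { return count_ > 0 ? m2_ / count_ : 0.0; }

  public static void main(String[] args) {
    IncrementalStatsSketch stats = new IncrementalStatsSketch();
    for (double x : new double[] {2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0}) {
      stats.addSample(x);
    }
    // For this sample set the mean is 5.0 and the population variance is 4.0.
    System.out.println("mean=" + stats.mean() + " variance=" + stats.variance());
  }
}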
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/util/TAccessLevelUtil.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/util/TAccessLevelUtil.java b/fe/src/main/java/org/apache/impala/util/TAccessLevelUtil.java
index 2e58efa..0e59252 100644
--- a/fe/src/main/java/org/apache/impala/util/TAccessLevelUtil.java
+++ b/fe/src/main/java/org/apache/impala/util/TAccessLevelUtil.java
@@ -15,9 +15,9 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.util;
+package org.apache.impala.util;
 
-import com.cloudera.impala.thrift.TAccessLevel;
+import org.apache.impala.thrift.TAccessLevel;
 
 /**
  * Utility functions for working with TAccessLevel types.

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/util/TColumnValueUtil.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/util/TColumnValueUtil.java b/fe/src/main/java/org/apache/impala/util/TColumnValueUtil.java
index 23d6eb6..16085b0 100644
--- a/fe/src/main/java/org/apache/impala/util/TColumnValueUtil.java
+++ b/fe/src/main/java/org/apache/impala/util/TColumnValueUtil.java
@@ -15,9 +15,9 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.util;
+package org.apache.impala.util;
 
-import com.cloudera.impala.thrift.TColumnValue;
+import org.apache.impala.thrift.TColumnValue;
 
 /**
  * Utility functions for working with TColumnValue objects.

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/util/TResultRowBuilder.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/util/TResultRowBuilder.java b/fe/src/main/java/org/apache/impala/util/TResultRowBuilder.java
index ae70eb4..1481823 100644
--- a/fe/src/main/java/org/apache/impala/util/TResultRowBuilder.java
+++ b/fe/src/main/java/org/apache/impala/util/TResultRowBuilder.java
@@ -15,11 +15,11 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.util;
+package org.apache.impala.util;
 
-import com.cloudera.impala.common.PrintUtils;
-import com.cloudera.impala.thrift.TColumnValue;
-import com.cloudera.impala.thrift.TResultRow;
+import org.apache.impala.common.PrintUtils;
+import org.apache.impala.thrift.TColumnValue;
+import org.apache.impala.thrift.TResultRow;
 
 /**
  * Utility class for building TResultRows.

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/util/TSessionStateUtil.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/util/TSessionStateUtil.java b/fe/src/main/java/org/apache/impala/util/TSessionStateUtil.java
index f055a58..b52c213 100644
--- a/fe/src/main/java/org/apache/impala/util/TSessionStateUtil.java
+++ b/fe/src/main/java/org/apache/impala/util/TSessionStateUtil.java
@@ -15,9 +15,9 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.util;
+package org.apache.impala.util;
 
-import com.cloudera.impala.thrift.TSessionState;
+import org.apache.impala.thrift.TSessionState;
 
 /**
  * Utility functions for working with TSessionState objects.

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/util/UnsafeUtil.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/util/UnsafeUtil.java b/fe/src/main/java/org/apache/impala/util/UnsafeUtil.java
index f0c69c5..8ba0f59 100644
--- a/fe/src/main/java/org/apache/impala/util/UnsafeUtil.java
+++ b/fe/src/main/java/org/apache/impala/util/UnsafeUtil.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.util;
+package org.apache.impala.util;
 
 import java.lang.reflect.Field;
 import java.security.AccessController;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/util/Visitor.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/util/Visitor.java b/fe/src/main/java/org/apache/impala/util/Visitor.java
index ac7b8e3..504c24a 100644
--- a/fe/src/main/java/org/apache/impala/util/Visitor.java
+++ b/fe/src/main/java/org/apache/impala/util/Visitor.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.util;
+package org.apache.impala.util;
 
 /**
 * Interface for visitor pattern

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/jflex/sql-scanner.flex
----------------------------------------------------------------------
diff --git a/fe/src/main/jflex/sql-scanner.flex b/fe/src/main/jflex/sql-scanner.flex
index 8a1fe67..a0a0122 100644
--- a/fe/src/main/jflex/sql-scanner.flex
+++ b/fe/src/main/jflex/sql-scanner.flex
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java_cup.runtime.Symbol;
 import java.lang.Integer;
@@ -28,7 +28,7 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.ArrayList;
 
-import com.cloudera.impala.analysis.SqlParserSymbols;
+import org.apache.impala.analysis.SqlParserSymbols;
 
 %%
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/test/java/org/apache/impala/analysis/AnalyzeAuthStmtsTest.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/analysis/AnalyzeAuthStmtsTest.java b/fe/src/test/java/org/apache/impala/analysis/AnalyzeAuthStmtsTest.java
index ec12ab8..c37047f 100644
--- a/fe/src/test/java/org/apache/impala/analysis/AnalyzeAuthStmtsTest.java
+++ b/fe/src/test/java/org/apache/impala/analysis/AnalyzeAuthStmtsTest.java
@@ -15,18 +15,18 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.HashSet;
 
 import org.junit.Test;
 
-import com.cloudera.impala.authorization.AuthorizationConfig;
-import com.cloudera.impala.catalog.Catalog;
-import com.cloudera.impala.catalog.Role;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.testutil.TestUtils;
-import com.cloudera.impala.thrift.TQueryCtx;
+import org.apache.impala.authorization.AuthorizationConfig;
+import org.apache.impala.catalog.Catalog;
+import org.apache.impala.catalog.Role;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.testutil.TestUtils;
+import org.apache.impala.thrift.TQueryCtx;
 
 public class AnalyzeAuthStmtsTest extends AnalyzerTest {
   public AnalyzeAuthStmtsTest() throws AnalysisException {

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/test/java/org/apache/impala/analysis/AnalyzeDDLTest.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/analysis/AnalyzeDDLTest.java b/fe/src/test/java/org/apache/impala/analysis/AnalyzeDDLTest.java
index f998e91..c47135e 100644
--- a/fe/src/test/java/org/apache/impala/analysis/AnalyzeDDLTest.java
+++ b/fe/src/test/java/org/apache/impala/analysis/AnalyzeDDLTest.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import static org.junit.Assert.assertTrue;
 
@@ -34,22 +34,22 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.junit.Assert;
 import org.junit.Test;
 
-import com.cloudera.impala.catalog.ArrayType;
-import com.cloudera.impala.catalog.CatalogException;
-import com.cloudera.impala.catalog.ColumnStats;
-import com.cloudera.impala.catalog.DataSource;
-import com.cloudera.impala.catalog.DataSourceTable;
-import com.cloudera.impala.catalog.PrimitiveType;
-import com.cloudera.impala.catalog.ScalarType;
-import com.cloudera.impala.catalog.StructField;
-import com.cloudera.impala.catalog.StructType;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.FileSystemUtil;
-import com.cloudera.impala.common.FrontendTestBase;
-import com.cloudera.impala.common.RuntimeEnv;
-import com.cloudera.impala.testutil.TestUtils;
-import com.cloudera.impala.util.MetaStoreUtil;
+import org.apache.impala.catalog.ArrayType;
+import org.apache.impala.catalog.CatalogException;
+import org.apache.impala.catalog.ColumnStats;
+import org.apache.impala.catalog.DataSource;
+import org.apache.impala.catalog.DataSourceTable;
+import org.apache.impala.catalog.PrimitiveType;
+import org.apache.impala.catalog.ScalarType;
+import org.apache.impala.catalog.StructField;
+import org.apache.impala.catalog.StructType;
+import org.apache.impala.catalog.Type;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.common.FileSystemUtil;
+import org.apache.impala.common.FrontendTestBase;
+import org.apache.impala.common.RuntimeEnv;
+import org.apache.impala.testutil.TestUtils;
+import org.apache.impala.util.MetaStoreUtil;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
@@ -2143,7 +2143,7 @@ public class AnalyzeDDLTest extends FrontendTestBase {
         "SYMBOL=" + symbol;
     final String hdfsPath = "hdfs://localhost:20500/test-warehouse/libTestUdfs.so";
     final String javaFnSuffix = " LOCATION '/test-warehouse/impala-hive-udfs.jar' " +
-        "SYMBOL='com.cloudera.impala.TestUdf'";
+        "SYMBOL='org.apache.impala.TestUdf'";
 
     AnalyzesOk("create function foo() RETURNS int" + udfSuffix);
     AnalyzesOk("create function foo(int, int, string) RETURNS int" + udfSuffix);

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/test/java/org/apache/impala/analysis/AnalyzeExprsTest.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/analysis/AnalyzeExprsTest.java b/fe/src/test/java/org/apache/impala/analysis/AnalyzeExprsTest.java
index 1763095..72b5163 100644
--- a/fe/src/test/java/org/apache/impala/analysis/AnalyzeExprsTest.java
+++ b/fe/src/test/java/org/apache/impala/analysis/AnalyzeExprsTest.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
@@ -29,24 +29,24 @@ import java.util.List;
 import org.junit.Assert;
 import org.junit.Test;
 
-import com.cloudera.impala.analysis.TimestampArithmeticExpr.TimeUnit;
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.catalog.Catalog;
-import com.cloudera.impala.catalog.CatalogException;
-import com.cloudera.impala.catalog.Column;
-import com.cloudera.impala.catalog.Db;
-import com.cloudera.impala.catalog.Function;
-import com.cloudera.impala.catalog.Function.CompareMode;
-import com.cloudera.impala.catalog.PrimitiveType;
-import com.cloudera.impala.catalog.ScalarFunction;
-import com.cloudera.impala.catalog.ScalarType;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.catalog.TestSchemaUtils;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TExpr;
-import com.cloudera.impala.thrift.TFunctionBinaryType;
-import com.cloudera.impala.thrift.TQueryOptions;
+import org.apache.impala.analysis.TimestampArithmeticExpr.TimeUnit;
+import org.apache.impala.authorization.Privilege;
+import org.apache.impala.catalog.Catalog;
+import org.apache.impala.catalog.CatalogException;
+import org.apache.impala.catalog.Column;
+import org.apache.impala.catalog.Db;
+import org.apache.impala.catalog.Function;
+import org.apache.impala.catalog.Function.CompareMode;
+import org.apache.impala.catalog.PrimitiveType;
+import org.apache.impala.catalog.ScalarFunction;
+import org.apache.impala.catalog.ScalarType;
+import org.apache.impala.catalog.Table;
+import org.apache.impala.catalog.TestSchemaUtils;
+import org.apache.impala.catalog.Type;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.TExpr;
+import org.apache.impala.thrift.TFunctionBinaryType;
+import org.apache.impala.thrift.TQueryOptions;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/test/java/org/apache/impala/analysis/AnalyzeModifyStmtsTest.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/analysis/AnalyzeModifyStmtsTest.java b/fe/src/test/java/org/apache/impala/analysis/AnalyzeModifyStmtsTest.java
index 9ed20c8..939d499 100644
--- a/fe/src/test/java/org/apache/impala/analysis/AnalyzeModifyStmtsTest.java
+++ b/fe/src/test/java/org/apache/impala/analysis/AnalyzeModifyStmtsTest.java
@@ -15,11 +15,11 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import org.junit.Test;
 
-import com.cloudera.impala.testutil.TestUtils;
+import org.apache.impala.testutil.TestUtils;
 
 /**
  * Tests analysis phase of the ModifyStmt and its sub-classes.

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/test/java/org/apache/impala/analysis/AnalyzeStmtsTest.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/analysis/AnalyzeStmtsTest.java b/fe/src/test/java/org/apache/impala/analysis/AnalyzeStmtsTest.java
index aeef89a..f437bc8 100644
--- a/fe/src/test/java/org/apache/impala/analysis/AnalyzeStmtsTest.java
+++ b/fe/src/test/java/org/apache/impala/analysis/AnalyzeStmtsTest.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.fail;
@@ -26,10 +26,10 @@ import java.util.List;
 import org.junit.Assert;
 import org.junit.Test;
 
-import com.cloudera.impala.catalog.PrimitiveType;
-import com.cloudera.impala.catalog.ScalarType;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
+import org.apache.impala.catalog.PrimitiveType;
+import org.apache.impala.catalog.ScalarType;
+import org.apache.impala.catalog.Type;
+import org.apache.impala.common.AnalysisException;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Lists;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/test/java/org/apache/impala/analysis/AnalyzeSubqueriesTest.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/analysis/AnalyzeSubqueriesTest.java b/fe/src/test/java/org/apache/impala/analysis/AnalyzeSubqueriesTest.java
index 8c30439..a37ba72 100644
--- a/fe/src/test/java/org/apache/impala/analysis/AnalyzeSubqueriesTest.java
+++ b/fe/src/test/java/org/apache/impala/analysis/AnalyzeSubqueriesTest.java
@@ -15,12 +15,12 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import org.junit.Test;
 
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
+import org.apache.impala.catalog.Type;
+import org.apache.impala.common.AnalysisException;
 
 public class AnalyzeSubqueriesTest extends AnalyzerTest {
   private static String cmpOperators[] = {"=", "!=", "<=", ">=", ">", "<"};

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/test/java/org/apache/impala/analysis/AnalyzerTest.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/analysis/AnalyzerTest.java b/fe/src/test/java/org/apache/impala/analysis/AnalyzerTest.java
index 815279c..d763deb 100644
--- a/fe/src/test/java/org/apache/impala/analysis/AnalyzerTest.java
+++ b/fe/src/test/java/org/apache/impala/analysis/AnalyzerTest.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.ArrayList;
 import java.util.HashMap;
@@ -27,13 +27,13 @@ import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.catalog.Function;
-import com.cloudera.impala.catalog.PrimitiveType;
-import com.cloudera.impala.catalog.ScalarType;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.FrontendTestBase;
-import com.cloudera.impala.thrift.TExpr;
+import org.apache.impala.catalog.Function;
+import org.apache.impala.catalog.PrimitiveType;
+import org.apache.impala.catalog.ScalarType;
+import org.apache.impala.catalog.Type;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.common.FrontendTestBase;
+import org.apache.impala.thrift.TExpr;
 import com.google.common.base.Preconditions;
 
 public class AnalyzerTest extends FrontendTestBase {

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/test/java/org/apache/impala/analysis/AuditingTest.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/analysis/AuditingTest.java b/fe/src/test/java/org/apache/impala/analysis/AuditingTest.java
index 3160511..eee9cce 100644
--- a/fe/src/test/java/org/apache/impala/analysis/AuditingTest.java
+++ b/fe/src/test/java/org/apache/impala/analysis/AuditingTest.java
@@ -15,24 +15,24 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.Set;
 
 import org.junit.Assert;
 import org.junit.Test;
 
-import com.cloudera.impala.authorization.AuthorizationConfig;
-import com.cloudera.impala.catalog.AuthorizationException;
-import com.cloudera.impala.catalog.Catalog;
-import com.cloudera.impala.catalog.ImpaladCatalog;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.service.Frontend;
-import com.cloudera.impala.testutil.ImpaladTestCatalog;
-import com.cloudera.impala.testutil.TestUtils;
-import com.cloudera.impala.thrift.TAccessEvent;
-import com.cloudera.impala.thrift.TCatalogObjectType;
+import org.apache.impala.authorization.AuthorizationConfig;
+import org.apache.impala.catalog.AuthorizationException;
+import org.apache.impala.catalog.Catalog;
+import org.apache.impala.catalog.ImpaladCatalog;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.common.InternalException;
+import org.apache.impala.service.Frontend;
+import org.apache.impala.testutil.ImpaladTestCatalog;
+import org.apache.impala.testutil.TestUtils;
+import org.apache.impala.thrift.TAccessEvent;
+import org.apache.impala.thrift.TCatalogObjectType;
 import com.google.common.collect.Sets;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/test/java/org/apache/impala/analysis/AuthorizationTest.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/analysis/AuthorizationTest.java b/fe/src/test/java/org/apache/impala/analysis/AuthorizationTest.java
index 9ee4312..38aff7d 100644
--- a/fe/src/test/java/org/apache/impala/analysis/AuthorizationTest.java
+++ b/fe/src/test/java/org/apache/impala/analysis/AuthorizationTest.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.fail;
@@ -46,34 +46,34 @@ import org.junit.runners.Parameterized.Parameters;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.authorization.AuthorizationConfig;
-import com.cloudera.impala.authorization.AuthorizeableTable;
-import com.cloudera.impala.authorization.User;
-import com.cloudera.impala.catalog.AuthorizationException;
-import com.cloudera.impala.catalog.Catalog;
-import com.cloudera.impala.catalog.Db;
-import com.cloudera.impala.catalog.ImpaladCatalog;
-import com.cloudera.impala.catalog.ScalarFunction;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.common.RuntimeEnv;
-import com.cloudera.impala.service.Frontend;
-import com.cloudera.impala.testutil.ImpaladTestCatalog;
-import com.cloudera.impala.testutil.TestUtils;
-import com.cloudera.impala.thrift.TFunctionBinaryType;
-import com.cloudera.impala.thrift.TMetadataOpRequest;
-import com.cloudera.impala.thrift.TMetadataOpcode;
-import com.cloudera.impala.thrift.TNetworkAddress;
-import com.cloudera.impala.thrift.TPrivilege;
-import com.cloudera.impala.thrift.TPrivilegeLevel;
-import com.cloudera.impala.thrift.TPrivilegeScope;
-import com.cloudera.impala.thrift.TQueryCtx;
-import com.cloudera.impala.thrift.TResultSet;
-import com.cloudera.impala.thrift.TSessionState;
-import com.cloudera.impala.util.PatternMatcher;
-import com.cloudera.impala.util.SentryPolicyService;
+import org.apache.impala.authorization.AuthorizationConfig;
+import org.apache.impala.authorization.AuthorizeableTable;
+import org.apache.impala.authorization.User;
+import org.apache.impala.catalog.AuthorizationException;
+import org.apache.impala.catalog.Catalog;
+import org.apache.impala.catalog.Db;
+import org.apache.impala.catalog.ImpaladCatalog;
+import org.apache.impala.catalog.ScalarFunction;
+import org.apache.impala.catalog.Type;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.common.ImpalaException;
+import org.apache.impala.common.InternalException;
+import org.apache.impala.common.RuntimeEnv;
+import org.apache.impala.service.Frontend;
+import org.apache.impala.testutil.ImpaladTestCatalog;
+import org.apache.impala.testutil.TestUtils;
+import org.apache.impala.thrift.TFunctionBinaryType;
+import org.apache.impala.thrift.TMetadataOpRequest;
+import org.apache.impala.thrift.TMetadataOpcode;
+import org.apache.impala.thrift.TNetworkAddress;
+import org.apache.impala.thrift.TPrivilege;
+import org.apache.impala.thrift.TPrivilegeLevel;
+import org.apache.impala.thrift.TPrivilegeScope;
+import org.apache.impala.thrift.TQueryCtx;
+import org.apache.impala.thrift.TResultSet;
+import org.apache.impala.thrift.TSessionState;
+import org.apache.impala.util.PatternMatcher;
+import org.apache.impala.util.SentryPolicyService;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
@@ -435,7 +435,7 @@ public class AuthorizationTest {
   @After
   public void TestTPCHCleanup() throws AuthorizationException, AnalysisException {
     // Failure to cleanup TPCH can cause:
-    // TestDropDatabase(com.cloudera.impala.analysis.AuthorizationTest):
+    // TestDropDatabase(org.apache.impala.analysis.AuthorizationTest):
     // Cannot drop non-empty database: tpch
     if (ctx_.catalog.getDb("tpch").numFunctions() != 0) {
       fail("Failed to clean up functions in tpch.");

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/test/java/org/apache/impala/analysis/ExprTest.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/analysis/ExprTest.java b/fe/src/test/java/org/apache/impala/analysis/ExprTest.java
index feede2c..7ae0f89 100644
--- a/fe/src/test/java/org/apache/impala/analysis/ExprTest.java
+++ b/fe/src/test/java/org/apache/impala/analysis/ExprTest.java
@@ -15,14 +15,14 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import static org.junit.Assert.fail;
 
 import org.junit.Test;
 
-import com.cloudera.impala.catalog.ScalarType;
-import com.cloudera.impala.catalog.Type;
+import org.apache.impala.catalog.ScalarType;
+import org.apache.impala.catalog.Type;
 
 
 public class ExprTest {

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/test/java/org/apache/impala/analysis/ParserTest.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/analysis/ParserTest.java b/fe/src/test/java/org/apache/impala/analysis/ParserTest.java
index 0db8af5..8b8ea42 100644
--- a/fe/src/test/java/org/apache/impala/analysis/ParserTest.java
+++ b/fe/src/test/java/org/apache/impala/analysis/ParserTest.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
@@ -30,9 +30,9 @@ import java.util.List;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.junit.Test;
 
-import com.cloudera.impala.analysis.TimestampArithmeticExpr.TimeUnit;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.testutil.TestUtils;
+import org.apache.impala.analysis.TimestampArithmeticExpr.TimeUnit;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.testutil.TestUtils;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/test/java/org/apache/impala/analysis/ToSqlTest.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/analysis/ToSqlTest.java b/fe/src/test/java/org/apache/impala/analysis/ToSqlTest.java
index 6b502a1..371e811 100644
--- a/fe/src/test/java/org/apache/impala/analysis/ToSqlTest.java
+++ b/fe/src/test/java/org/apache/impala/analysis/ToSqlTest.java
@@ -15,17 +15,17 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import static org.junit.Assert.fail;
 
 import org.junit.Ignore;
 import org.junit.Test;
 
-import com.cloudera.impala.authorization.AuthorizationConfig;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.FrontendTestBase;
-import com.cloudera.impala.testutil.TestUtils;
+import org.apache.impala.authorization.AuthorizationConfig;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.common.FrontendTestBase;
+import org.apache.impala.testutil.TestUtils;
 import com.google.common.base.Preconditions;
 
 // TODO: Expand this test, in particular, because view creation relies

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/test/java/org/apache/impala/analysis/TypesUtilTest.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/analysis/TypesUtilTest.java b/fe/src/test/java/org/apache/impala/analysis/TypesUtilTest.java
index 4e590d4..9b36608 100644
--- a/fe/src/test/java/org/apache/impala/analysis/TypesUtilTest.java
+++ b/fe/src/test/java/org/apache/impala/analysis/TypesUtilTest.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import static org.junit.Assert.assertTrue;
 
@@ -25,13 +25,13 @@ import java.util.List;
 import org.junit.Assert;
 import org.junit.Test;
 
-import com.cloudera.impala.catalog.ArrayType;
-import com.cloudera.impala.catalog.MapType;
-import com.cloudera.impala.catalog.PrimitiveType;
-import com.cloudera.impala.catalog.ScalarType;
-import com.cloudera.impala.catalog.StructField;
-import com.cloudera.impala.catalog.StructType;
-import com.cloudera.impala.catalog.Type;
+import org.apache.impala.catalog.ArrayType;
+import org.apache.impala.catalog.MapType;
+import org.apache.impala.catalog.PrimitiveType;
+import org.apache.impala.catalog.ScalarType;
+import org.apache.impala.catalog.StructField;
+import org.apache.impala.catalog.StructType;
+import org.apache.impala.catalog.Type;
 import com.google.common.collect.Lists;
 
 // TODO: move other types related tests into this class to break up the large

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/test/java/org/apache/impala/catalog/CatalogObjectToFromThriftTest.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/catalog/CatalogObjectToFromThriftTest.java b/fe/src/test/java/org/apache/impala/catalog/CatalogObjectToFromThriftTest.java
index 0fcb7a6..ebd99ba 100644
--- a/fe/src/test/java/org/apache/impala/catalog/CatalogObjectToFromThriftTest.java
+++ b/fe/src/test/java/org/apache/impala/catalog/CatalogObjectToFromThriftTest.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog;
+package org.apache.impala.catalog;
 
 import static org.junit.Assert.fail;
 
@@ -26,17 +26,17 @@ import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
-import com.cloudera.impala.testutil.CatalogServiceTestCatalog;
-import com.cloudera.impala.analysis.LiteralExpr;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.thrift.ImpalaInternalServiceConstants;
-import com.cloudera.impala.thrift.TAccessLevel;
-import com.cloudera.impala.thrift.THBaseTable;
-import com.cloudera.impala.thrift.THdfsPartition;
-import com.cloudera.impala.thrift.THdfsTable;
-import com.cloudera.impala.thrift.TTable;
-import com.cloudera.impala.thrift.TTableType;
+import org.apache.impala.testutil.CatalogServiceTestCatalog;
+import org.apache.impala.analysis.LiteralExpr;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.common.ImpalaException;
+import org.apache.impala.thrift.ImpalaInternalServiceConstants;
+import org.apache.impala.thrift.TAccessLevel;
+import org.apache.impala.thrift.THBaseTable;
+import org.apache.impala.thrift.THdfsPartition;
+import org.apache.impala.thrift.THdfsTable;
+import org.apache.impala.thrift.TTable;
+import org.apache.impala.thrift.TTableType;
 import com.google.common.collect.Lists;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/test/java/org/apache/impala/catalog/CatalogTest.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/catalog/CatalogTest.java b/fe/src/test/java/org/apache/impala/catalog/CatalogTest.java
index 9ed09ed..e6b71cd 100644
--- a/fe/src/test/java/org/apache/impala/catalog/CatalogTest.java
+++ b/fe/src/test/java/org/apache/impala/catalog/CatalogTest.java
@@ -15,9 +15,9 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog;
+package org.apache.impala.catalog;
 
-import static com.cloudera.impala.thrift.ImpalaInternalServiceConstants.DEFAULT_PARTITION_ID;
+import static org.apache.impala.thrift.ImpalaInternalServiceConstants.DEFAULT_PARTITION_ID;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
@@ -35,13 +35,13 @@ import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
 import org.junit.Test;
 
-import com.cloudera.impala.analysis.FunctionName;
-import com.cloudera.impala.analysis.HdfsUri;
-import com.cloudera.impala.analysis.LiteralExpr;
-import com.cloudera.impala.analysis.NumericLiteral;
-import com.cloudera.impala.catalog.MetaStoreClientPool.MetaStoreClient;
-import com.cloudera.impala.testutil.CatalogServiceTestCatalog;
-import com.cloudera.impala.thrift.TFunctionBinaryType;
+import org.apache.impala.analysis.FunctionName;
+import org.apache.impala.analysis.HdfsUri;
+import org.apache.impala.analysis.LiteralExpr;
+import org.apache.impala.analysis.NumericLiteral;
+import org.apache.impala.catalog.MetaStoreClientPool.MetaStoreClient;
+import org.apache.impala.testutil.CatalogServiceTestCatalog;
+import org.apache.impala.thrift.TFunctionBinaryType;
 import com.google.common.base.Strings;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/test/java/org/apache/impala/catalog/HdfsPartitionTest.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/catalog/HdfsPartitionTest.java b/fe/src/test/java/org/apache/impala/catalog/HdfsPartitionTest.java
index fa86d14..100a513 100644
--- a/fe/src/test/java/org/apache/impala/catalog/HdfsPartitionTest.java
+++ b/fe/src/test/java/org/apache/impala/catalog/HdfsPartitionTest.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog;
+package org.apache.impala.catalog;
 
 import static org.junit.Assert.assertTrue;
 
@@ -23,11 +23,11 @@ import java.math.BigDecimal;
 import java.util.List;
 import java.lang.*;
 
-import com.cloudera.impala.analysis.*;
+import org.apache.impala.analysis.*;
 import com.google.common.collect.Lists;
 import org.junit.Test;
 
-import static com.cloudera.impala.catalog.HdfsPartition.comparePartitionKeyValues;
+import static org.apache.impala.catalog.HdfsPartition.comparePartitionKeyValues;
 
 public class HdfsPartitionTest {
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/test/java/org/apache/impala/catalog/HdfsStorageDescriptorTest.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/catalog/HdfsStorageDescriptorTest.java b/fe/src/test/java/org/apache/impala/catalog/HdfsStorageDescriptorTest.java
index decfc84..0d8af1c 100644
--- a/fe/src/test/java/org/apache/impala/catalog/HdfsStorageDescriptorTest.java
+++ b/fe/src/test/java/org/apache/impala/catalog/HdfsStorageDescriptorTest.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog;
+package org.apache.impala.catalog;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
@@ -29,8 +29,8 @@ import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.serde.serdeConstants;
 import org.junit.Test;
 
-import com.cloudera.impala.catalog.HdfsStorageDescriptor.InvalidStorageDescriptorException;
-import com.cloudera.impala.thrift.THdfsFileFormat;
+import org.apache.impala.catalog.HdfsStorageDescriptor.InvalidStorageDescriptorException;
+import org.apache.impala.thrift.THdfsFileFormat;
 import com.google.common.collect.ImmutableList;
 
 public class HdfsStorageDescriptorTest {
@@ -60,7 +60,7 @@ public class HdfsStorageDescriptorTest {
         "parquet.hive.MapredParquetInputFormat",
         "org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat"};
     String [] outputFormats = new String [] {
-        "com.cloudera.impala.hive.serde.ParquetOutputFormat",
+        "org.apache.impala.hive.serde.ParquetOutputFormat",
         "parquet.hive.DeprecatedParquetOutputFormat",
         "parquet.hive.MapredParquetOutputFormat",
         "org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat"};

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/test/java/org/apache/impala/catalog/TestSchemaUtils.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/catalog/TestSchemaUtils.java b/fe/src/test/java/org/apache/impala/catalog/TestSchemaUtils.java
index 1afcb38..43ac610 100644
--- a/fe/src/test/java/org/apache/impala/catalog/TestSchemaUtils.java
+++ b/fe/src/test/java/org/apache/impala/catalog/TestSchemaUtils.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog;
+package org.apache.impala.catalog;
 
 import java.util.HashMap;
 import java.util.Map;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/test/java/org/apache/impala/common/FrontendTestBase.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/common/FrontendTestBase.java b/fe/src/test/java/org/apache/impala/common/FrontendTestBase.java
index 228dd49..5a53f64 100644
--- a/fe/src/test/java/org/apache/impala/common/FrontendTestBase.java
+++ b/fe/src/test/java/org/apache/impala/common/FrontendTestBase.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.common;
+package org.apache.impala.common;
 
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.fail;
@@ -28,35 +28,35 @@ import java.util.List;
 import org.junit.After;
 import org.junit.Assert;
 
-import com.cloudera.impala.analysis.AnalysisContext;
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.ColumnDef;
-import com.cloudera.impala.analysis.CreateTableStmt;
-import com.cloudera.impala.analysis.CreateViewStmt;
-import com.cloudera.impala.analysis.FunctionName;
-import com.cloudera.impala.analysis.ParseNode;
-import com.cloudera.impala.analysis.QueryStmt;
-import com.cloudera.impala.analysis.SqlParser;
-import com.cloudera.impala.analysis.SqlScanner;
-import com.cloudera.impala.authorization.AuthorizationConfig;
-import com.cloudera.impala.catalog.AggregateFunction;
-import com.cloudera.impala.catalog.Catalog;
-import com.cloudera.impala.catalog.Column;
-import com.cloudera.impala.catalog.Db;
-import com.cloudera.impala.catalog.Function;
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.catalog.ImpaladCatalog;
-import com.cloudera.impala.catalog.ScalarFunction;
-import com.cloudera.impala.catalog.ScalarType;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.catalog.View;
-import com.cloudera.impala.service.Frontend;
-import com.cloudera.impala.testutil.ImpaladTestCatalog;
-import com.cloudera.impala.testutil.TestUtils;
-import com.cloudera.impala.thrift.TFunctionBinaryType;
-import com.cloudera.impala.thrift.TQueryCtx;
-import com.cloudera.impala.thrift.TQueryOptions;
+import org.apache.impala.analysis.AnalysisContext;
+import org.apache.impala.analysis.Analyzer;
+import org.apache.impala.analysis.ColumnDef;
+import org.apache.impala.analysis.CreateTableStmt;
+import org.apache.impala.analysis.CreateViewStmt;
+import org.apache.impala.analysis.FunctionName;
+import org.apache.impala.analysis.ParseNode;
+import org.apache.impala.analysis.QueryStmt;
+import org.apache.impala.analysis.SqlParser;
+import org.apache.impala.analysis.SqlScanner;
+import org.apache.impala.authorization.AuthorizationConfig;
+import org.apache.impala.catalog.AggregateFunction;
+import org.apache.impala.catalog.Catalog;
+import org.apache.impala.catalog.Column;
+import org.apache.impala.catalog.Db;
+import org.apache.impala.catalog.Function;
+import org.apache.impala.catalog.HdfsTable;
+import org.apache.impala.catalog.ImpaladCatalog;
+import org.apache.impala.catalog.ScalarFunction;
+import org.apache.impala.catalog.ScalarType;
+import org.apache.impala.catalog.Table;
+import org.apache.impala.catalog.Type;
+import org.apache.impala.catalog.View;
+import org.apache.impala.service.Frontend;
+import org.apache.impala.testutil.ImpaladTestCatalog;
+import org.apache.impala.testutil.TestUtils;
+import org.apache.impala.thrift.TFunctionBinaryType;
+import org.apache.impala.thrift.TQueryCtx;
+import org.apache.impala.thrift.TQueryOptions;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/test/java/org/apache/impala/hive/executor/TestUdf.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/hive/executor/TestUdf.java b/fe/src/test/java/org/apache/impala/hive/executor/TestUdf.java
index 0d3d7cc..58cb24c 100644
--- a/fe/src/test/java/org/apache/impala/hive/executor/TestUdf.java
+++ b/fe/src/test/java/org/apache/impala/hive/executor/TestUdf.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.hive.executor;
+package org.apache.impala.hive.executor;
 
 import org.apache.hadoop.hive.ql.exec.UDF;
 import org.apache.hadoop.hive.serde2.io.ByteWritable;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/test/java/org/apache/impala/hive/executor/UdfExecutorTest.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/hive/executor/UdfExecutorTest.java b/fe/src/test/java/org/apache/impala/hive/executor/UdfExecutorTest.java
index c248df1..243d015 100644
--- a/fe/src/test/java/org/apache/impala/hive/executor/UdfExecutorTest.java
+++ b/fe/src/test/java/org/apache/impala/hive/executor/UdfExecutorTest.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.hive.executor;
+package org.apache.impala.hive.executor;
 
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
@@ -63,14 +63,14 @@ import org.apache.thrift.protocol.TBinaryProtocol;
 import org.apache.thrift.protocol.TProtocolFactory;
 import org.junit.Test;
 
-import com.cloudera.impala.catalog.PrimitiveType;
-import com.cloudera.impala.catalog.ScalarFunction;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.thrift.TFunction;
-import com.cloudera.impala.thrift.TFunctionBinaryType;
-import com.cloudera.impala.thrift.THiveUdfExecutorCtorParams;
-import com.cloudera.impala.util.UnsafeUtil;
+import org.apache.impala.catalog.PrimitiveType;
+import org.apache.impala.catalog.ScalarFunction;
+import org.apache.impala.catalog.Type;
+import org.apache.impala.common.ImpalaException;
+import org.apache.impala.thrift.TFunction;
+import org.apache.impala.thrift.TFunctionBinaryType;
+import org.apache.impala.thrift.THiveUdfExecutorCtorParams;
+import org.apache.impala.util.UnsafeUtil;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Strings;
 import com.google.common.collect.Lists;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/test/java/org/apache/impala/planner/PlannerTest.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/planner/PlannerTest.java b/fe/src/test/java/org/apache/impala/planner/PlannerTest.java
index 6c88170..88a8631 100644
--- a/fe/src/test/java/org/apache/impala/planner/PlannerTest.java
+++ b/fe/src/test/java/org/apache/impala/planner/PlannerTest.java
@@ -15,16 +15,16 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.planner;
+package org.apache.impala.planner;
 
 import org.junit.Assume;
 import org.junit.Test;
 
-import com.cloudera.impala.catalog.Db;
-import com.cloudera.impala.common.RuntimeEnv;
-import com.cloudera.impala.thrift.TExplainLevel;
-import com.cloudera.impala.thrift.TQueryOptions;
-import com.cloudera.impala.thrift.TRuntimeFilterMode;
+import org.apache.impala.catalog.Db;
+import org.apache.impala.common.RuntimeEnv;
+import org.apache.impala.thrift.TExplainLevel;
+import org.apache.impala.thrift.TQueryOptions;
+import org.apache.impala.thrift.TRuntimeFilterMode;
 
 // All planner tests, except for S3 specific tests should go here.
 public class PlannerTest extends PlannerTestBase {

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/test/java/org/apache/impala/planner/PlannerTestBase.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/planner/PlannerTestBase.java b/fe/src/test/java/org/apache/impala/planner/PlannerTestBase.java
index 6db1293..4464203 100644
--- a/fe/src/test/java/org/apache/impala/planner/PlannerTestBase.java
+++ b/fe/src/test/java/org/apache/impala/planner/PlannerTestBase.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.planner;
+package org.apache.impala.planner;
 
 import static org.junit.Assert.fail;
 
@@ -40,38 +40,38 @@ import org.junit.BeforeClass;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.analysis.ColumnLineageGraph;
-import com.cloudera.impala.catalog.CatalogException;
-import com.cloudera.impala.common.FrontendTestBase;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.common.NotImplementedException;
-import com.cloudera.impala.common.RuntimeEnv;
-import com.cloudera.impala.testutil.TestFileParser;
-import com.cloudera.impala.testutil.TestFileParser.Section;
-import com.cloudera.impala.testutil.TestFileParser.TestCase;
-import com.cloudera.impala.testutil.TestUtils;
-import com.cloudera.impala.thrift.ImpalaInternalServiceConstants;
-import com.cloudera.impala.thrift.TDescriptorTable;
-import com.cloudera.impala.thrift.TExecRequest;
-import com.cloudera.impala.thrift.TExplainLevel;
-import com.cloudera.impala.thrift.THBaseKeyRange;
-import com.cloudera.impala.thrift.THdfsFileSplit;
-import com.cloudera.impala.thrift.THdfsPartition;
-import com.cloudera.impala.thrift.THdfsPartitionLocation;
-import com.cloudera.impala.thrift.THdfsScanNode;
-import com.cloudera.impala.thrift.THdfsTable;
-import com.cloudera.impala.thrift.TLineageGraph;
-import com.cloudera.impala.thrift.TNetworkAddress;
-import com.cloudera.impala.thrift.TPlanFragment;
-import com.cloudera.impala.thrift.TPlanNode;
-import com.cloudera.impala.thrift.TQueryCtx;
-import com.cloudera.impala.thrift.TQueryExecRequest;
-import com.cloudera.impala.thrift.TQueryOptions;
-import com.cloudera.impala.thrift.TScanRangeLocations;
-import com.cloudera.impala.thrift.TTableDescriptor;
-import com.cloudera.impala.thrift.TTupleDescriptor;
-import com.cloudera.impala.thrift.TUpdateMembershipRequest;
-import com.cloudera.impala.util.MembershipSnapshot;
+import org.apache.impala.analysis.ColumnLineageGraph;
+import org.apache.impala.catalog.CatalogException;
+import org.apache.impala.common.FrontendTestBase;
+import org.apache.impala.common.ImpalaException;
+import org.apache.impala.common.NotImplementedException;
+import org.apache.impala.common.RuntimeEnv;
+import org.apache.impala.testutil.TestFileParser;
+import org.apache.impala.testutil.TestFileParser.Section;
+import org.apache.impala.testutil.TestFileParser.TestCase;
+import org.apache.impala.testutil.TestUtils;
+import org.apache.impala.thrift.ImpalaInternalServiceConstants;
+import org.apache.impala.thrift.TDescriptorTable;
+import org.apache.impala.thrift.TExecRequest;
+import org.apache.impala.thrift.TExplainLevel;
+import org.apache.impala.thrift.THBaseKeyRange;
+import org.apache.impala.thrift.THdfsFileSplit;
+import org.apache.impala.thrift.THdfsPartition;
+import org.apache.impala.thrift.THdfsPartitionLocation;
+import org.apache.impala.thrift.THdfsScanNode;
+import org.apache.impala.thrift.THdfsTable;
+import org.apache.impala.thrift.TLineageGraph;
+import org.apache.impala.thrift.TNetworkAddress;
+import org.apache.impala.thrift.TPlanFragment;
+import org.apache.impala.thrift.TPlanNode;
+import org.apache.impala.thrift.TQueryCtx;
+import org.apache.impala.thrift.TQueryExecRequest;
+import org.apache.impala.thrift.TQueryOptions;
+import org.apache.impala.thrift.TScanRangeLocations;
+import org.apache.impala.thrift.TTableDescriptor;
+import org.apache.impala.thrift.TTupleDescriptor;
+import org.apache.impala.thrift.TUpdateMembershipRequest;
+import org.apache.impala.util.MembershipSnapshot;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/test/java/org/apache/impala/planner/S3PlannerTest.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/planner/S3PlannerTest.java b/fe/src/test/java/org/apache/impala/planner/S3PlannerTest.java
index a6e0661..f571ebe 100644
--- a/fe/src/test/java/org/apache/impala/planner/S3PlannerTest.java
+++ b/fe/src/test/java/org/apache/impala/planner/S3PlannerTest.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.planner;
+package org.apache.impala.planner;
 
 import static org.junit.Assume.assumeTrue;
 
@@ -28,7 +28,7 @@ import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
-import com.cloudera.impala.planner.PlannerTestBase;
+import org.apache.impala.planner.PlannerTestBase;
 
 // S3 specific planner tests go here, and will run against tables in S3.  These tests
 // are run only when test.fs.s3a.name is set in the configuration.

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/test/java/org/apache/impala/service/FrontendTest.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/service/FrontendTest.java b/fe/src/test/java/org/apache/impala/service/FrontendTest.java
index c710ae8..dfbdb12 100644
--- a/fe/src/test/java/org/apache/impala/service/FrontendTest.java
+++ b/fe/src/test/java/org/apache/impala/service/FrontendTest.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.service;
+package org.apache.impala.service;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
@@ -33,19 +33,19 @@ import org.apache.hive.service.cli.thrift.TGetSchemasReq;
 import org.apache.hive.service.cli.thrift.TGetTablesReq;
 import org.junit.Test;
 
-import com.cloudera.impala.analysis.AuthorizationTest;
-import com.cloudera.impala.authorization.AuthorizationConfig;
-import com.cloudera.impala.catalog.Catalog;
-import com.cloudera.impala.catalog.PrimitiveType;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.testutil.ImpaladTestCatalog;
-import com.cloudera.impala.testutil.TestUtils;
-import com.cloudera.impala.thrift.TMetadataOpRequest;
-import com.cloudera.impala.thrift.TMetadataOpcode;
-import com.cloudera.impala.thrift.TQueryCtx;
-import com.cloudera.impala.thrift.TResultRow;
-import com.cloudera.impala.thrift.TResultSet;
+import org.apache.impala.analysis.AuthorizationTest;
+import org.apache.impala.authorization.AuthorizationConfig;
+import org.apache.impala.catalog.Catalog;
+import org.apache.impala.catalog.PrimitiveType;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.common.ImpalaException;
+import org.apache.impala.testutil.ImpaladTestCatalog;
+import org.apache.impala.testutil.TestUtils;
+import org.apache.impala.thrift.TMetadataOpRequest;
+import org.apache.impala.thrift.TMetadataOpcode;
+import org.apache.impala.thrift.TQueryCtx;
+import org.apache.impala.thrift.TResultRow;
+import org.apache.impala.thrift.TResultSet;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/test/java/org/apache/impala/service/JdbcTest.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/service/JdbcTest.java b/fe/src/test/java/org/apache/impala/service/JdbcTest.java
index c79a52c..a1f4a29 100644
--- a/fe/src/test/java/org/apache/impala/service/JdbcTest.java
+++ b/fe/src/test/java/org/apache/impala/service/JdbcTest.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.service;
+package org.apache.impala.service;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
@@ -39,10 +39,10 @@ import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
-import com.cloudera.impala.analysis.CreateTableStmt;
-import com.cloudera.impala.analysis.SqlParser;
-import com.cloudera.impala.analysis.SqlScanner;
-import com.cloudera.impala.testutil.ImpalaJdbcClient;
+import org.apache.impala.analysis.CreateTableStmt;
+import org.apache.impala.analysis.SqlParser;
+import org.apache.impala.analysis.SqlScanner;
+import org.apache.impala.testutil.ImpalaJdbcClient;
 import com.google.common.collect.Lists;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/test/java/org/apache/impala/testutil/BlockIdGenerator.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/testutil/BlockIdGenerator.java b/fe/src/test/java/org/apache/impala/testutil/BlockIdGenerator.java
index b59cc53..d5cce94 100644
--- a/fe/src/test/java/org/apache/impala/testutil/BlockIdGenerator.java
+++ b/fe/src/test/java/org/apache/impala/testutil/BlockIdGenerator.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.testutil;
+package org.apache.impala.testutil;
 
 import java.io.File;
 import java.io.FileWriter;
@@ -27,14 +27,14 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 
-import com.cloudera.impala.catalog.Catalog;
-import com.cloudera.impala.catalog.Db;
-import com.cloudera.impala.catalog.HdfsPartition;
-import com.cloudera.impala.catalog.HdfsPartition.FileDescriptor;
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.thrift.ImpalaInternalServiceConstants;
-import com.cloudera.impala.util.PatternMatcher;
+import org.apache.impala.catalog.Catalog;
+import org.apache.impala.catalog.Db;
+import org.apache.impala.catalog.HdfsPartition;
+import org.apache.impala.catalog.HdfsPartition.FileDescriptor;
+import org.apache.impala.catalog.HdfsTable;
+import org.apache.impala.catalog.Table;
+import org.apache.impala.thrift.ImpalaInternalServiceConstants;
+import org.apache.impala.util.PatternMatcher;
 
 /**
  * Utility to generate an output file with all the block ids for each table

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/test/java/org/apache/impala/testutil/CatalogServiceTestCatalog.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/testutil/CatalogServiceTestCatalog.java b/fe/src/test/java/org/apache/impala/testutil/CatalogServiceTestCatalog.java
index ee1cba7..92b127f 100644
--- a/fe/src/test/java/org/apache/impala/testutil/CatalogServiceTestCatalog.java
+++ b/fe/src/test/java/org/apache/impala/testutil/CatalogServiceTestCatalog.java
@@ -15,13 +15,13 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.testutil;
+package org.apache.impala.testutil;
 
-import com.cloudera.impala.authorization.SentryConfig;
-import com.cloudera.impala.catalog.AuthorizationPolicy;
-import com.cloudera.impala.catalog.CatalogException;
-import com.cloudera.impala.catalog.CatalogServiceCatalog;
-import com.cloudera.impala.thrift.TUniqueId;
+import org.apache.impala.authorization.SentryConfig;
+import org.apache.impala.catalog.AuthorizationPolicy;
+import org.apache.impala.catalog.CatalogException;
+import org.apache.impala.catalog.CatalogServiceCatalog;
+import org.apache.impala.thrift.TUniqueId;
 
 /**
  * Test class of the Catalog Server's catalog that exposes internal state that is useful

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/test/java/org/apache/impala/testutil/ImpalaJdbcClient.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/testutil/ImpalaJdbcClient.java b/fe/src/test/java/org/apache/impala/testutil/ImpalaJdbcClient.java
index 095d123..7176342 100644
--- a/fe/src/test/java/org/apache/impala/testutil/ImpalaJdbcClient.java
+++ b/fe/src/test/java/org/apache/impala/testutil/ImpalaJdbcClient.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.testutil;
+package org.apache.impala.testutil;
 
 import java.sql.Connection;
 import java.sql.DriverManager;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/test/java/org/apache/impala/testutil/ImpaladTestCatalog.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/testutil/ImpaladTestCatalog.java b/fe/src/test/java/org/apache/impala/testutil/ImpaladTestCatalog.java
index 549e572..1dfed8a 100644
--- a/fe/src/test/java/org/apache/impala/testutil/ImpaladTestCatalog.java
+++ b/fe/src/test/java/org/apache/impala/testutil/ImpaladTestCatalog.java
@@ -15,16 +15,16 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.testutil;
+package org.apache.impala.testutil;
 
-import com.cloudera.impala.authorization.AuthorizationConfig;
-import com.cloudera.impala.catalog.CatalogException;
-import com.cloudera.impala.catalog.CatalogServiceCatalog;
-import com.cloudera.impala.catalog.Db;
-import com.cloudera.impala.catalog.HdfsCachePool;
-import com.cloudera.impala.catalog.ImpaladCatalog;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.util.PatternMatcher;
+import org.apache.impala.authorization.AuthorizationConfig;
+import org.apache.impala.catalog.CatalogException;
+import org.apache.impala.catalog.CatalogServiceCatalog;
+import org.apache.impala.catalog.Db;
+import org.apache.impala.catalog.HdfsCachePool;
+import org.apache.impala.catalog.ImpaladCatalog;
+import org.apache.impala.catalog.Table;
+import org.apache.impala.util.PatternMatcher;
 import com.google.common.base.Preconditions;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/test/java/org/apache/impala/testutil/SentryServicePinger.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/testutil/SentryServicePinger.java b/fe/src/test/java/org/apache/impala/testutil/SentryServicePinger.java
index 7e4c503..9bb3a5d 100644
--- a/fe/src/test/java/org/apache/impala/testutil/SentryServicePinger.java
+++ b/fe/src/test/java/org/apache/impala/testutil/SentryServicePinger.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.testutil;
+package org.apache.impala.testutil;
 
 import org.apache.commons.cli.BasicParser;
 import org.apache.commons.cli.CommandLine;
@@ -24,9 +24,9 @@ import org.apache.commons.cli.Options;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.authorization.User;
-import com.cloudera.impala.authorization.SentryConfig;
-import com.cloudera.impala.util.SentryPolicyService;
+import org.apache.impala.authorization.User;
+import org.apache.impala.authorization.SentryConfig;
+import org.apache.impala.util.SentryPolicyService;
 
 /**
  * Simple class that issues a read-only RPC to the Sentry Service to check if it

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/test/java/org/apache/impala/testutil/TestFileParser.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/testutil/TestFileParser.java b/fe/src/test/java/org/apache/impala/testutil/TestFileParser.java
index b70d868..5bfd018 100644
--- a/fe/src/test/java/org/apache/impala/testutil/TestFileParser.java
+++ b/fe/src/test/java/org/apache/impala/testutil/TestFileParser.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.testutil;
+package org.apache.impala.testutil;
 
 import static org.junit.Assert.fail;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/test/java/org/apache/impala/testutil/TestUtils.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/testutil/TestUtils.java b/fe/src/test/java/org/apache/impala/testutil/TestUtils.java
index e53b831..5454c7b 100644
--- a/fe/src/test/java/org/apache/impala/testutil/TestUtils.java
+++ b/fe/src/test/java/org/apache/impala/testutil/TestUtils.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.testutil;
+package org.apache.impala.testutil;
 import java.io.StringReader;
 import java.io.StringWriter;
 import java.text.SimpleDateFormat;
@@ -36,15 +36,15 @@ import org.junit.Assume;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.catalog.Catalog;
-import com.cloudera.impala.common.RuntimeEnv;
-import com.cloudera.impala.thrift.TClientRequest;
-import com.cloudera.impala.thrift.TNetworkAddress;
-import com.cloudera.impala.thrift.TQueryCtx;
-import com.cloudera.impala.thrift.TQueryOptions;
-import com.cloudera.impala.thrift.TSessionState;
-import com.cloudera.impala.thrift.TSessionType;
-import com.cloudera.impala.thrift.TUniqueId;
+import org.apache.impala.catalog.Catalog;
+import org.apache.impala.common.RuntimeEnv;
+import org.apache.impala.thrift.TClientRequest;
+import org.apache.impala.thrift.TNetworkAddress;
+import org.apache.impala.thrift.TQueryCtx;
+import org.apache.impala.thrift.TQueryOptions;
+import org.apache.impala.thrift.TSessionState;
+import org.apache.impala.thrift.TSessionType;
+import org.apache.impala.thrift.TUniqueId;
 import com.google.common.collect.Maps;
 
 public class TestUtils {

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/test/java/org/apache/impala/util/KuduUtilTest.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/util/KuduUtilTest.java b/fe/src/test/java/org/apache/impala/util/KuduUtilTest.java
index ef5c817..3827d72 100644
--- a/fe/src/test/java/org/apache/impala/util/KuduUtilTest.java
+++ b/fe/src/test/java/org/apache/impala/util/KuduUtilTest.java
@@ -15,11 +15,11 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.util;
+package org.apache.impala.util;
 
 import java.util.List;
 
-import com.cloudera.impala.common.ImpalaRuntimeException;
+import org.apache.impala.common.ImpalaRuntimeException;
 import com.google.common.collect.ImmutableList;
 import org.junit.Test;
 import org.apache.kudu.ColumnSchema;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/test/java/org/apache/impala/util/TestDisjointSet.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/util/TestDisjointSet.java b/fe/src/test/java/org/apache/impala/util/TestDisjointSet.java
index 8038494..a932dfc 100644
--- a/fe/src/test/java/org/apache/impala/util/TestDisjointSet.java
+++ b/fe/src/test/java/org/apache/impala/util/TestDisjointSet.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.util;
+package org.apache.impala.util;
 
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/test/java/org/apache/impala/util/TestRequestPoolService.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/util/TestRequestPoolService.java b/fe/src/test/java/org/apache/impala/util/TestRequestPoolService.java
index 7f0a126..f0887ef 100644
--- a/fe/src/test/java/org/apache/impala/util/TestRequestPoolService.java
+++ b/fe/src/test/java/org/apache/impala/util/TestRequestPoolService.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.util;
+package org.apache.impala.util;
 
 import java.io.File;
 import java.io.IOException;
@@ -29,12 +29,12 @@ import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.TemporaryFolder;
 
-import com.cloudera.impala.common.ByteUnits;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.thrift.TErrorCode;
-import com.cloudera.impala.thrift.TPoolConfig;
-import com.cloudera.impala.thrift.TResolveRequestPoolParams;
-import com.cloudera.impala.thrift.TResolveRequestPoolResult;
+import org.apache.impala.common.ByteUnits;
+import org.apache.impala.common.InternalException;
+import org.apache.impala.thrift.TErrorCode;
+import org.apache.impala.thrift.TPoolConfig;
+import org.apache.impala.thrift.TResolveRequestPoolParams;
+import org.apache.impala.thrift.TResolveRequestPoolResult;
 import com.google.common.collect.Iterables;
 import com.google.common.io.Files;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/testdata/ComplexTypesTbl/nonnullable.avsc
----------------------------------------------------------------------
diff --git a/testdata/ComplexTypesTbl/nonnullable.avsc b/testdata/ComplexTypesTbl/nonnullable.avsc
index 2b6bd1c..5f78dcf 100644
--- a/testdata/ComplexTypesTbl/nonnullable.avsc
+++ b/testdata/ComplexTypesTbl/nonnullable.avsc
@@ -1,5 +1,5 @@
 {"type": "record",
- "namespace": "com.cloudera.impala",
+ "namespace": "org.apache.impala",
  "name": "ComplexTypesTbl",
  "fields": [
      {"name": "id", "type": "long"},

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/testdata/ComplexTypesTbl/nullable.avsc
----------------------------------------------------------------------
diff --git a/testdata/ComplexTypesTbl/nullable.avsc b/testdata/ComplexTypesTbl/nullable.avsc
index 0520241..564bea8 100644
--- a/testdata/ComplexTypesTbl/nullable.avsc
+++ b/testdata/ComplexTypesTbl/nullable.avsc
@@ -1,5 +1,5 @@
 {"type": "record",
- "namespace": "com.cloudera.impala",
+ "namespace": "org.apache.impala",
  "name": "ComplexTypesTbl",
  "fields": [
      {"name": "id", "type": ["null", "long"]},

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/testdata/bin/create-data-source-table.sql
----------------------------------------------------------------------
diff --git a/testdata/bin/create-data-source-table.sql b/testdata/bin/create-data-source-table.sql
index df592fe..3ac0eba 100644
--- a/testdata/bin/create-data-source-table.sql
+++ b/testdata/bin/create-data-source-table.sql
@@ -23,7 +23,7 @@ USE functional;
 DROP DATA SOURCE IF EXISTS AllTypesDataSource;
 CREATE DATA SOURCE AllTypesDataSource
 LOCATION '/test-warehouse/data-sources/test-data-source.jar'
-CLASS 'com.cloudera.impala.extdatasource.AllTypesDataSource'
+CLASS 'org.apache.impala.extdatasource.AllTypesDataSource'
 API_VERSION 'V1';
 
 DROP TABLE IF EXISTS alltypes_datasource;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/testdata/bin/generate-block-ids.sh
----------------------------------------------------------------------
diff --git a/testdata/bin/generate-block-ids.sh b/testdata/bin/generate-block-ids.sh
index ee2ee12..85d4403 100755
--- a/testdata/bin/generate-block-ids.sh
+++ b/testdata/bin/generate-block-ids.sh
@@ -18,5 +18,5 @@
 # under the License.
 
 cd $IMPALA_FE_DIR
-mvn -e exec:java -Dexec.mainClass=com.cloudera.impala.testutil.BlockIdGenerator \
+mvn -e exec:java -Dexec.mainClass=org.apache.impala.testutil.BlockIdGenerator \
               -Dexec.classpathScope=test -Dexec.args="../testdata/block-ids"

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/testdata/bin/generate-load-nested.sh
----------------------------------------------------------------------
diff --git a/testdata/bin/generate-load-nested.sh b/testdata/bin/generate-load-nested.sh
index 5df5b23..f943358 100755
--- a/testdata/bin/generate-load-nested.sh
+++ b/testdata/bin/generate-load-nested.sh
@@ -75,12 +75,12 @@ do
     FILE_NAME=$(basename ${AVRO_SCHEMA_PATH})
     TABLE_NAME="${FILE_NAME%.*}"
     mvn -f "${IMPALA_HOME}/testdata/pom.xml" exec:java \
-      -Dexec.mainClass="com.cloudera.impala.datagenerator.RandomNestedDataGenerator" \
+      -Dexec.mainClass="org.apache.impala.datagenerator.RandomNestedDataGenerator" \
       -Dexec.args="${AVRO_SCHEMA_PATH} ${NUM_ELEMENTS} ${NUM_FIELDS} ${DB_DIR}/${TABLE_NAME}/${TABLE_NAME}.parquet";
 
     if $FLATTEN; then
       mvn -f "${IMPALA_HOME}/testdata/TableFlattener/pom.xml" \
-        exec:java -Dexec.mainClass=com.cloudera.impala.infra.tableflattener.Main \
+        exec:java -Dexec.mainClass=org.apache.impala.infra.tableflattener.Main \
         -Dexec.arguments="file://${DB_DIR}/${TABLE_NAME}/${TABLE_NAME}.parquet,file://${DB_DIR}/${TABLE_NAME}_flat,-sfile://${AVRO_SCHEMA_PATH}";
     fi
   done

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/testdata/bin/random_avro_schema.py
----------------------------------------------------------------------
diff --git a/testdata/bin/random_avro_schema.py b/testdata/bin/random_avro_schema.py
index 5984f86..ccdf25c 100755
--- a/testdata/bin/random_avro_schema.py
+++ b/testdata/bin/random_avro_schema.py
@@ -108,7 +108,7 @@ class AvroGenerator(object):
     tree_root = self._schema_tree_generator.create_tree()
     result = {}
     result['type'] = 'record'
-    result['namespace'] = 'com.cloudera.impala'
+    result['namespace'] = 'org.apache.impala'
     result['name'] = table_name
     result['fields'] = self._convert_struct_fields(tree_root.fields)
     return result

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/testdata/bin/run-sentry-service.sh
----------------------------------------------------------------------
diff --git a/testdata/bin/run-sentry-service.sh b/testdata/bin/run-sentry-service.sh
index 365a616..cb6de28 100755
--- a/testdata/bin/run-sentry-service.sh
+++ b/testdata/bin/run-sentry-service.sh
@@ -33,5 +33,5 @@ export HADOOP_CLASSPATH=${POSTGRES_JDBC_DRIVER}
 ${SENTRY_HOME}/bin/sentry --command service -c ${SENTRY_SERVICE_CONFIG} &
 
 # Wait for the service to come online
-"$JAVA" -cp $CLASSPATH com.cloudera.impala.testutil.SentryServicePinger \
+"$JAVA" -cp $CLASSPATH org.apache.impala.testutil.SentryServicePinger \
     --config_file "${SENTRY_SERVICE_CONFIG}" -n 30 -s 2



[18/61] [partial] incubator-impala git commit: IMPALA-3786: Replace "cloudera" with "apache" (part 1)

Posted by kw...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/planner/RuntimeFilterGenerator.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/planner/RuntimeFilterGenerator.java b/fe/src/main/java/com/cloudera/impala/planner/RuntimeFilterGenerator.java
deleted file mode 100644
index f0cafd5..0000000
--- a/fe/src/main/java/com/cloudera/impala/planner/RuntimeFilterGenerator.java
+++ /dev/null
@@ -1,590 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.planner;
-
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.BinaryPredicate;
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.analysis.ExprSubstitutionMap;
-import com.cloudera.impala.analysis.Predicate;
-import com.cloudera.impala.analysis.SlotDescriptor;
-import com.cloudera.impala.analysis.SlotId;
-import com.cloudera.impala.analysis.SlotRef;
-import com.cloudera.impala.analysis.TupleDescriptor;
-import com.cloudera.impala.analysis.TupleId;
-import com.cloudera.impala.analysis.TupleIsNullPredicate;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.common.IdGenerator;
-import com.cloudera.impala.planner.PlanNode;
-import com.cloudera.impala.thrift.TRuntimeFilterDesc;
-import com.cloudera.impala.thrift.TRuntimeFilterTargetDesc;
-import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import com.google.common.collect.Sets;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Class used for generating and assigning runtime filters to a query plan using
- * runtime filter propagation. Runtime filter propagation is an optimization technique
- * used to filter scanned tuples or scan ranges based on information collected at
- * runtime. A runtime filter is constructed during the build phase of a join node, and is
- * applied at, potentially, multiple scan nodes on the probe side of that join node.
- * Runtime filters are generated from equi-join predicates but they do not replace the
- * original predicates.
- *
- * Example: select * from T1, T2 where T1.a = T2.b and T2.c = '1';
- * Assuming that T1 is a fact table and T2 is a significantly smaller dimension table, a
- * runtime filter is constructed at the join node between tables T1 and T2 while building
- * the hash table on the values of T2.b (rhs of the join condition) from the tuples of T2
- * that satisfy predicate T2.c = '1'. The runtime filter is subsequently sent to the
- * scan node of table T1 and is applied on the values of T1.a (lhs of the join condition)
- * to prune tuples of T1 that cannot be part of the join result.
- *
- * TODO: Consider combining multiple filters, that are applied to the same scan node,
- * into a single filter.
- */
-public final class RuntimeFilterGenerator {
-  private final static Logger LOG =
-      LoggerFactory.getLogger(RuntimeFilterGenerator.class);
-
-  // Map of base table tuple ids to a list of runtime filters that
-  // can be applied at the corresponding scan nodes.
-  private final Map<TupleId, List<RuntimeFilter>> runtimeFiltersByTid_ =
-      Maps.newHashMap();
-
-  // Generator for filter ids
-  private final IdGenerator<RuntimeFilterId> filterIdGenerator =
-      RuntimeFilterId.createGenerator();
-
-  private RuntimeFilterGenerator() {};
-
-  /**
-   * Internal representation of a runtime filter. A runtime filter is generated from
-   * an equi-join predicate of the form <lhs_expr> = <rhs_expr>, where lhs_expr is the
-   * expr on which the filter is applied and must be bound by a single tuple id from
-   * the left plan subtree of the associated join node, while rhs_expr is the expr on
-   * which the filter is built and can be bound by any number of tuple ids from the
-   * right plan subtree. Every runtime filter must record the join node that constructs
-   * the filter and the scan nodes that apply the filter (destination nodes).
-   */
-  public static class RuntimeFilter {
-    // Identifier of the filter (unique within a query)
-    private final RuntimeFilterId id_;
-    // Join node that builds the filter
-    private final JoinNode src_;
-    // Expr (rhs of join predicate) on which the filter is built
-    private final Expr srcExpr_;
-    // Expr (lhs of join predicate) from which the targetExprs_ are generated.
-    private final Expr origTargetExpr_;
-    // Runtime filter targets
-    private final List<RuntimeFilterTarget> targets_ = Lists.newArrayList();
-    // Slots from base table tuples that have value transfer from the slots
-    // of 'origTargetExpr_'. The slots are grouped by tuple id.
-    private final Map<TupleId, List<SlotId>> targetSlotsByTid_;
-    // If true, the join node building this filter is executed using a broadcast join;
-    // set in the DistributedPlanner.createHashJoinFragment()
-    private boolean isBroadcastJoin_;
-    // Estimate of the number of distinct values that will be inserted into this filter,
-    // globally across all instances of the source node. Used to compute an optimal size
-    // for the filter. A value of -1 means no estimate is available, and default filter
-    // parameters should be used.
-    private long ndvEstimate_ = -1;
-    // If true, the filter is produced by a broadcast join and there is at least one
-    // destination scan node which is in the same fragment as the join; set in
-    // DistributedPlanner.createHashJoinFragment().
-    private boolean hasLocalTargets_ = false;
-    // If true, there is at least one destination scan node which is not in the same
-    // fragment as the join that produced the filter; set in
-    // DistributedPlanner.createHashJoinFragment().
-    private boolean hasRemoteTargets_ = false;
-    // If set, indicates that the filter can't be assigned to another scan node.
-    // Once set, it can't be unset.
-    private boolean finalized_ = false;
-
-    /**
-     * Internal representation of a runtime filter target.
-     */
-    private static class RuntimeFilterTarget {
-      // Scan node that applies the filter
-      public ScanNode node;
-      // Expr on which the filter is applied
-      public Expr expr;
-      // Indicates if 'expr' is bound only by partition columns
-      public boolean isBoundByPartitionColumns = false;
-      // Indicates if 'node' is in the same fragment as the join that produces the
-      // filter
-      public boolean isLocalTarget = false;
-
-      public RuntimeFilterTarget(ScanNode targetNode, Expr targetExpr) {
-        node = targetNode;
-        expr = targetExpr;
-      }
-
-      public TRuntimeFilterTargetDesc toThrift() {
-        TRuntimeFilterTargetDesc tFilterTarget = new TRuntimeFilterTargetDesc();
-        tFilterTarget.setNode_id(node.getId().asInt());
-        tFilterTarget.setTarget_expr(expr.treeToThrift());
-        List<SlotId> sids = Lists.newArrayList();
-        expr.getIds(null, sids);
-        List<Integer> tSlotIds = Lists.newArrayListWithCapacity(sids.size());
-        for (SlotId sid: sids) tSlotIds.add(sid.asInt());
-        tFilterTarget.setTarget_expr_slotids(tSlotIds);
-        tFilterTarget.setIs_bound_by_partition_columns(isBoundByPartitionColumns);
-        tFilterTarget.setIs_local_target(isLocalTarget);
-        return tFilterTarget;
-      }
-
-      @Override
-      public String toString() {
-        StringBuilder output = new StringBuilder();
-        return output.append("Target Id: " + node.getId() + " ")
-            .append("Target expr: " + expr.debugString() + " ")
-            .append("Partition columns: " + isBoundByPartitionColumns)
-            .append("Is local: " + isLocalTarget)
-            .toString();
-      }
-    }
-
-    private RuntimeFilter(RuntimeFilterId filterId, JoinNode filterSrcNode,
-        Expr srcExpr, Expr origTargetExpr, Map<TupleId, List<SlotId>> targetSlots) {
-      id_ = filterId;
-      src_ = filterSrcNode;
-      srcExpr_ = srcExpr;
-      origTargetExpr_ = origTargetExpr;
-      targetSlotsByTid_ = targetSlots;
-      computeNdvEstimate();
-    }
-
-    @Override
-    public boolean equals(Object obj) {
-      if (!(obj instanceof RuntimeFilter)) return false;
-      return ((RuntimeFilter) obj).id_.equals(id_);
-    }
-
-    @Override
-    public int hashCode() { return id_.hashCode(); }
-
-    public void markFinalized() { finalized_ = true; }
-    public boolean isFinalized() { return finalized_; }
-
-    /**
-     * Serializes a runtime filter to Thrift.
-     */
-    public TRuntimeFilterDesc toThrift() {
-      TRuntimeFilterDesc tFilter = new TRuntimeFilterDesc();
-      tFilter.setFilter_id(id_.asInt());
-      tFilter.setSrc_expr(srcExpr_.treeToThrift());
-      tFilter.setIs_broadcast_join(isBroadcastJoin_);
-      tFilter.setNdv_estimate(ndvEstimate_);
-      tFilter.setHas_local_targets(hasLocalTargets_);
-      tFilter.setHas_remote_targets(hasRemoteTargets_);
-      boolean appliedOnPartitionColumns = true;
-      for (int i = 0; i < targets_.size(); ++i) {
-        RuntimeFilterTarget target = targets_.get(i);
-        tFilter.addToTargets(target.toThrift());
-        tFilter.putToPlanid_to_target_ndx(target.node.getId().asInt(), i);
-        appliedOnPartitionColumns =
-            appliedOnPartitionColumns && target.isBoundByPartitionColumns;
-      }
-      tFilter.setApplied_on_partition_columns(appliedOnPartitionColumns);
-      return tFilter;
-    }
-
-    /**
-     * Static function to create a RuntimeFilter from 'joinPredicate' that is assigned
-     * to the join node 'filterSrcNode'. Returns an instance of RuntimeFilter
-     * or null if a runtime filter cannot be generated from the specified predicate.
-     */
-    public static RuntimeFilter create(IdGenerator<RuntimeFilterId> idGen,
-        Analyzer analyzer, Expr joinPredicate, JoinNode filterSrcNode) {
-      Preconditions.checkNotNull(idGen);
-      Preconditions.checkNotNull(joinPredicate);
-      Preconditions.checkNotNull(filterSrcNode);
-      // Only consider binary equality predicates
-      if (!Predicate.isEquivalencePredicate(joinPredicate)) return null;
-
-      BinaryPredicate normalizedJoinConjunct =
-          SingleNodePlanner.getNormalizedEqPred(joinPredicate,
-              filterSrcNode.getChild(0).getTupleIds(),
-              filterSrcNode.getChild(1).getTupleIds(), analyzer);
-      if (normalizedJoinConjunct == null) return null;
-
-      Expr targetExpr = normalizedJoinConjunct.getChild(0);
-      Expr srcExpr = normalizedJoinConjunct.getChild(1);
-
-      Map<TupleId, List<SlotId>> targetSlots = getTargetSlots(analyzer, targetExpr);
-      Preconditions.checkNotNull(targetSlots);
-      if (targetSlots.isEmpty()) return null;
-
-      // Ensure that the target expr does not contain TupleIsNull predicates as these
-      // can't be evaluated at a scan node.
-      targetExpr = TupleIsNullPredicate.unwrapExpr(targetExpr.clone());
-      LOG.trace("Generating runtime filter from predicate " + joinPredicate);
-      return new RuntimeFilter(idGen.getNextId(), filterSrcNode,
-          srcExpr, targetExpr, targetSlots);
-    }
-
-    /**
-     * Returns the ids of base table tuple slots on which a runtime filter expr can be
-     * applied. Due to the existence of equivalence classes, a filter expr may be
-     * applicable at multiple scan nodes. The returned slot ids are grouped by tuple id.
-     * Returns an empty collection if the filter expr cannot be applied at a base table.
-     */
-    private static Map<TupleId, List<SlotId>> getTargetSlots(Analyzer analyzer,
-        Expr expr) {
-      // 'expr' is not a SlotRef and may contain multiple SlotRefs
-      List<TupleId> tids = Lists.newArrayList();
-      List<SlotId> sids = Lists.newArrayList();
-      expr.getIds(tids, sids);
-      Map<TupleId, List<SlotId>> slotsByTid = Maps.newHashMap();
-      // We need to iterate over all the slots of 'expr' and check if they have
-      // equivalent slots that are bound by the same base table tuple(s).
-      for (SlotId slotId: sids) {
-        Map<TupleId, List<SlotId>> currSlotsByTid =
-            getBaseTblEquivSlots(analyzer, slotId);
-        if (currSlotsByTid.isEmpty()) return Collections.emptyMap();
-        if (slotsByTid.isEmpty()) {
-          slotsByTid.putAll(currSlotsByTid);
-          continue;
-        }
-
-        // Compute the intersection between tuple ids from 'slotsByTid' and
-        // 'currSlotsByTid'. If the intersection is empty, an empty collection
-        // is returned.
-        Iterator<Map.Entry<TupleId, List<SlotId>>> iter =
-            slotsByTid.entrySet().iterator();
-        while (iter.hasNext()) {
-          Map.Entry<TupleId, List<SlotId>> entry = iter.next();
-          List<SlotId> slotIds = currSlotsByTid.get(entry.getKey());
-          if (slotIds == null) {
-            iter.remove();
-          } else {
-            entry.getValue().addAll(slotIds);
-          }
-        }
-        if (slotsByTid.isEmpty()) return Collections.emptyMap();
-      }
-      return slotsByTid;
-    }
-
-    /**
-     * Static function that returns the ids of slots bound by base table tuples for which
-     * there is a value transfer from 'srcSid'. The slots are grouped by tuple id.
-     */
-    private static Map<TupleId, List<SlotId>> getBaseTblEquivSlots(Analyzer analyzer,
-        SlotId srcSid) {
-      Map<TupleId, List<SlotId>> slotsByTid = Maps.newHashMap();
-      for (SlotId targetSid: analyzer.getValueTransferTargets(srcSid)) {
-        TupleDescriptor tupleDesc = analyzer.getSlotDesc(targetSid).getParent();
-        if (tupleDesc.getTable() == null) continue;
-        List<SlotId> sids = slotsByTid.get(tupleDesc.getId());
-        if (sids == null) {
-          sids = Lists.newArrayList();
-          slotsByTid.put(tupleDesc.getId(), sids);
-        }
-        sids.add(targetSid);
-      }
-      return slotsByTid;
-    }
-
-    public Expr getTargetExpr(PlanNodeId targetPlanNodeId) {
-      for (RuntimeFilterTarget target: targets_) {
-        if (target.node.getId() != targetPlanNodeId) continue;
-        return target.expr;
-      }
-      return null;
-    }
-
-    public List<RuntimeFilterTarget> getTargets() { return targets_; }
-    public boolean hasTargets() { return !targets_.isEmpty(); }
-    public Expr getSrcExpr() { return srcExpr_; }
-    public Expr getOrigTargetExpr() { return origTargetExpr_; }
-    public Map<TupleId, List<SlotId>> getTargetSlots() { return targetSlotsByTid_; }
-    public RuntimeFilterId getFilterId() { return id_; }
-
-    /**
-     * Estimates the selectivity of a runtime filter as the cardinality of the
-     * associated source join node over the cardinality of that join node's left
-     * child.
-     */
-    public double getSelectivity() {
-      if (src_.getCardinality() == -1 || src_.getChild(0).getCardinality() == -1) {
-        return -1;
-      }
-      return src_.getCardinality() / (double) src_.getChild(0).getCardinality();
-    }
-
-    public void addTarget(ScanNode node, Analyzer analyzer, Expr targetExpr) {
-      Preconditions.checkState(targetExpr.isBoundByTupleIds(node.getTupleIds()));
-      RuntimeFilterTarget target = new RuntimeFilterTarget(node, targetExpr);
-      targets_.add(target);
-      // Check if all the slots of targetExpr_ are bound by partition columns
-      TupleDescriptor baseTblDesc = node.getTupleDesc();
-      Table tbl = baseTblDesc.getTable();
-      if (tbl.getNumClusteringCols() == 0) return;
-      List<SlotId> sids = Lists.newArrayList();
-      targetExpr.getIds(null, sids);
-      for (SlotId sid: sids) {
-        SlotDescriptor slotDesc = analyzer.getSlotDesc(sid);
-        if (slotDesc.getColumn() == null
-            || slotDesc.getColumn().getPosition() >= tbl.getNumClusteringCols()) {
-          return;
-        }
-      }
-      target.isBoundByPartitionColumns = true;
-    }
-
-    public void setIsBroadcast(boolean isBroadcast) { isBroadcastJoin_ = isBroadcast; }
-
-    public void computeNdvEstimate() { ndvEstimate_ = src_.getChild(1).getCardinality(); }
-
-    public void computeHasLocalTargets() {
-      Preconditions.checkNotNull(src_.getFragment());
-      Preconditions.checkState(hasTargets());
-      for (RuntimeFilterTarget target: targets_) {
-        Preconditions.checkNotNull(target.node.getFragment());
-        boolean isLocal =
-            src_.getFragment().getId().equals(target.node.getFragment().getId());
-        target.isLocalTarget = isLocal;
-        hasLocalTargets_ = hasLocalTargets_ || isLocal;
-        hasRemoteTargets_ = hasRemoteTargets_ || !isLocal;
-      }
-    }
-
-    /**
-     * Assigns this runtime filter to the corresponding plan nodes.
-     */
-    public void assignToPlanNodes() {
-      Preconditions.checkState(hasTargets());
-      src_.addRuntimeFilter(this);
-      for (RuntimeFilterTarget target: targets_) target.node.addRuntimeFilter(this);
-    }
-
-    public String debugString() {
-      StringBuilder output = new StringBuilder();
-      return output.append("FilterID: " + id_ + " ")
-          .append("Source: " + src_.getId() + " ")
-          .append("SrcExpr: " + getSrcExpr().debugString() +  " ")
-          .append("Target(s): ")
-          .append(Joiner.on(", ").join(targets_) + " ")
-          .append("Selectivity: " + getSelectivity()).toString();
-    }
-  }
-
-  /**
-   * Generates and assigns runtime filters to a query plan tree.
-   */
-  public static void generateRuntimeFilters(Analyzer analyzer, PlanNode plan,
-      int maxNumFilters) {
-    Preconditions.checkArgument(maxNumFilters >= 0);
-    RuntimeFilterGenerator filterGenerator = new RuntimeFilterGenerator();
-    filterGenerator.generateFilters(analyzer, plan);
-    List<RuntimeFilter> filters = Lists.newArrayList(filterGenerator.getRuntimeFilters());
-    if (filters.size() > maxNumFilters) {
-      // If more than 'maxNumFilters' were generated, sort them by increasing selectivity
-      // and keep the 'maxNumFilters' most selective.
-      Collections.sort(filters, new Comparator<RuntimeFilter>() {
-          public int compare(RuntimeFilter a, RuntimeFilter b) {
-            double aSelectivity =
-                a.getSelectivity() == -1 ? Double.MAX_VALUE : a.getSelectivity();
-            double bSelectivity =
-                b.getSelectivity() == -1 ? Double.MAX_VALUE : b.getSelectivity();
-            double diff = aSelectivity - bSelectivity;
-            return (diff < 0.0 ? -1 : (diff > 0.0 ? 1 : 0));
-          }
-        }
-      );
-    }
-    for (RuntimeFilter filter:
-         filters.subList(0, Math.min(filters.size(), maxNumFilters))) {
-      LOG.trace("Runtime filter: " + filter.debugString());
-      filter.assignToPlanNodes();
-    }
-  }
-
-  /**
-   * Returns a set of all the registered runtime filters.
-   */
-  public Set<RuntimeFilter> getRuntimeFilters() {
-    Set<RuntimeFilter> result = Sets.newHashSet();
-    for (List<RuntimeFilter> filters: runtimeFiltersByTid_.values()) {
-      result.addAll(filters);
-    }
-    return result;
-  }
-
-  /**
-   * Generates the runtime filters for a query by recursively traversing the single-node
-   * plan tree rooted at 'root'. In the top-down traversal of the plan tree, candidate
-   * runtime filters are generated from equi-join predicates. In the bottom-up traversal
-   * of the plan tree, the filters are assigned to destination (scan) nodes. Filters
-   * that cannot be assigned to a scan node are discarded.
-   */
-  private void generateFilters(Analyzer analyzer, PlanNode root) {
-    if (root instanceof JoinNode) {
-      JoinNode joinNode = (JoinNode) root;
-      List<Expr> joinConjuncts = Lists.newArrayList();
-      if (!joinNode.getJoinOp().isLeftOuterJoin()
-          && !joinNode.getJoinOp().isFullOuterJoin()
-          && !joinNode.getJoinOp().isAntiJoin()) {
-        // It's not correct to push runtime filters to the left side of a left outer,
-        // full outer or anti join if the filter corresponds to an equi-join predicate
-        // from the ON clause.
-        joinConjuncts.addAll(joinNode.getEqJoinConjuncts());
-      }
-      joinConjuncts.addAll(joinNode.getConjuncts());
-      List<RuntimeFilter> filters = Lists.newArrayList();
-      for (Expr conjunct: joinConjuncts) {
-        RuntimeFilter filter = RuntimeFilter.create(filterIdGenerator, analyzer,
-            conjunct, joinNode);
-        if (filter == null) continue;
-        registerRuntimeFilter(filter);
-        filters.add(filter);
-      }
-      generateFilters(analyzer, root.getChild(0));
-      // Finalize every runtime filter of that join. This is to ensure that we don't
-      // assign a filter to a scan node from the right subtree of joinNode or ancestor
-      // join nodes in case we don't find a destination node in the left subtree.
-      for (RuntimeFilter runtimeFilter: filters) finalizeRuntimeFilter(runtimeFilter);
-      generateFilters(analyzer, root.getChild(1));
-    } else if (root instanceof ScanNode) {
-      assignRuntimeFilters(analyzer, (ScanNode) root);
-    } else {
-      for (PlanNode childNode: root.getChildren()) {
-        generateFilters(analyzer, childNode);
-      }
-    }
-  }
-
-  /**
-   * Registers a runtime filter with the tuple id of every scan node that is a candidate
-   * destination node for that filter.
-   */
-  private void registerRuntimeFilter(RuntimeFilter filter) {
-    Map<TupleId, List<SlotId>> targetSlotsByTid = filter.getTargetSlots();
-    Preconditions.checkState(targetSlotsByTid != null && !targetSlotsByTid.isEmpty());
-    for (TupleId tupleId: targetSlotsByTid.keySet()) {
-      registerRuntimeFilter(filter, tupleId);
-    }
-  }
-
-  /**
-   * Registers a runtime filter with a specific target tuple id.
-   */
-  private void registerRuntimeFilter(RuntimeFilter filter, TupleId targetTid) {
-    Preconditions.checkState(filter.getTargetSlots().containsKey(targetTid));
-    List<RuntimeFilter> filters = runtimeFiltersByTid_.get(targetTid);
-    if (filters == null) {
-      filters = Lists.newArrayList();
-      runtimeFiltersByTid_.put(targetTid, filters);
-    }
-    Preconditions.checkState(!filter.isFinalized());
-    filters.add(filter);
-  }
-
-  /**
-   * Finalizes a runtime filter by disassociating it from all the candidate target scan
-   * nodes that haven't been used as destinations for that filter. Also sets the
-   * finalized_ flag of that filter so that it can't be assigned to any other scan nodes.
-   */
-  private void finalizeRuntimeFilter(RuntimeFilter runtimeFilter) {
-    Set<TupleId> targetTupleIds = Sets.newHashSet();
-    for (RuntimeFilter.RuntimeFilterTarget target: runtimeFilter.getTargets()) {
-      targetTupleIds.addAll(target.node.getTupleIds());
-    }
-    for (TupleId tupleId: runtimeFilter.getTargetSlots().keySet()) {
-      if (!targetTupleIds.contains(tupleId)) {
-        runtimeFiltersByTid_.get(tupleId).remove(runtimeFilter);
-      }
-    }
-    runtimeFilter.markFinalized();
-  }
-
-  /**
-   * Assigns runtime filters to a specific scan node 'scanNode'.
-   * The assigned filters are the ones for which 'scanNode' can be used as a destination
-   * node. A scan node may be used as a destination node for multiple runtime filters.
-   * Currently, runtime filters can only be assigned to HdfsScanNodes.
-   */
-  private void assignRuntimeFilters(Analyzer analyzer, ScanNode scanNode) {
-    if (!(scanNode instanceof HdfsScanNode)) return;
-    TupleId tid = scanNode.getTupleIds().get(0);
-    if (!runtimeFiltersByTid_.containsKey(tid)) return;
-    for (RuntimeFilter filter: runtimeFiltersByTid_.get(tid)) {
-      if (filter.isFinalized()) continue;
-      Expr targetExpr = computeTargetExpr(filter, tid, analyzer);
-      if (targetExpr == null) continue;
-      filter.addTarget(scanNode, analyzer, targetExpr);
-    }
-  }
-
-  /**
-   * Computes the target expr for a specified runtime filter 'filter' to be applied at
-   * the scan node with target tuple descriptor 'targetTid'.
-   */
-  private Expr computeTargetExpr(RuntimeFilter filter, TupleId targetTid,
-      Analyzer analyzer) {
-    Expr targetExpr = filter.getOrigTargetExpr();
-    if (!targetExpr.isBound(targetTid)) {
-      Preconditions.checkState(filter.getTargetSlots().containsKey(targetTid));
-      // Modify the filter target expr using the equivalent slots from the scan node
-      // on which the filter will be applied.
-      ExprSubstitutionMap smap = new ExprSubstitutionMap();
-      List<SlotRef> exprSlots = Lists.newArrayList();
-      targetExpr.collect(SlotRef.class, exprSlots);
-      List<SlotId> sids = filter.getTargetSlots().get(targetTid);
-      for (SlotRef slotRef: exprSlots) {
-        for (SlotId sid: sids) {
-          if (analyzer.hasValueTransfer(slotRef.getSlotId(), sid)) {
-            SlotRef newSlotRef = new SlotRef(analyzer.getSlotDesc(sid));
-            newSlotRef.analyzeNoThrow(analyzer);
-            smap.put(slotRef, newSlotRef);
-            break;
-          }
-        }
-      }
-      Preconditions.checkState(exprSlots.size() == smap.size());
-      try {
-        targetExpr = targetExpr.substitute(smap, analyzer, true);
-      } catch (Exception e) {
-        // An exception is thrown if we cannot generate a target expr from this
-        // scan node that has the same type as the lhs expr of the join predicate
-        // from which the runtime filter was generated. We skip that scan node and will
-        // try to assign the filter to a different scan node.
-        //
-        // TODO: Investigate if we can generate a type-compatible source/target expr
-        // pair from that scan node instead of skipping it.
-        return null;
-      }
-    }
-    Preconditions.checkState(
-        targetExpr.getType().matchesType(filter.getSrcExpr().getType()));
-    return targetExpr;
-  }
-}
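
The pruning step in generateRuntimeFilters() above keeps only the 'maxNumFilters' most
selective filters. Below is a standalone sketch of that ordering rule (not part of the
diff; the SimpleFilter class and the sample numbers are hypothetical, only the
comparator logic mirrors the code above):

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

class FilterPruningSketch {
  static class SimpleFilter {
    final String name;
    final double selectivity;  // -1 means unknown, as in RuntimeFilter.getSelectivity()
    SimpleFilter(String name, double selectivity) {
      this.name = name;
      this.selectivity = selectivity;
    }
  }

  // Sort by increasing selectivity (unknown sorts last) and keep the most selective.
  static List<SimpleFilter> prune(List<SimpleFilter> filters, int maxNumFilters) {
    List<SimpleFilter> sorted = new ArrayList<>(filters);
    sorted.sort(Comparator.comparingDouble(
        (SimpleFilter f) -> f.selectivity == -1 ? Double.MAX_VALUE : f.selectivity));
    return sorted.subList(0, Math.min(sorted.size(), maxNumFilters));
  }

  public static void main(String[] args) {
    List<SimpleFilter> filters = new ArrayList<>();
    filters.add(new SimpleFilter("RF000", 0.10));
    filters.add(new SimpleFilter("RF001", -1));    // unknown selectivity
    filters.add(new SimpleFilter("RF002", 0.01));
    // Prints RF002 then RF000; RF001 is dropped.
    for (SimpleFilter f : prune(filters, 2)) System.out.println(f.name);
  }
}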

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/planner/RuntimeFilterId.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/planner/RuntimeFilterId.java b/fe/src/main/java/com/cloudera/impala/planner/RuntimeFilterId.java
deleted file mode 100644
index 32af78c..0000000
--- a/fe/src/main/java/com/cloudera/impala/planner/RuntimeFilterId.java
+++ /dev/null
@@ -1,45 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.planner;
-
-import com.cloudera.impala.common.Id;
-import com.cloudera.impala.common.IdGenerator;
-
-public class RuntimeFilterId extends Id<RuntimeFilterId> {
-  // Construction only allowed via an IdGenerator.
-  protected RuntimeFilterId(int id) {
-    super(id);
-  }
-
-  public static IdGenerator<RuntimeFilterId> createGenerator() {
-    return new IdGenerator<RuntimeFilterId>() {
-      @Override
-      public RuntimeFilterId getNextId() { return new RuntimeFilterId(nextId_++); }
-      @Override
-      public RuntimeFilterId getMaxId() { return new RuntimeFilterId(nextId_ - 1); }
-    };
-  }
-
-  @Override
-  public String toString() {
-    return String.format("RF%03d", id_);
-  }
-
-  @Override
-  public int hashCode() { return id_; }
-}
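
For illustration only (not part of the diff): the generator above hands out sequential
ids that print as RF000, RF001, ... in explain plans. A minimal sketch, with a
hypothetical Counter standing in for com.cloudera.impala.common.IdGenerator:

class RuntimeFilterIdSketch {
  // Hypothetical stand-in for the IdGenerator used by RuntimeFilterId.createGenerator().
  static class Counter {
    private int nextId_ = 0;
    String next() { return String.format("RF%03d", nextId_++); }
  }

  public static void main(String[] args) {
    Counter gen = new Counter();
    System.out.println(gen.next());  // RF000
    System.out.println(gen.next());  // RF001
    System.out.println(gen.next());  // RF002
  }
}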

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/planner/ScanNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/planner/ScanNode.java b/fe/src/main/java/com/cloudera/impala/planner/ScanNode.java
deleted file mode 100644
index b442b4a..0000000
--- a/fe/src/main/java/com/cloudera/impala/planner/ScanNode.java
+++ /dev/null
@@ -1,191 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.planner;
-
-import java.util.List;
-
-import com.cloudera.impala.analysis.SlotDescriptor;
-import com.cloudera.impala.analysis.TupleDescriptor;
-import com.cloudera.impala.catalog.HdfsFileFormat;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.NotImplementedException;
-import com.cloudera.impala.thrift.TExplainLevel;
-import com.cloudera.impala.thrift.TNetworkAddress;
-import com.cloudera.impala.thrift.TScanRangeLocations;
-import com.google.common.base.Joiner;
-import com.google.common.base.Objects;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-/**
- * Representation of the common elements of all scan nodes.
- */
-abstract public class ScanNode extends PlanNode {
-  protected final TupleDescriptor desc_;
-
-  // Total number of rows this node is expected to process
-  protected long inputCardinality_ = -1;
-
-  // Counter indicating if partitions have missing statistics
-  protected int numPartitionsMissingStats_ = 0;
-
-  // List of scan-range locations. Populated in init().
-  protected List<TScanRangeLocations> scanRanges_;
-
-  public ScanNode(PlanNodeId id, TupleDescriptor desc, String displayName) {
-    super(id, desc.getId().asList(), displayName);
-    desc_ = desc;
-  }
-
-  public TupleDescriptor getTupleDesc() { return desc_; }
-
-  /**
-   * Checks if this scan is supported based on the types of scanned columns and the
-   * underlying file formats, in particular, whether complex types are supported.
-   *
-   * The default implementation throws if this scan would need to materialize a nested
-   * field or collection. The scan is ok if the table schema contains complex types, as
-   * long as the query does not reference them.
-   *
-   * Subclasses should override this function as appropriate.
-   */
-  protected void checkForSupportedFileFormats() throws NotImplementedException {
-    Preconditions.checkNotNull(desc_);
-    Preconditions.checkNotNull(desc_.getTable());
-    for (SlotDescriptor slotDesc: desc_.getSlots()) {
-      if (slotDesc.getType().isComplexType() || slotDesc.getColumn() == null) {
-        Preconditions.checkNotNull(slotDesc.getPath());
-        throw new NotImplementedException(String.format(
-            "Scan of table '%s' is not supported because '%s' references a nested " +
-            "field/collection.\nComplex types are supported for these file formats: %s.",
-            desc_.getAlias(), slotDesc.getPath().toString(),
-            Joiner.on(", ").join(HdfsFileFormat.complexTypesFormats())));
-      }
-    }
-  }
-
-  /**
-   * Returns all scan ranges plus their locations.
-   */
-  public List<TScanRangeLocations> getScanRangeLocations() {
-    Preconditions.checkNotNull(scanRanges_, "Need to call init() first.");
-    return scanRanges_;
-  }
-
-  @Override
-  protected String debugString() {
-    return Objects.toStringHelper(this)
-        .add("tid", desc_.getId().asInt())
-        .add("tblName", desc_.getTable().getFullName())
-        .add("keyRanges", "")
-        .addValue(super.debugString())
-        .toString();
-  }
-
-  /**
-   * Returns the explain string for table and column stats to be included in a
-   * ScanNode's explain string. The given prefix is prepended to each of the lines.
-   * The prefix is used for proper formatting when the string returned by this method
-   * is embedded in a query's explain plan.
-   */
-  protected String getStatsExplainString(String prefix, TExplainLevel detailLevel) {
-    StringBuilder output = new StringBuilder();
-    // Table stats.
-    if (desc_.getTable().getNumRows() == -1) {
-      output.append(prefix + "table stats: unavailable");
-    } else {
-      output.append(prefix + "table stats: " + desc_.getTable().getNumRows() +
-          " rows total");
-      if (numPartitionsMissingStats_ > 0) {
-        output.append(" (" + numPartitionsMissingStats_ + " partition(s) missing stats)");
-      }
-    }
-    output.append("\n");
-
-    // Column stats.
-    List<String> columnsMissingStats = Lists.newArrayList();
-    for (SlotDescriptor slot: desc_.getSlots()) {
-      if (!slot.getStats().hasStats() && slot.getColumn() != null) {
-        columnsMissingStats.add(slot.getColumn().getName());
-      }
-    }
-    if (columnsMissingStats.isEmpty()) {
-      output.append(prefix + "column stats: all");
-    } else if (columnsMissingStats.size() == desc_.getSlots().size()) {
-      output.append(prefix + "column stats: unavailable");
-    } else {
-      output.append(String.format("%scolumns missing stats: %s", prefix,
-          Joiner.on(", ").join(columnsMissingStats)));
-    }
-    return output.toString();
-  }
-
-  /**
-   * Returns true if the table underlying this scan is missing table stats
-   * or column stats relevant to this scan node.
-   */
-  public boolean isTableMissingStats() {
-    return isTableMissingColumnStats() || isTableMissingTableStats();
-  }
-
-  public boolean isTableMissingTableStats() {
-    if (desc_.getTable().getNumRows() == -1) return true;
-    return numPartitionsMissingStats_ > 0;
-  }
-
-  /**
-   * Returns true if the tuple descriptor references a path with a collection type.
-   */
-  public boolean isAccessingCollectionType() {
-    for (Type t: desc_.getPath().getMatchedTypes()) {
-      if (t.isCollectionType()) return true;
-    }
-    return false;
-  }
-
-  public boolean isTableMissingColumnStats() {
-    for (SlotDescriptor slot: desc_.getSlots()) {
-      if (!slot.getStats().hasStats()) return true;
-    }
-    return false;
-  }
-
-  /**
-   * Returns true if the scanned table is suspected to have corrupt table stats,
-   * in particular, if the scan is non-empty and 'numRows' is 0 or negative (but not -1).
-   */
-  public boolean hasCorruptTableStats() { return false; }
-
-  /**
-   * Helper function to parse a "host:port" address string into TNetworkAddress
-   * This is called with ipaddress:port when doing scan range assignment.
-   */
-  protected static TNetworkAddress addressToTNetworkAddress(String address) {
-    TNetworkAddress result = new TNetworkAddress();
-    String[] hostPort = address.split(":");
-    result.hostname = hostPort[0];
-    result.port = Integer.parseInt(hostPort[1]);
-    return result;
-  }
-
-  @Override
-  public long getInputCardinality() {
-    if (getConjuncts().isEmpty() && hasLimit()) return getLimit();
-    return inputCardinality_;
-  }
-}
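
addressToTNetworkAddress() above splits an "ipaddress:port" string into host and port.
A self-contained sketch of the same parsing (not part of the diff; the HostPort class
is hypothetical, the real method fills a Thrift TNetworkAddress):

class HostPortSketch {
  static class HostPort {
    final String hostname;
    final int port;
    HostPort(String hostname, int port) { this.hostname = hostname; this.port = port; }
  }

  // Splits an "ipaddress:port" string, as addressToTNetworkAddress() does; the
  // scan-range assignment code only passes strings of that shape.
  static HostPort parse(String address) {
    String[] hostPort = address.split(":");
    return new HostPort(hostPort[0], Integer.parseInt(hostPort[1]));
  }

  public static void main(String[] args) {
    HostPort hp = parse("10.0.0.1:22000");
    System.out.println(hp.hostname + " port " + hp.port);  // 10.0.0.1 port 22000
  }
}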

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/planner/SelectNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/planner/SelectNode.java b/fe/src/main/java/com/cloudera/impala/planner/SelectNode.java
deleted file mode 100644
index b418224..0000000
--- a/fe/src/main/java/com/cloudera/impala/planner/SelectNode.java
+++ /dev/null
@@ -1,93 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.planner;
-
-import java.util.List;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.thrift.TExplainLevel;
-import com.cloudera.impala.thrift.TPlanNode;
-import com.cloudera.impala.thrift.TPlanNodeType;
-import com.google.common.base.Preconditions;
-
-/**
- * Node that applies conjuncts and a limit clause. Has exactly one child.
- */
-public class SelectNode extends PlanNode {
-  private final static Logger LOG = LoggerFactory.getLogger(SelectNode.class);
-
-  protected SelectNode(PlanNodeId id, PlanNode child, List<Expr> conjuncts) {
-    super(id, "SELECT");
-    addChild(child);
-    conjuncts_.addAll(conjuncts);
-    computeTupleIds();
-  }
-
-  @Override
-  public void computeTupleIds() {
-    clearTupleIds();
-    tblRefIds_.addAll(getChild(0).getTblRefIds());
-    tupleIds_.addAll(getChild(0).getTupleIds());
-    nullableTupleIds_.addAll(getChild(0).getNullableTupleIds());
-  }
-
-  @Override
-  protected void toThrift(TPlanNode msg) {
-    msg.node_type = TPlanNodeType.SELECT_NODE;
-  }
-
-  @Override
-  public void init(Analyzer analyzer) {
-    analyzer.markConjunctsAssigned(conjuncts_);
-    conjuncts_ = orderConjunctsByCost(conjuncts_);
-    computeStats(analyzer);
-    createDefaultSmap(analyzer);
-  }
-
-  @Override
-  public void computeStats(Analyzer analyzer) {
-    super.computeStats(analyzer);
-    if (getChild(0).cardinality_ == -1) {
-      cardinality_ = -1;
-    } else {
-      cardinality_ =
-          Math.round(((double) getChild(0).cardinality_) * computeSelectivity());
-      Preconditions.checkState(cardinality_ >= 0);
-    }
-    cardinality_ = capAtLimit(cardinality_);
-    LOG.debug("stats Select: cardinality=" + Long.toString(cardinality_));
-  }
-
-  @Override
-  protected String getNodeExplainString(String prefix, String detailPrefix,
-      TExplainLevel detailLevel) {
-    StringBuilder output = new StringBuilder();
-    output.append(String.format("%s%s:%s\n", prefix, id_.toString(), displayName_));
-    if (detailLevel.ordinal() >= TExplainLevel.STANDARD.ordinal()) {
-      if (!conjuncts_.isEmpty()) {
-        output.append(detailPrefix + "predicates: " +
-            getExplainString(conjuncts_) + "\n");
-      }
-    }
-    return output.toString();
-  }
-}
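
SelectNode.computeStats() above estimates cardinality as the child's cardinality times
the node's selectivity, propagating -1 as "unknown" and capping the result at the limit.
A standalone sketch under those assumptions (the capAtLimit() behavior is inferred here,
since its body is not shown in this diff):

class SelectCardinalitySketch {
  // childCardinality == -1 means "unknown"; limit == -1 means "no limit" in this sketch.
  static long estimate(long childCardinality, double selectivity, long limit) {
    long cardinality =
        childCardinality == -1 ? -1 : Math.round(childCardinality * selectivity);
    // Assumed capAtLimit() behavior: unknown stays unknown, otherwise cap at the limit.
    if (limit != -1 && cardinality != -1 && cardinality > limit) cardinality = limit;
    return cardinality;
  }

  public static void main(String[] args) {
    System.out.println(estimate(1000000, 0.1, -1));   // 100000
    System.out.println(estimate(1000000, 0.1, 500));  // 500 (capped by the LIMIT)
    System.out.println(estimate(-1, 0.1, 500));       // -1 (unknown stays unknown)
  }
}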



[07/61] [partial] incubator-impala git commit: IMPALA-3786: Replace "cloudera" with "apache" (part 1)

Posted by kw...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/Analyzer.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/Analyzer.java b/fe/src/main/java/org/apache/impala/analysis/Analyzer.java
new file mode 100644
index 0000000..a931489
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/Analyzer.java
@@ -0,0 +1,2932 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.IdentityHashMap;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.cloudera.impala.analysis.Path.PathType;
+import com.cloudera.impala.authorization.AuthorizationConfig;
+import com.cloudera.impala.authorization.Privilege;
+import com.cloudera.impala.authorization.PrivilegeRequest;
+import com.cloudera.impala.authorization.PrivilegeRequestBuilder;
+import com.cloudera.impala.authorization.User;
+import com.cloudera.impala.catalog.CatalogException;
+import com.cloudera.impala.catalog.Column;
+import com.cloudera.impala.catalog.DataSourceTable;
+import com.cloudera.impala.catalog.DatabaseNotFoundException;
+import com.cloudera.impala.catalog.Db;
+import com.cloudera.impala.catalog.HBaseTable;
+import com.cloudera.impala.catalog.HdfsTable;
+import com.cloudera.impala.catalog.ImpaladCatalog;
+import com.cloudera.impala.catalog.KuduTable;
+import com.cloudera.impala.catalog.Table;
+import com.cloudera.impala.catalog.TableLoadingException;
+import com.cloudera.impala.catalog.Type;
+import com.cloudera.impala.catalog.View;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.common.Id;
+import com.cloudera.impala.common.IdGenerator;
+import com.cloudera.impala.common.ImpalaException;
+import com.cloudera.impala.common.InternalException;
+import com.cloudera.impala.common.Pair;
+import com.cloudera.impala.common.PrintUtils;
+import com.cloudera.impala.planner.PlanNode;
+import com.cloudera.impala.service.FeSupport;
+import com.cloudera.impala.thrift.TAccessEvent;
+import com.cloudera.impala.thrift.TCatalogObjectType;
+import com.cloudera.impala.thrift.TLineageGraph;
+import com.cloudera.impala.thrift.TNetworkAddress;
+import com.cloudera.impala.thrift.TQueryCtx;
+import com.cloudera.impala.util.DisjointSet;
+import com.cloudera.impala.util.EventSequence;
+import com.cloudera.impala.util.ListMap;
+import com.cloudera.impala.util.TSessionStateUtil;
+import com.google.common.base.Joiner;
+import com.google.common.base.Preconditions;
+import com.google.common.base.Predicates;
+import com.google.common.base.Strings;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+
+/**
+ * Repository of analysis state for a single select block.
+ *
+ * Conjuncts:
+ * Conjuncts are registered during analysis (registerConjuncts()) and assigned during the
+ * planning process (getUnassigned[Oj]Conjuncts()/isConjunctAssigned()/
+ * markConjunctsAssigned()).
+ * All conjuncts are assigned a unique id when initially registered, and all registered
+ * conjuncts are referenced by their id (i.e., there are no containers other than the one
+ * holding the referenced conjuncts), to make substitute() simple.
+ *
+ * Slot equivalence classes:
+ * Equivalence of individual slots is computed based on registered equality predicates;
+ * those predicates are either present directly in the query or are implied by the
+ * syntactic elements used in query (example: a GROUP BY clause has implied equality
+ * predicates between the grouping exprs and the grouping slots of the aggregation
+ * output tuple).
+ * Implied equality predicates are registered with createAuxEquivPredicate(); they are
+ * never assigned during plan generation.
+ * Also tracks each catalog object access, so authorization checks can be performed once
+ * analysis is complete.
+ * TODO: We often use the terms stmt/block/analyzer interchangeably, although they may
+ * have slightly different meanings (sometimes depending on the context). Use the terms
+ * more accurately and consistently here and elsewhere.
+ */
+public class Analyzer {
+  // Common analysis error messages
+  public final static String DB_DOES_NOT_EXIST_ERROR_MSG = "Database does not exist: ";
+  public final static String DB_ALREADY_EXISTS_ERROR_MSG = "Database already exists: ";
+  public final static String TBL_DOES_NOT_EXIST_ERROR_MSG = "Table does not exist: ";
+  public final static String TBL_ALREADY_EXISTS_ERROR_MSG = "Table already exists: ";
+  public final static String FN_DOES_NOT_EXIST_ERROR_MSG = "Function does not exist: ";
+  public final static String FN_ALREADY_EXISTS_ERROR_MSG = "Function already exists: ";
+  public final static String DATA_SRC_DOES_NOT_EXIST_ERROR_MSG =
+      "Data source does not exist: ";
+  public final static String DATA_SRC_ALREADY_EXISTS_ERROR_MSG =
+      "Data source already exists: ";
+
+  private final static Logger LOG = LoggerFactory.getLogger(Analyzer.class);
+
+  private final User user_;
+
+  // Indicates whether this query block contains a straight join hint.
+  private boolean isStraightJoin_ = false;
+
+  // Whether to use Hive's auto-generated column labels.
+  private boolean useHiveColLabels_ = false;
+
+  // True if the corresponding select block has a limit and/or offset clause.
+  private boolean hasLimitOffsetClause_ = false;
+
+  // Current depth of nested analyze() calls. Used for enforcing a
+  // maximum expr-tree depth. Needs to be manually maintained by the user
+  // of this Analyzer with incrementCallDepth() and decrementCallDepth().
+  private int callDepth_ = 0;
+
+  // Flag indicating if this analyzer instance belongs to a subquery.
+  private boolean isSubquery_ = false;
+
+  // Flag indicating whether this analyzer belongs to a WITH clause view.
+  private boolean isWithClause_ = false;
+
+  // If set, when privilege requests are registered they will use this error
+  // message.
+  private String authErrorMsg_;
+
+  // If false, privilege requests will not be registered in the analyzer.
+  private boolean enablePrivChecks_ = true;
+
+  // By default, all registered semi-joined tuples are invisible, i.e., their slots
+  // cannot be referenced. If set, this semi-joined tuple is made visible. Such a tuple
+  // should only be made visible for analyzing the On-clause of its semi-join.
+  // In particular, if there are multiple semi-joins in the same query block, then the
+  // On-clause of any such semi-join is not allowed to reference other semi-joined tuples
+  // except its own. Therefore, only a single semi-joined tuple can be visible at a time.
+  private TupleId visibleSemiJoinedTupleId_ = null;
+
+  public void setIsSubquery() {
+    isSubquery_ = true;
+    globalState_.containsSubquery = true;
+  }
+  public boolean isSubquery() { return isSubquery_; }
+  public boolean setHasPlanHints() { return globalState_.hasPlanHints = true; }
+  public boolean hasPlanHints() { return globalState_.hasPlanHints; }
+  public void setIsWithClause() { isWithClause_ = true; }
+  public boolean isWithClause() { return isWithClause_; }
+
+  // state shared between all objects of an Analyzer tree
+  // TODO: Many maps here contain properties about tuples, e.g., whether
+  // a tuple is outer/semi joined, etc. Remove the maps in favor of making
+  // them properties of the tuple descriptor itself.
+  private static class GlobalState {
+    // TODO: Consider adding an "exec-env"-like global singleton that contains the
+    // catalog and authzConfig.
+    public final ImpaladCatalog catalog;
+    public final TQueryCtx queryCtx;
+    public final AuthorizationConfig authzConfig;
+    public final DescriptorTable descTbl = new DescriptorTable();
+    public final IdGenerator<ExprId> conjunctIdGenerator = ExprId.createGenerator();
+    public final ColumnLineageGraph lineageGraph;
+
+    // True if we are analyzing an explain request. Should be set before starting
+    // analysis.
+    public boolean isExplain;
+
+    // Indicates whether the query has plan hints.
+    public boolean hasPlanHints = false;
+
+    // True if at least one of the analyzers belongs to a subquery.
+    public boolean containsSubquery = false;
+
+    // all registered conjuncts (map from expr id to conjunct)
+    public final Map<ExprId, Expr> conjuncts = Maps.newHashMap();
+
+    // all registered conjuncts bound by a single tuple id; used in getBoundPredicates()
+    public final ArrayList<ExprId> singleTidConjuncts = Lists.newArrayList();
+
+    // eqJoinConjuncts[tid] contains all conjuncts of the form
+    // "<lhs> = <rhs>" in which either lhs or rhs is fully bound by tid
+    // and the other side is not bound by tid (ie, predicates that express equi-join
+    // conditions between two tablerefs).
+    // A predicate such as "t1.a = t2.b" has two entries, one for 't1' and
+    // another one for 't2'.
+    public final Map<TupleId, List<ExprId>> eqJoinConjuncts = Maps.newHashMap();
+
+    // set of conjuncts that have been assigned to some PlanNode
+    public Set<ExprId> assignedConjuncts =
+        Collections.newSetFromMap(new IdentityHashMap<ExprId, Boolean>());
+
+    // map from outer-joined tuple id, i.e., one that is nullable,
+    // to the last Join clause (represented by its rhs table ref) that outer-joined it
+    public final Map<TupleId, TableRef> outerJoinedTupleIds = Maps.newHashMap();
+
+    // Map of registered conjunct to the last full outer join (represented by its
+    // rhs table ref) that outer joined it.
+    public final Map<ExprId, TableRef> fullOuterJoinedConjuncts = Maps.newHashMap();
+
+    // Map of full-outer-joined tuple id to the last full outer join that outer-joined it
+    public final Map<TupleId, TableRef> fullOuterJoinedTupleIds = Maps.newHashMap();
+
+    // Map from semi-joined tuple id, i.e., one that is invisible outside the join's
+    // On-clause, to its Join clause (represented by its rhs table ref). An anti-join is
+    // a kind of semi-join, so anti-joined tuples are also registered here.
+    public final Map<TupleId, TableRef> semiJoinedTupleIds = Maps.newHashMap();
+
+    // Map from right-hand side table-ref id of an outer join to the list of
+    // conjuncts in its On clause. There is always an entry for an outer join, but the
+    // corresponding value could be an empty list. There is no entry for non-outer joins.
+    public final Map<TupleId, List<ExprId>> conjunctsByOjClause = Maps.newHashMap();
+
+    // map from registered conjunct to its containing outer join On clause (represented
+    // by its right-hand side table ref); this is limited to conjuncts that can only be
+    // correctly evaluated by the originating outer join, including constant conjuncts
+    public final Map<ExprId, TableRef> ojClauseByConjunct = Maps.newHashMap();
+
+    // map from registered conjunct to its containing semi join On clause (represented
+    // by its right-hand side table ref)
+    public final Map<ExprId, TableRef> sjClauseByConjunct = Maps.newHashMap();
+
+    // map from registered conjunct to its containing inner join On clause (represented
+    // by its right-hand side table ref)
+    public final Map<ExprId, TableRef> ijClauseByConjunct = Maps.newHashMap();
+
+    // map from slot id to the analyzer/block in which it was registered
+    public final Map<SlotId, Analyzer> blockBySlot = Maps.newHashMap();
+
+    // Tracks all privilege requests on catalog objects.
+    private final Set<PrivilegeRequest> privilegeReqs = Sets.newLinkedHashSet();
+
+    // List of PrivilegeRequest to custom authorization failure error message.
+    // Tracks all privilege requests on catalog objects that need a custom
+    // error message returned to avoid exposing existence of catalog objects.
+    private final List<Pair<PrivilegeRequest, String>> maskedPrivilegeReqs =
+        Lists.newArrayList();
+
+    // accesses to catalog objects
+    // TODO: This can be inferred from privilegeReqs. They should be coalesced.
+    public Set<TAccessEvent> accessEvents = Sets.newHashSet();
+
+    // Tracks all warnings (e.g. non-fatal errors) that were generated during analysis.
+    // These are passed to the backend and eventually propagated to the shell. Maps from
+    // warning message to the number of times that warning was logged (in order to avoid
+    // duplicating the same warning over and over).
+    public final LinkedHashMap<String, Integer> warnings =
+        new LinkedHashMap<String, Integer>();
+
+    public final IdGenerator<EquivalenceClassId> equivClassIdGenerator =
+        EquivalenceClassId.createGenerator();
+
+    // map from equivalence class id to the list of its member slots
+    private final Map<EquivalenceClassId, ArrayList<SlotId>> equivClassMembers =
+        Maps.newHashMap();
+
+    // map from slot id to its equivalence class id;
+    // only visible at the root Analyzer
+    private final Map<SlotId, EquivalenceClassId> equivClassBySlotId = Maps.newHashMap();
+
+    // map for each slot to the canonical slot of its equivalence class
+    private final ExprSubstitutionMap equivClassSmap = new ExprSubstitutionMap();
+
+    // represents the direct and transitive value transfers between slots
+    private ValueTransferGraph valueTransferGraph;
+
+    private final List<Pair<SlotId, SlotId>> registeredValueTransfers =
+        Lists.newArrayList();
+
+    // Bidirectional map between Integer index and TNetworkAddress.
+    // Decreases the size of the scan range locations.
+    private final ListMap<TNetworkAddress> hostIndex = new ListMap<TNetworkAddress>();
+
+    // Timeline of important events in the planning process, used for debugging /
+    // profiling
+    private final EventSequence timeline = new EventSequence("Planner Timeline");
+
+    public GlobalState(ImpaladCatalog catalog, TQueryCtx queryCtx,
+        AuthorizationConfig authzConfig) {
+      this.catalog = catalog;
+      this.queryCtx = queryCtx;
+      this.authzConfig = authzConfig;
+      this.lineageGraph = new ColumnLineageGraph();
+    }
+  };
+
+  private final GlobalState globalState_;
+
+  public boolean containsSubquery() { return globalState_.containsSubquery; }
+
+  /**
+   * Helper function to reset the global state information about the existence of
+   * subqueries.
+   */
+  public void resetSubquery() { globalState_.containsSubquery = false; }
+
+  // An analyzer stores analysis state for a single select block. A select block can be
+  // a top level select statement, or an inline view select block.
+  // ancestors contains the Analyzers of the enclosing select blocks of 'this'
+  // (ancestors[0] contains the immediate parent, etc.).
+  private final ArrayList<Analyzer> ancestors_;
+
+  // map from lowercase table alias to a view definition in this analyzer's scope
+  private final Map<String, View> localViews_ = Maps.newHashMap();
+
+  // Map from lowercase table alias to descriptor. Tables without an explicit alias
+  // are assigned two implicit aliases: the unqualified and fully-qualified table name.
+  // Such tables have two entries pointing to the same descriptor. If an alias is
+  // ambiguous, then this map retains the first entry with that alias to simplify error
+  // checking (duplicate vs. ambiguous alias).
+  private final Map<String, TupleDescriptor> aliasMap_ = Maps.newHashMap();
+
+  // Map from tuple id to its corresponding table ref.
+  private final Map<TupleId, TableRef> tableRefMap_ = Maps.newHashMap();
+
+  // Set of lowercase ambiguous implicit table aliases.
+  private final Set<String> ambiguousAliases_ = Sets.newHashSet();
+
+  // Map from lowercase fully-qualified path to its slot descriptor. Only contains paths
+  // that have a scalar type as destination (see registerSlotRef()).
+  private final Map<String, SlotDescriptor> slotPathMap_ = Maps.newHashMap();
+
+  // Tracks all tables/views found during analysis that were missing metadata.
+  private Set<TableName> missingTbls_ = new HashSet<TableName>();
+
+  // Indicates whether this analyzer/block is guaranteed to have an empty result set
+  // due to a limit 0 or constant conjunct evaluating to false.
+  private boolean hasEmptyResultSet_ = false;
+
+  // Indicates whether the select-project-join (spj) portion of this query block
+  // is guaranteed to return an empty result set. Set due to a constant non-Having
+  // conjunct evaluating to false.
+  private boolean hasEmptySpjResultSet_ = false;
+
+  public Analyzer(ImpaladCatalog catalog, TQueryCtx queryCtx,
+      AuthorizationConfig authzConfig) {
+    ancestors_ = Lists.newArrayList();
+    globalState_ = new GlobalState(catalog, queryCtx, authzConfig);
+    user_ = new User(TSessionStateUtil.getEffectiveUser(queryCtx.session));
+  }
+
+  /**
+   * Analyzer constructor for nested select block. GlobalState is inherited from the
+   * parentAnalyzer.
+   */
+  public Analyzer(Analyzer parentAnalyzer) {
+    this(parentAnalyzer, parentAnalyzer.globalState_);
+  }
+
+  /**
+   * Analyzer constructor for nested select block with the specified global state.
+   */
+  private Analyzer(Analyzer parentAnalyzer, GlobalState globalState) {
+    ancestors_ = Lists.newArrayList(parentAnalyzer);
+    ancestors_.addAll(parentAnalyzer.ancestors_);
+    globalState_ = globalState;
+    missingTbls_ = parentAnalyzer.missingTbls_;
+    user_ = parentAnalyzer.getUser();
+    useHiveColLabels_ = parentAnalyzer.useHiveColLabels_;
+    authErrorMsg_ = parentAnalyzer.authErrorMsg_;
+    enablePrivChecks_ = parentAnalyzer.enablePrivChecks_;
+    isWithClause_ = parentAnalyzer.isWithClause_;
+  }
+
+  /**
+   * Returns a new analyzer with the specified parent analyzer but with a new
+   * global state.
+   */
+  public static Analyzer createWithNewGlobalState(Analyzer parentAnalyzer) {
+    GlobalState globalState = new GlobalState(parentAnalyzer.globalState_.catalog,
+        parentAnalyzer.getQueryCtx(), parentAnalyzer.getAuthzConfig());
+    return new Analyzer(parentAnalyzer, globalState);
+  }
+
+  /**
+   * Makes the given semi-joined tuple visible such that its slots can be referenced.
+   * If tid is null, makes the currently visible semi-joined tuple invisible again.
+   */
+  public void setVisibleSemiJoinedTuple(TupleId tid) {
+    Preconditions.checkState(tid == null
+        || globalState_.semiJoinedTupleIds.containsKey(tid));
+    Preconditions.checkState(tid == null || visibleSemiJoinedTupleId_ == null);
+    visibleSemiJoinedTupleId_ = tid;
+  }
+
+  public Set<TableName> getMissingTbls() { return missingTbls_; }
+  public boolean hasMissingTbls() { return !missingTbls_.isEmpty(); }
+  public boolean hasAncestors() { return !ancestors_.isEmpty(); }
+  public Analyzer getParentAnalyzer() {
+    return hasAncestors() ? ancestors_.get(0) : null;
+  }
+
+  /**
+   * Returns the analyzer that has an entry for the given tuple id in its
+   * tableRefMap, or null if no such analyzer could be found. Searches the hierarchy
+   * of analyzers bottom-up.
+   */
+  public Analyzer findAnalyzer(TupleId tid) {
+    if (tableRefMap_.containsKey(tid)) return this;
+    if (hasAncestors()) return getParentAnalyzer().findAnalyzer(tid);
+    return null;
+  }
+
+  /**
+   * Returns a list of each warning logged, indicating if it was logged more than once.
+   */
+  public List<String> getWarnings() {
+    List<String> result = new ArrayList<String>();
+    for (Map.Entry<String, Integer> e : globalState_.warnings.entrySet()) {
+      String error = e.getKey();
+      int count = e.getValue();
+      Preconditions.checkState(count > 0);
+      if (count == 1) {
+        result.add(error);
+      } else {
+        result.add(error + " (" + count + " warnings like this)");
+      }
+    }
+    return result;
+  }
+
+  /**
+   * Registers a local view definition with this analyzer. Throws an exception if a view
+   * definition with the same alias has already been registered or if the number of
+   * explicit column labels is greater than the number of columns in the view statement.
+   */
+  public void registerLocalView(View view) throws AnalysisException {
+    Preconditions.checkState(view.isLocalView());
+    if (view.hasColLabels()) {
+      List<String> viewLabels = view.getColLabels();
+      List<String> queryStmtLabels = view.getQueryStmt().getColLabels();
+      if (viewLabels.size() > queryStmtLabels.size()) {
+        throw new AnalysisException("WITH-clause view '" + view.getName() +
+            "' returns " + queryStmtLabels.size() + " columns, but " +
+            viewLabels.size() + " labels were specified. The number of column " +
+            "labels must be smaller or equal to the number of returned columns.");
+      }
+    }
+    if (localViews_.put(view.getName().toLowerCase(), view) != null) {
+      throw new AnalysisException(
+          String.format("Duplicate table alias: '%s'", view.getName()));
+    }
+  }
+
+  /**
+   * Creates and returns an empty TupleDescriptor for the given table ref and registers
+   * it against all its legal aliases. For table refs with an explicit alias, only the
+   * explicit alias is legal. For table refs with no explicit alias, the fully-qualified
+   * and unqualified table names are legal aliases. Column references against unqualified
+   * implicit aliases can be ambiguous, therefore, we register such ambiguous aliases
+   * here. Requires that all views have been substituted.
+   * Throws if an existing explicit alias or implicit fully-qualified alias
+   * has already been registered for another table ref.
+   */
+  public TupleDescriptor registerTableRef(TableRef ref) throws AnalysisException {
+    String uniqueAlias = ref.getUniqueAlias();
+    if (aliasMap_.containsKey(uniqueAlias)) {
+      throw new AnalysisException("Duplicate table alias: '" + uniqueAlias + "'");
+    }
+
+    // If ref has no explicit alias, then the unqualified and the fully-qualified table
+    // names are legal implicit aliases. Column references against unqualified implicit
+    // aliases can be ambiguous, therefore, we register such ambiguous aliases here.
+    String unqualifiedAlias = null;
+    String[] aliases = ref.getAliases();
+    if (aliases.length > 1) {
+      unqualifiedAlias = aliases[1];
+      TupleDescriptor tupleDesc = aliasMap_.get(unqualifiedAlias);
+      if (tupleDesc != null) {
+        if (tupleDesc.hasExplicitAlias()) {
+          throw new AnalysisException(
+              "Duplicate table alias: '" + unqualifiedAlias + "'");
+        } else {
+          ambiguousAliases_.add(unqualifiedAlias);
+        }
+      }
+    }
+
+    // Delegate creation of the tuple descriptor to the concrete table ref.
+    TupleDescriptor result = ref.createTupleDescriptor(this);
+    result.setAliases(aliases, ref.hasExplicitAlias());
+    // Register all legal aliases.
+    for (String alias: aliases) {
+      aliasMap_.put(alias, result);
+    }
+    tableRefMap_.put(result.getId(), ref);
+    return result;
+  }
+
+  /**
+   * Resolves the given TableRef into a concrete BaseTableRef, ViewRef or
+   * CollectionTableRef. Returns the new resolved table ref or the given table
+   * ref if it is already resolved.
+   * Registers privilege requests and throws an AnalysisException if the tableRef's
+   * path could not be resolved. The privilege requests are added to ensure that
+   * an AuthorizationException is preferred over an AnalysisException so as not to
+   * accidentally reveal the non-existence of tables/databases.
+   */
+  public TableRef resolveTableRef(TableRef tableRef) throws AnalysisException {
+    // Return the table if it is already resolved.
+    if (tableRef.isResolved()) return tableRef;
+    // Try to find a matching local view.
+    if (tableRef.getPath().size() == 1) {
+      // Searches the hierarchy of analyzers bottom-up for a registered local view with
+      // a matching alias.
+      String viewAlias = tableRef.getPath().get(0).toLowerCase();
+      Analyzer analyzer = this;
+      do {
+        View localView = analyzer.localViews_.get(viewAlias);
+        if (localView != null) return new InlineViewRef(localView, tableRef);
+        analyzer = (analyzer.ancestors_.isEmpty() ? null : analyzer.ancestors_.get(0));
+      } while (analyzer != null);
+    }
+
+    // Resolve the table ref's path and determine what resolved table ref
+    // to replace it with.
+    List<String> rawPath = tableRef.getPath();
+    Path resolvedPath = null;
+    try {
+      resolvedPath = resolvePath(tableRef.getPath(), PathType.TABLE_REF);
+    } catch (AnalysisException e) {
+      if (!hasMissingTbls()) {
+        // Register privilege requests to prefer reporting an authorization error over
+        // an analysis error. We should not accidentally reveal the non-existence of a
+        // table/database if the user is not authorized.
+        if (rawPath.size() > 1) {
+          registerPrivReq(new PrivilegeRequestBuilder()
+              .onTable(rawPath.get(0), rawPath.get(1))
+              .allOf(Privilege.SELECT).toRequest());
+        }
+        registerPrivReq(new PrivilegeRequestBuilder()
+            .onTable(getDefaultDb(), rawPath.get(0))
+            .allOf(Privilege.SELECT).toRequest());
+      }
+      throw e;
+    } catch (TableLoadingException e) {
+      throw new AnalysisException(String.format(
+          "Failed to load metadata for table: '%s'", Joiner.on(".").join(rawPath)), e);
+    }
+
+    Preconditions.checkNotNull(resolvedPath);
+    if (resolvedPath.destTable() != null) {
+      Table table = resolvedPath.destTable();
+      Preconditions.checkNotNull(table);
+      if (table instanceof View) return new InlineViewRef((View) table, tableRef);
+      // The table must be a base table.
+      Preconditions.checkState(table instanceof HdfsTable ||
+          table instanceof KuduTable ||
+          table instanceof HBaseTable ||
+          table instanceof DataSourceTable);
+      return new BaseTableRef(tableRef, resolvedPath);
+    } else {
+      return new CollectionTableRef(tableRef, resolvedPath);
+    }
+  }
+
+  /**
+   * Register conjuncts that are outer joined by a full outer join. For a given
+   * predicate, we record the last full outer join that outer-joined any of its
+   * tuple ids. We need this additional information because full-outer joins obey
+   * different rules with respect to predicate pushdown compared to left and right
+   * outer joins.
+   */
+  public void registerFullOuterJoinedConjunct(Expr e) {
+    Preconditions.checkState(
+        !globalState_.fullOuterJoinedConjuncts.containsKey(e.getId()));
+    List<TupleId> tids = Lists.newArrayList();
+    e.getIds(tids, null);
+    for (TupleId tid: tids) {
+      if (!globalState_.fullOuterJoinedTupleIds.containsKey(tid)) continue;
+      TableRef currentOuterJoin = globalState_.fullOuterJoinedTupleIds.get(tid);
+      globalState_.fullOuterJoinedConjuncts.put(e.getId(), currentOuterJoin);
+      break;
+    }
+    LOG.trace("registerFullOuterJoinedConjunct: " +
+        globalState_.fullOuterJoinedConjuncts.toString());
+  }
+
+  /**
+   * Register tids as being outer-joined by a full outer join clause represented by
+   * rhsRef.
+   */
+  public void registerFullOuterJoinedTids(List<TupleId> tids, TableRef rhsRef) {
+    for (TupleId tid: tids) {
+      globalState_.fullOuterJoinedTupleIds.put(tid, rhsRef);
+    }
+    LOG.trace("registerFullOuterJoinedTids: " +
+        globalState_.fullOuterJoinedTupleIds.toString());
+  }
+
+  /**
+   * Register tids as being outer-joined by Join clause represented by rhsRef.
+   */
+  public void registerOuterJoinedTids(List<TupleId> tids, TableRef rhsRef) {
+    for (TupleId tid: tids) {
+      globalState_.outerJoinedTupleIds.put(tid, rhsRef);
+    }
+    LOG.trace("registerOuterJoinedTids: " + globalState_.outerJoinedTupleIds.toString());
+  }
+
+  /**
+   * Register the given tuple id as being the invisible side of a semi-join.
+   */
+  public void registerSemiJoinedTid(TupleId tid, TableRef rhsRef) {
+    globalState_.semiJoinedTupleIds.put(tid, rhsRef);
+  }
+
+  /**
+   * Returns the descriptor of the given explicit or implicit table alias or null if no
+   * such alias has been registered.
+   * Throws an AnalysisException if the given table alias is ambiguous.
+   */
+  public TupleDescriptor getDescriptor(String tableAlias) throws AnalysisException {
+    String lookupAlias = tableAlias.toLowerCase();
+    if (ambiguousAliases_.contains(lookupAlias)) {
+      throw new AnalysisException(String.format(
+          "Unqualified table alias is ambiguous: '%s'", tableAlias));
+    }
+    return aliasMap_.get(lookupAlias);
+  }
+
+  public TupleDescriptor getTupleDesc(TupleId id) {
+    return globalState_.descTbl.getTupleDesc(id);
+  }
+
+  public SlotDescriptor getSlotDesc(SlotId id) {
+    return globalState_.descTbl.getSlotDesc(id);
+  }
+
+  public TableRef getTableRef(TupleId tid) { return tableRefMap_.get(tid); }
+
+  /**
+   * Given a "table alias"."column alias", return the SlotDescriptor
+   */
+  public SlotDescriptor getSlotDescriptor(String qualifiedColumnName) {
+    return slotPathMap_.get(qualifiedColumnName);
+  }
+
+  /**
+   * Return true if this analyzer has no ancestors. (i.e. false for the analyzer created
+   * for inline views/union operands, etc.)
+   */
+  public boolean isRootAnalyzer() { return ancestors_.isEmpty(); }
+
+  /**
+   * Returns true if the query block corresponding to this analyzer is guaranteed
+   * to return an empty result set, e.g., due to a limit 0 or a constant predicate
+   * that evaluates to false.
+   */
+  public boolean hasEmptyResultSet() { return hasEmptyResultSet_; }
+  public void setHasEmptyResultSet() { hasEmptyResultSet_ = true; }
+
+  /**
+   * Returns true if the select-project-join portion of this query block returns
+   * an empty result set.
+   */
+  public boolean hasEmptySpjResultSet() { return hasEmptySpjResultSet_; }
+
+  /**
+   * Resolves the given raw path according to the given path type, as follows:
+   * SLOT_REF and STAR: Resolves the path in the context of all registered tuple
+   * descriptors, considering qualified as well as unqualified matches.
+   * TABLE_REF: Resolves the path in the context of all registered tuple descriptors
+   * only considering qualified matches, as well as catalog tables/views.
+   *
+   * Path resolution:
+   * Regardless of the path type, a raw path can have multiple successful resolutions.
+   * A resolution is said to be 'successful' if all raw path elements can be mapped
+   * to a corresponding alias/table/column/field.
+   *
+   * Path legality:
+   * A successful resolution may be illegal with respect to the path type, e.g.,
+   * a SlotRef cannot reference intermediate collection types, etc.
+   *
+   * Path ambiguity:
+   * A raw path is ambiguous if it has multiple legal resolutions. Otherwise,
+   * the ambiguity is resolved in favor of the legal resolution.
+   *
+   * Returns the single legal path resolution if it exists.
+   * Throws if there was no legal resolution or if the path is ambiguous.
+   */
+  public Path resolvePath(List<String> rawPath, PathType pathType)
+      throws AnalysisException, TableLoadingException {
+    // We only allow correlated references in predicates of a subquery.
+    boolean resolveInAncestors = false;
+    if (pathType == PathType.TABLE_REF || pathType == PathType.ANY) {
+      resolveInAncestors = true;
+    } else if (pathType == PathType.SLOT_REF) {
+      resolveInAncestors = isSubquery_;
+    }
+    // Convert all path elements to lower case.
+    ArrayList<String> lcRawPath = Lists.newArrayListWithCapacity(rawPath.size());
+    for (String s: rawPath) lcRawPath.add(s.toLowerCase());
+    return resolvePath(lcRawPath, pathType, resolveInAncestors);
+  }
+
+  private Path resolvePath(List<String> rawPath, PathType pathType,
+      boolean resolveInAncestors) throws AnalysisException, TableLoadingException {
+    // List of all candidate paths with different roots. Paths in this list are initially
+    // unresolved and may be illegal with respect to the pathType.
+    List<Path> candidates = getTupleDescPaths(rawPath);
+
+    LinkedList<String> errors = Lists.newLinkedList();
+    if (pathType == PathType.SLOT_REF || pathType == PathType.STAR) {
+      // Paths rooted at all of the unique registered tuple descriptors.
+      for (TableRef tblRef: tableRefMap_.values()) {
+        candidates.add(new Path(tblRef.getDesc(), rawPath));
+      }
+    } else {
+      // Always prefer table ref paths rooted at a registered tuple descriptor.
+      Preconditions.checkState(pathType == PathType.TABLE_REF ||
+          pathType == PathType.ANY);
+      Path result = resolvePaths(rawPath, candidates, pathType, errors);
+      if (result != null) return result;
+      candidates.clear();
+
+      // Add paths rooted at a table with an unqualified and fully-qualified table name.
+      int end = Math.min(2, rawPath.size());
+      for (int tblNameIdx = 0; tblNameIdx < end; ++tblNameIdx) {
+        String dbName = (tblNameIdx == 0) ? getDefaultDb() : rawPath.get(0);
+        String tblName = rawPath.get(tblNameIdx);
+        Table tbl = null;
+        try {
+          tbl = getTable(dbName, tblName);
+        } catch (AnalysisException e) {
+          if (hasMissingTbls()) throw e;
+          // Ignore other exceptions to allow path resolution to continue.
+        }
+        if (tbl != null) {
+          candidates.add(new Path(tbl, rawPath.subList(tblNameIdx + 1, rawPath.size())));
+        }
+      }
+    }
+
+    Path result = resolvePaths(rawPath, candidates, pathType, errors);
+    if (result == null && resolveInAncestors && hasAncestors()) {
+      result = getParentAnalyzer().resolvePath(rawPath, pathType, true);
+    }
+    if (result == null) {
+      Preconditions.checkState(!errors.isEmpty());
+      throw new AnalysisException(errors.getFirst());
+    }
+    return result;
+  }
+
+  /**
+   * Returns a list of unresolved Paths that are rooted at a registered tuple
+   * descriptor matching a prefix of the given raw path.
+   */
+  public List<Path> getTupleDescPaths(List<String> rawPath)
+      throws AnalysisException {
+    ArrayList<Path> result = Lists.newArrayList();
+
+    // Path rooted at a tuple desc with an explicit or implicit unqualified alias.
+    TupleDescriptor rootDesc = getDescriptor(rawPath.get(0));
+    if (rootDesc != null) {
+      result.add(new Path(rootDesc, rawPath.subList(1, rawPath.size())));
+    }
+
+    // Path rooted at a tuple desc with an implicit qualified alias.
+    if (rawPath.size() > 1) {
+      rootDesc = getDescriptor(rawPath.get(0) + "." + rawPath.get(1));
+      if (rootDesc != null) {
+        result.add(new Path(rootDesc, rawPath.subList(2, rawPath.size())));
+      }
+    }
+    return result;
+  }
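+
+  // Illustrative sketch: for a raw path ["a", "b", "c"] this returns up to two
+  // unresolved paths, one rooted at a tuple descriptor registered under the
+  // unqualified alias "a" (remaining path ["b", "c"]) and one rooted at a tuple
+  // descriptor registered under the implicit qualified alias "a.b" (remaining
+  // path ["c"]); entries are omitted when no matching descriptor is registered.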
+
+  /**
+   * Resolves the given paths and checks them for legality and ambiguity. Returns the
+   * single legal path resolution if it exists, null otherwise.
+   * Populates 'errors' with a prioritized list of error messages starting with the
+   * most relevant one. The list contains at least one error message if null is returned.
+   */
+  private Path resolvePaths(List<String> rawPath, List<Path> paths, PathType pathType,
+      LinkedList<String> errors) {
+    // For generating error messages.
+    String pathTypeStr = null;
+    String pathStr = Joiner.on(".").join(rawPath);
+    if (pathType == PathType.SLOT_REF) {
+      pathTypeStr = "Column/field reference";
+    } else if (pathType == PathType.TABLE_REF) {
+      pathTypeStr = "Table reference";
+    } else if (pathType == PathType.ANY) {
+      pathTypeStr = "Path";
+    } else {
+      Preconditions.checkState(pathType == PathType.STAR);
+      pathTypeStr = "Star expression";
+      pathStr += ".*";
+    }
+
+    List<Path> legalPaths = Lists.newArrayList();
+    for (Path p: paths) {
+      if (!p.resolve()) continue;
+
+      // Check legality of the resolved path.
+      if (p.isRootedAtTuple() && !isVisible(p.getRootDesc().getId())) {
+        errors.addLast(String.format(
+            "Illegal %s '%s' of semi-/anti-joined table '%s'",
+            pathTypeStr.toLowerCase(), pathStr, p.getRootDesc().getAlias()));
+        continue;
+      }
+      switch (pathType) {
+        // Illegal cases:
+        // 1. Destination type is not a collection.
+        case TABLE_REF: {
+          if (!p.destType().isCollectionType()) {
+            errors.addFirst(String.format(
+                "Illegal table reference to non-collection type: '%s'\n" +
+                    "Path resolved to type: %s", pathStr, p.destType().toSql()));
+            continue;
+          }
+          break;
+        }
+        case SLOT_REF: {
+          // Illegal cases:
+          // 1. Path contains an intermediate collection reference.
+          // 2. Destination of the path is a catalog table or a registered alias.
+          if (p.hasNonDestCollection()) {
+            errors.addFirst(String.format(
+                "Illegal column/field reference '%s' with intermediate " +
+                "collection '%s' of type '%s'",
+                pathStr, p.getFirstCollectionName(),
+                p.getFirstCollectionType().toSql()));
+            continue;
+          }
+          // Error should be "Could not resolve...". No need to add it here explicitly.
+          if (p.getMatchedTypes().isEmpty()) continue;
+          break;
+        }
+        // Illegal cases:
+        // 1. Path contains an intermediate collection reference.
+        // 2. Destination type of the path is not a struct.
+        case STAR: {
+          if (p.hasNonDestCollection()) {
+            errors.addFirst(String.format(
+                "Illegal star expression '%s' with intermediate " +
+                "collection '%s' of type '%s'",
+                pathStr, p.getFirstCollectionName(),
+                p.getFirstCollectionType().toSql()));
+            continue;
+          }
+          if (!p.destType().isStructType()) {
+            errors.addFirst(String.format(
+                "Cannot expand star in '%s' because path '%s' resolved to type '%s'." +
+                "\nStar expansion is only valid for paths to a struct type.",
+                pathStr, Joiner.on(".").join(rawPath), p.destType().toSql()));
+            continue;
+          }
+          break;
+        }
+        case ANY: {
+          // Any path is valid.
+          break;
+        }
+      }
+      legalPaths.add(p);
+    }
+
+    if (legalPaths.size() > 1) {
+      errors.addFirst(String.format("%s is ambiguous: '%s'",
+          pathTypeStr, pathStr));
+      return null;
+    }
+    if (legalPaths.isEmpty()) {
+      if (errors.isEmpty()) {
+        errors.addFirst(String.format("Could not resolve %s: '%s'",
+            pathTypeStr.toLowerCase(), pathStr));
+      }
+      return null;
+    }
+    return legalPaths.get(0);
+  }
+
+  /**
+   * Returns an existing or new SlotDescriptor for the given path. Always returns
+   * a new empty SlotDescriptor for paths with a collection-typed destination.
+   */
+  public SlotDescriptor registerSlotRef(Path slotPath) throws AnalysisException {
+    Preconditions.checkState(slotPath.isRootedAtTuple());
+    // Always register a new slot descriptor for collection types. The BE currently
+    // relies on this behavior for setting unnested collection slots to NULL.
+    if (slotPath.destType().isCollectionType()) {
+      SlotDescriptor result = addSlotDescriptor(slotPath.getRootDesc());
+      result.setPath(slotPath);
+      registerColumnPrivReq(result);
+      return result;
+    }
+    // SlotRefs with a scalar type are registered against the slot's
+    // fully-qualified lowercase path.
+    String key = slotPath.toString();
+    SlotDescriptor existingSlotDesc = slotPathMap_.get(key);
+    if (existingSlotDesc != null) return existingSlotDesc;
+    SlotDescriptor result = addSlotDescriptor(slotPath.getRootDesc());
+    result.setPath(slotPath);
+    slotPathMap_.put(key, result);
+    registerColumnPrivReq(result);
+    return result;
+  }
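+
+  // Illustrative example (column names are hypothetical): registering the scalar
+  // path "t.id" twice yields the same SlotDescriptor because scalar slots are cached
+  // in slotPathMap_, whereas a collection-typed path such as "t.int_array" gets a
+  // fresh SlotDescriptor on every call.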
+
+  /**
+   * Registers a column-level privilege request if 'slotDesc' directly or indirectly
+   * refers to a table column. It handles both scalar and complex-typed columns.
+   */
+  private void registerColumnPrivReq(SlotDescriptor slotDesc) {
+    Preconditions.checkNotNull(slotDesc.getPath());
+    TupleDescriptor tupleDesc = slotDesc.getParent();
+    if (tupleDesc.isMaterialized() && tupleDesc.getTable() != null) {
+      Column column = tupleDesc.getTable().getColumn(
+          slotDesc.getPath().getRawPath().get(0));
+      if (column != null) {
+        registerPrivReq(new PrivilegeRequestBuilder().
+            allOf(Privilege.SELECT).onColumn(tupleDesc.getTableName().getDb(),
+            tupleDesc.getTableName().getTbl(), column.getName()).toRequest());
+      }
+    }
+  }
+
+  /**
+   * Creates a new slot descriptor and related state in globalState.
+   */
+  public SlotDescriptor addSlotDescriptor(TupleDescriptor tupleDesc) {
+    SlotDescriptor result = globalState_.descTbl.addSlotDescriptor(tupleDesc);
+    globalState_.blockBySlot.put(result.getId(), this);
+    return result;
+  }
+
+  /**
+   * Adds a new slot descriptor in tupleDesc that is identical to srcSlotDesc
+   * except for the path and slot id.
+   */
+  public SlotDescriptor copySlotDescriptor(SlotDescriptor srcSlotDesc,
+      TupleDescriptor tupleDesc) {
+    SlotDescriptor result = globalState_.descTbl.addSlotDescriptor(tupleDesc);
+    globalState_.blockBySlot.put(result.getId(), this);
+    result.setSourceExprs(srcSlotDesc.getSourceExprs());
+    result.setLabel(srcSlotDesc.getLabel());
+    result.setStats(srcSlotDesc.getStats());
+    result.setType(srcSlotDesc.getType());
+    result.setItemTupleDesc(srcSlotDesc.getItemTupleDesc());
+    return result;
+  }
+
+  /**
+   * Register all conjuncts in a list of predicates as Having-clause conjuncts.
+   */
+  public void registerConjuncts(List<Expr> l) throws AnalysisException {
+    for (Expr e: l) {
+      registerConjuncts(e, true);
+    }
+  }
+
+  /**
+   * Register all conjuncts in 'conjuncts' that make up the On-clause of the given
+   * right-hand side of a join. Assigns each conjunct a unique id. If rhsRef is
+   * the right-hand side of an outer join, then the conjuncts are
+   * registered such that they can only be evaluated by the node implementing that
+   * join.
+   */
+  public void registerOnClauseConjuncts(List<Expr> conjuncts, TableRef rhsRef)
+      throws AnalysisException {
+    Preconditions.checkNotNull(rhsRef);
+    Preconditions.checkNotNull(conjuncts);
+    List<ExprId> ojConjuncts = null;
+    if (rhsRef.getJoinOp().isOuterJoin()) {
+      ojConjuncts = globalState_.conjunctsByOjClause.get(rhsRef.getId());
+      if (ojConjuncts == null) {
+        ojConjuncts = Lists.newArrayList();
+        globalState_.conjunctsByOjClause.put(rhsRef.getId(), ojConjuncts);
+      }
+    }
+    for (Expr conjunct: conjuncts) {
+      conjunct.setIsOnClauseConjunct(true);
+      registerConjunct(conjunct);
+      if (rhsRef.getJoinOp().isOuterJoin()) {
+        globalState_.ojClauseByConjunct.put(conjunct.getId(), rhsRef);
+        ojConjuncts.add(conjunct.getId());
+      }
+      if (rhsRef.getJoinOp().isSemiJoin()) {
+        globalState_.sjClauseByConjunct.put(conjunct.getId(), rhsRef);
+      }
+      if (rhsRef.getJoinOp().isInnerJoin()) {
+        globalState_.ijClauseByConjunct.put(conjunct.getId(), rhsRef);
+      }
+      markConstantConjunct(conjunct, false);
+    }
+  }
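+
+  // Illustrative example (hypothetical tables): for "t1 LEFT OUTER JOIN t2 ON
+  // t1.id = t2.id", the On-clause conjunct is recorded in ojClauseByConjunct and in
+  // conjunctsByOjClause under t2's table ref id, so that it can only be picked up by
+  // the join node implementing this outer join.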
+
+  /**
+   * Register all conjuncts that make up 'e'. If fromHavingClause is false, the
+   * conjuncts are assumed to originate from a WHERE or ON clause.
+   */
+  public void registerConjuncts(Expr e, boolean fromHavingClause)
+      throws AnalysisException {
+    for (Expr conjunct: e.getConjuncts()) {
+      registerConjunct(conjunct);
+      markConstantConjunct(conjunct, fromHavingClause);
+    }
+  }
+
+  /**
+   * If the given conjunct is a constant non-oj conjunct, marks it as assigned, and
+   * evaluates the conjunct. If the conjunct evaluates to false, marks this query
+   * block as having an empty result set or as having an empty select-project-join
+   * portion, if fromHavingClause is true or false, respectively.
+   * No-op if the conjunct is not constant or is outer joined.
+   * Throws an AnalysisException if there is an error evaluating 'conjunct'.
+   */
+  private void markConstantConjunct(Expr conjunct, boolean fromHavingClause)
+      throws AnalysisException {
+    if (!conjunct.isConstant() || isOjConjunct(conjunct)) return;
+    markConjunctAssigned(conjunct);
+    if ((!fromHavingClause && !hasEmptySpjResultSet_)
+        || (fromHavingClause && !hasEmptyResultSet_)) {
+      try {
+        if (!FeSupport.EvalPredicate(conjunct, globalState_.queryCtx)) {
+          if (fromHavingClause) {
+            hasEmptyResultSet_ = true;
+          } else {
+            hasEmptySpjResultSet_ = true;
+          }
+        }
+      } catch (InternalException ex) {
+        throw new AnalysisException("Error evaluating \"" + conjunct.toSql() + "\"", ex);
+      }
+    }
+  }
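+
+  // Illustrative example: a constant WHERE conjunct such as "1 = 0" marks the
+  // select-project-join portion as empty (hasEmptySpjResultSet_), while the same
+  // constant conjunct coming from the HAVING clause marks the whole query block as
+  // having an empty result set (hasEmptyResultSet_).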
+
+  /**
+   * Assigns a new id to the given conjunct and registers it with all tuple and slot ids
+   * it references and with the global conjunct list.
+   */
+  private void registerConjunct(Expr e) {
+    // always generate a new expr id; this might be a cloned conjunct that already
+    // has the id of its origin set
+    e.setId(globalState_.conjunctIdGenerator.getNextId());
+    globalState_.conjuncts.put(e.getId(), e);
+
+    ArrayList<TupleId> tupleIds = Lists.newArrayList();
+    ArrayList<SlotId> slotIds = Lists.newArrayList();
+    e.getIds(tupleIds, slotIds);
+    registerFullOuterJoinedConjunct(e);
+
+    // register single tid conjuncts
+    if (tupleIds.size() == 1) globalState_.singleTidConjuncts.add(e.getId());
+
+    LOG.trace("register tuple/slotConjunct: " + Integer.toString(e.getId().asInt())
+        + " " + e.toSql() + " " + e.debugString());
+
+    if (!(e instanceof BinaryPredicate)) return;
+    BinaryPredicate binaryPred = (BinaryPredicate) e;
+
+    // check whether this is an equi-join predicate, i.e., something of the
+    // form <expr1> = <expr2> where at least one of the exprs is bound by
+    // exactly one tuple id
+    if (binaryPred.getOp() != BinaryPredicate.Operator.EQ &&
+       binaryPred.getOp() != BinaryPredicate.Operator.NULL_MATCHING_EQ &&
+       binaryPred.getOp() != BinaryPredicate.Operator.NOT_DISTINCT) {
+      return;
+    }
+    // the binary predicate must refer to at least two tuples to be an eqJoinConjunct
+    if (tupleIds.size() < 2) return;
+
+    // examine children and update eqJoinConjuncts
+    for (int i = 0; i < 2; ++i) {
+      tupleIds = Lists.newArrayList();
+      binaryPred.getChild(i).getIds(tupleIds, null);
+      if (tupleIds.size() == 1) {
+        if (!globalState_.eqJoinConjuncts.containsKey(tupleIds.get(0))) {
+          List<ExprId> conjunctIds = Lists.newArrayList();
+          conjunctIds.add(e.getId());
+          globalState_.eqJoinConjuncts.put(tupleIds.get(0), conjunctIds);
+        } else {
+          globalState_.eqJoinConjuncts.get(tupleIds.get(0)).add(e.getId());
+        }
+        binaryPred.setIsEqJoinConjunct(true);
+        LOG.trace("register eqJoinConjunct: " + Integer.toString(e.getId().asInt()));
+      }
+    }
+  }
+
+  /**
+   * Create and register an auxiliary predicate to express an equivalence between two
+   * exprs (BinaryPredicate with EQ); this predicate does not need to be assigned, but
+   * it's used for equivalence class computation.
+   * Does nothing if the lhs or rhs expr are NULL. Registering an equivalence with NULL
+   * would be incorrect, because <expr> = NULL is false (even NULL = NULL).
+   */
+  public void createAuxEquivPredicate(Expr lhs, Expr rhs) {
+    // Check the expr type as well as the class because NullLiteral could have been
+    // implicitly cast to a type different from NULL.
+    if (lhs instanceof NullLiteral || rhs instanceof NullLiteral ||
+        lhs.getType().isNull() || rhs.getType().isNull()) {
+      return;
+    }
+    // create an eq predicate between lhs and rhs
+    BinaryPredicate p = new BinaryPredicate(BinaryPredicate.Operator.EQ, lhs, rhs);
+    p.setIsAuxExpr();
+    LOG.trace("register equiv predicate: " + p.toSql() + " " + p.debugString());
+    registerConjunct(p);
+  }
+
+  /**
+   * Creates an inferred equality predicate between the given slots.
+   */
+  public BinaryPredicate createInferredEqPred(SlotId lhsSlotId, SlotId rhsSlotId) {
+    BinaryPredicate pred = new BinaryPredicate(BinaryPredicate.Operator.EQ,
+        new SlotRef(globalState_.descTbl.getSlotDesc(lhsSlotId)),
+        new SlotRef(globalState_.descTbl.getSlotDesc(rhsSlotId)));
+    pred.setIsInferred();
+    // create casts if needed
+    pred.analyzeNoThrow(this);
+    return pred;
+  }
+
+  /**
+   * Return all unassigned non-constant registered conjuncts that are fully bound by
+   * given list of tuple ids. If 'inclOjConjuncts' is false, conjuncts tied to an
+   * Outer Join clause are excluded.
+   */
+  public List<Expr> getUnassignedConjuncts(
+      List<TupleId> tupleIds, boolean inclOjConjuncts) {
+    LOG.trace("getUnassignedConjuncts for " + Id.printIds(tupleIds));
+    List<Expr> result = Lists.newArrayList();
+    for (Expr e: globalState_.conjuncts.values()) {
+      if (e.isBoundByTupleIds(tupleIds)
+          && !e.isAuxExpr()
+          && !globalState_.assignedConjuncts.contains(e.getId())
+          && ((inclOjConjuncts && !e.isConstant())
+              || !globalState_.ojClauseByConjunct.containsKey(e.getId()))) {
+        result.add(e);
+        LOG.trace("getUnassignedConjunct: " + e.toSql());
+      }
+    }
+    return result;
+  }
+
+  public boolean isOjConjunct(Expr e) {
+    return globalState_.ojClauseByConjunct.containsKey(e.getId());
+  }
+
+  public boolean isIjConjunct(Expr e) {
+    return globalState_.ijClauseByConjunct.containsKey(e.getId());
+  }
+
+  public TableRef getFullOuterJoinRef(Expr e) {
+    return globalState_.fullOuterJoinedConjuncts.get(e.getId());
+  }
+
+  public boolean isFullOuterJoined(Expr e) {
+    return globalState_.fullOuterJoinedConjuncts.containsKey(e.getId());
+  }
+
+  /**
+   * Return all unassigned registered conjuncts for node's table ref ids.
+   * Wrapper around getUnassignedConjuncts(List<TupleId> tupleIds).
+   */
+  public List<Expr> getUnassignedConjuncts(PlanNode node) {
+    return getUnassignedConjuncts(node.getTblRefIds());
+  }
+
+  /**
+   * Return all unassigned registered conjuncts that are fully bound by the given
+   * (logical) tuple ids, can be evaluated by 'tupleIds' and are not tied to an
+   * Outer Join clause.
+   */
+  public List<Expr> getUnassignedConjuncts(List<TupleId> tupleIds) {
+    LOG.trace("getUnassignedConjuncts for node with " + Id.printIds(tupleIds));
+    List<Expr> result = Lists.newArrayList();
+    for (Expr e: getUnassignedConjuncts(tupleIds, true)) {
+      if (canEvalPredicate(tupleIds, e)) {
+        result.add(e);
+        LOG.trace("getUnassignedConjunct: " + e.toSql());
+      }
+    }
+    return result;
+  }
+
+  /**
+   * Returns true if e must be evaluated by a join node. Note that it may still be
+   * safe to evaluate e elsewhere as well, but in any case the join must evaluate e.
+   */
+  public boolean evalByJoin(Expr e) {
+    List<TupleId> tids = Lists.newArrayList();
+    e.getIds(tids, null);
+    if (tids.isEmpty()) return false;
+    if (tids.size() > 1 || isOjConjunct(e) || isFullOuterJoined(e)
+        || (isOuterJoined(tids.get(0))
+            && (!e.isOnClauseConjunct() || isIjConjunct(e)))
+        || (isAntiJoinedConjunct(e) && !isSemiJoined(tids.get(0)))) {
+      return true;
+    }
+    return false;
+  }
+
+  /**
+   * Return all unassigned conjuncts of the outer join referenced by right-hand side
+   * table ref.
+   */
+  public List<Expr> getUnassignedOjConjuncts(TableRef ref) {
+    Preconditions.checkState(ref.getJoinOp().isOuterJoin());
+    List<Expr> result = Lists.newArrayList();
+    List<ExprId> candidates = globalState_.conjunctsByOjClause.get(ref.getId());
+    if (candidates == null) return result;
+    for (ExprId conjunctId: candidates) {
+      if (!globalState_.assignedConjuncts.contains(conjunctId)) {
+        Expr e = globalState_.conjuncts.get(conjunctId);
+        Preconditions.checkNotNull(e);
+        result.add(e);
+        LOG.trace("getUnassignedOjConjunct: " + e.toSql());
+      }
+    }
+    return result;
+  }
+
+  /**
+   * Return rhs ref of last Join clause that outer-joined id.
+   */
+  public TableRef getLastOjClause(TupleId id) {
+    return globalState_.outerJoinedTupleIds.get(id);
+  }
+
+  /**
+   * Return slot descriptor corresponding to column referenced in the context of
+   * tupleDesc, or null if no such reference exists.
+   */
+  public SlotDescriptor getColumnSlot(TupleDescriptor tupleDesc, Column col) {
+    for (SlotDescriptor slotDesc: tupleDesc.getSlots()) {
+      if (slotDesc.getColumn() == col) return slotDesc;
+    }
+    return null;
+  }
+
+  public DescriptorTable getDescTbl() { return globalState_.descTbl; }
+  public ImpaladCatalog getCatalog() { return globalState_.catalog; }
+  public Set<String> getAliases() { return aliasMap_.keySet(); }
+
+  /**
+   * Returns list of candidate equi-join conjuncts to be evaluated by the join node
+   * that is specified by the table ref ids of its left and right children.
+   * If the join to be performed is an outer join, then only equi-join conjuncts
+   * from its On-clause are returned. If an equi-join conjunct is full outer joined,
+   * then it is only added to the result if this join is the one to full-outer join it.
+   */
+  public List<Expr> getEqJoinConjuncts(List<TupleId> lhsTblRefIds,
+      List<TupleId> rhsTblRefIds) {
+    // Contains all equi-join conjuncts that have one child fully bound by one of the
+    // rhs table ref ids (the other child is not bound by that rhs table ref id).
+    List<ExprId> conjunctIds = Lists.newArrayList();
+    for (TupleId rhsId: rhsTblRefIds) {
+      List<ExprId> cids = globalState_.eqJoinConjuncts.get(rhsId);
+      if (cids == null) continue;
+      for (ExprId eid: cids) {
+        if (!conjunctIds.contains(eid)) conjunctIds.add(eid);
+      }
+    }
+
+    // Since we currently prevent join re-ordering across outer joins, we can never
+    // have a bushy outer join with multiple rhs table ref ids. A bushy outer join can
+    // only be constructed with an inline view (which has a single table ref id).
+    List<ExprId> ojClauseConjuncts = null;
+    if (rhsTblRefIds.size() == 1) {
+      ojClauseConjuncts = globalState_.conjunctsByOjClause.get(rhsTblRefIds.get(0));
+    }
+
+    // List of table ref ids that the join node will 'materialize'.
+    List<TupleId> nodeTblRefIds = Lists.newArrayList(lhsTblRefIds);
+    nodeTblRefIds.addAll(rhsTblRefIds);
+    List<Expr> result = Lists.newArrayList();
+    for (ExprId conjunctId: conjunctIds) {
+      Expr e = globalState_.conjuncts.get(conjunctId);
+      Preconditions.checkState(e != null);
+      if (!canEvalFullOuterJoinedConjunct(e, nodeTblRefIds) ||
+          !canEvalAntiJoinedConjunct(e, nodeTblRefIds)) {
+        continue;
+      }
+      if (ojClauseConjuncts != null && !ojClauseConjuncts.contains(conjunctId)) continue;
+      result.add(e);
+    }
+    return result;
+  }
+
+  /**
+   * Checks if a conjunct can be evaluated at a node materializing a list of tuple ids
+   * 'tids'.
+   */
+  public boolean canEvalFullOuterJoinedConjunct(Expr e, List<TupleId> tids) {
+    TableRef fullOuterJoin = getFullOuterJoinRef(e);
+    if (fullOuterJoin == null) return true;
+    return tids.containsAll(fullOuterJoin.getAllTableRefIds());
+  }
+
+  /**
+   * Returns true if predicate 'e' can be correctly evaluated by a tree materializing
+   * 'tupleIds', otherwise false:
+   * - the predicate needs to be bound by tupleIds
+   * - an On clause predicate against the non-nullable side of an Outer Join clause
+   *   can only be correctly evaluated by the join node that materializes the
+   *   Outer Join clause
+   * - otherwise, a predicate can only be correctly evaluated if for all outer-joined
+   *   referenced tids the last join to outer-join this tid has been materialized
+   */
+  public boolean canEvalPredicate(List<TupleId> tupleIds, Expr e) {
+    LOG.trace("canEval: " + e.toSql() + " " + e.debugString() + " "
+        + Id.printIds(tupleIds));
+    if (!e.isBoundByTupleIds(tupleIds)) return false;
+    ArrayList<TupleId> tids = Lists.newArrayList();
+    e.getIds(tids, null);
+    if (tids.isEmpty()) return true;
+
+    if (e.isOnClauseConjunct()) {
+      if (tids.size() > 1) {
+        // If the conjunct is from the ON-clause of an anti join, check if we can
+        // assign it to this node.
+        if (isAntiJoinedConjunct(e)) return canEvalAntiJoinedConjunct(e, tupleIds);
+        // bail if this is from an OJ On clause; the join node will pick
+        // it up later via getUnassignedOjConjuncts()
+        if (globalState_.ojClauseByConjunct.containsKey(e.getId())) return false;
+        // If this is not from an OJ On clause (e.g. where clause or On clause of an
+        // inner join) and is full-outer joined, we need to make sure it is not
+        // assigned below the full outer join node that outer-joined it.
+        return canEvalFullOuterJoinedConjunct(e, tupleIds);
+      }
+
+      TupleId tid = tids.get(0);
+      if (globalState_.ojClauseByConjunct.containsKey(e.getId())) {
+        // OJ On-clause predicate: okay if it's from
+        // the same On clause that makes tid nullable
+        // (otherwise e needn't be true when that tuple is set)
+        if (!globalState_.outerJoinedTupleIds.containsKey(tid)) return false;
+        if (globalState_.ojClauseByConjunct.get(e.getId())
+            != globalState_.outerJoinedTupleIds.get(tid)) {
+          return false;
+        }
+        // Single tuple id conjuncts specified in the FOJ On-clause are not allowed to be
+        // assigned below that full outer join in the operator tree.
+        TableRef tblRef = globalState_.ojClauseByConjunct.get(e.getId());
+        if (tblRef.getJoinOp().isFullOuterJoin()) return false;
+      } else {
+        // Non-OJ On-clause conjunct.
+        if (isOuterJoined(tid)) {
+          // If the conjunct references an outer-joined tuple, then evaluate the
+          // conjunct at the join that the On-clause belongs to.
+          TableRef onClauseTableRef = globalState_.ijClauseByConjunct.get(e.getId());
+          Preconditions.checkNotNull(onClauseTableRef);
+          return tupleIds.containsAll(onClauseTableRef.getAllTableRefIds());
+        }
+        // If this single tid conjunct is from the On-clause of an anti-join, check if we
+        // can assign it to this node.
+        if (isAntiJoinedConjunct(e)) return canEvalAntiJoinedConjunct(e, tupleIds);
+      }
+      // Single tid predicate that is not from an OJ On-clause and is outer-joined by a
+      // full outer join cannot be assigned below that full outer join in the
+      // operator tree.
+      return canEvalFullOuterJoinedConjunct(e, tupleIds);
+    }
+    if (isAntiJoinedConjunct(e)) return canEvalAntiJoinedConjunct(e, tupleIds);
+
+    for (TupleId tid: tids) {
+      LOG.trace("canEval: checking tid " + tid.toString());
+      TableRef rhsRef = getLastOjClause(tid);
+      // this is not outer-joined; ignore
+      if (rhsRef == null) continue;
+      // check whether the last join to outer-join 'tid' is materialized by tupleIds
+      boolean contains = tupleIds.containsAll(rhsRef.getAllTableRefIds());
+      LOG.trace("canEval: contains=" + (contains ? "true " : "false ")
+          + Id.printIds(tupleIds) + " " + Id.printIds(rhsRef.getAllTableRefIds()));
+      if (!tupleIds.containsAll(rhsRef.getAllTableRefIds())) return false;
+    }
+    return true;
+  }
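+
+  // Illustrative example (hypothetical tables): in "t1 LEFT OUTER JOIN t2 ON
+  // t1.id = t2.id WHERE t2.x = 10", the WHERE conjunct references the outer-joined
+  // tuple of t2, so it is only evaluable by a plan tree whose 'tupleIds' also cover
+  // the table ref ids of the last join to outer-join t2.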
+
+  /**
+   * Checks if a conjunct from the On-clause of an anti join can be evaluated in a node
+   * that materializes a given list of tuple ids.
+   */
+  public boolean canEvalAntiJoinedConjunct(Expr e, List<TupleId> nodeTupleIds) {
+    TableRef antiJoinRef = getAntiJoinRef(e);
+    if (antiJoinRef == null) return true;
+    List<TupleId> tids = Lists.newArrayList();
+    e.getIds(tids, null);
+    if (tids.size() > 1) {
+      return nodeTupleIds.containsAll(antiJoinRef.getAllTableRefIds())
+          && antiJoinRef.getAllTableRefIds().containsAll(nodeTupleIds);
+    }
+    // A single tid conjunct that is anti-joined can be safely assigned to a
+    // node below the anti join that specified it.
+    return globalState_.semiJoinedTupleIds.containsKey(tids.get(0));
+  }
+
+  /**
+   * Returns a list of predicates that are fully bound by destTid. Predicates are derived
+   * by replacing the slots of a source predicate with slots of the destTid, if for each
+   * source slot there is an equivalent slot in destTid.
+   * In particular, the returned list contains predicates that must be evaluated
+   * at a join node (bound to outer-joined tuple) but can also be safely evaluated by a
+   * plan node materializing destTid. Such predicates are not marked as assigned.
+   * All other inferred predicates are marked as assigned if 'markAssigned'
+   * is true. This function returns bound predicates regardless of whether the source
+   * predicates have been assigned. It is up to the caller to decide if a bound predicate
+   * should actually be used.
+   * Destination slots in destTid can be ignored by passing them in ignoreSlots.
+   * TODO: exclude UDFs from predicate propagation? their overloaded variants could
+   * have very different semantics
+   */
+  public ArrayList<Expr> getBoundPredicates(TupleId destTid, Set<SlotId> ignoreSlots,
+      boolean markAssigned) {
+    ArrayList<Expr> result = Lists.newArrayList();
+    for (ExprId srcConjunctId: globalState_.singleTidConjuncts) {
+      Expr srcConjunct = globalState_.conjuncts.get(srcConjunctId);
+      if (srcConjunct instanceof SlotRef) continue;
+      Preconditions.checkNotNull(srcConjunct);
+      List<TupleId> srcTids = Lists.newArrayList();
+      List<SlotId> srcSids = Lists.newArrayList();
+      srcConjunct.getIds(srcTids, srcSids);
+      Preconditions.checkState(srcTids.size() == 1);
+
+      // Generate slot-mappings to bind srcConjunct to destTid.
+      TupleId srcTid = srcTids.get(0);
+      List<List<SlotId>> allDestSids =
+          getEquivDestSlotIds(srcTid, srcSids, destTid, ignoreSlots);
+      if (allDestSids.isEmpty()) continue;
+
+      // Indicates whether the source slots have equivalent slots that belong
+      // to an outer-joined tuple.
+      boolean hasOuterJoinedTuple = false;
+      for (SlotId srcSid: srcSids) {
+        if (hasOuterJoinedTuple(globalState_.equivClassBySlotId.get(srcSid))) {
+          hasOuterJoinedTuple = true;
+          break;
+        }
+      }
+
+      // It is incorrect to propagate predicates into a plan subtree that is on the
+      // nullable side of an outer join if the predicate evaluates to true when all
+      // its referenced tuples are NULL. The check below is conservative because the
+      // outer-joined tuple making 'hasOuterJoinedTuple' true could be in a parent block
+      // of 'srcConjunct', in which case it is safe to propagate 'srcConjunct' within
+      // child blocks of the outer-joined parent block.
+      // TODO: Make the check precise by considering the blocks (analyzers) where the
+      // outer-joined tuples in the dest slot's equivalence classes appear
+      // relative to 'srcConjunct'.
+      if (hasOuterJoinedTuple && isTrueWithNullSlots(srcConjunct)) continue;
+
+      // if srcConjunct comes out of an OJ's On clause, we need to make sure it's the
+      // same as the one that makes destTid nullable
+      // (otherwise srcConjunct needn't be true when destTid is set)
+      if (globalState_.ojClauseByConjunct.containsKey(srcConjunct.getId())) {
+        if (!globalState_.outerJoinedTupleIds.containsKey(destTid)) continue;
+        if (globalState_.ojClauseByConjunct.get(srcConjunct.getId())
+            != globalState_.outerJoinedTupleIds.get(destTid)) {
+          continue;
+        }
+        // Do not propagate conjuncts from the on-clause of full-outer or anti-joins.
+        TableRef tblRef = globalState_.ojClauseByConjunct.get(srcConjunct.getId());
+        if (tblRef.getJoinOp().isFullOuterJoin()) continue;
+      }
+
+      // Conjuncts specified in the ON-clause of an anti-join must be evaluated at that
+      // join node.
+      if (isAntiJoinedConjunct(srcConjunct)) continue;
+
+      // Generate predicates for all src-to-dest slot mappings.
+      for (List<SlotId> destSids: allDestSids) {
+        Preconditions.checkState(destSids.size() == srcSids.size());
+        Expr p;
+        if (srcSids.containsAll(destSids)) {
+          p = srcConjunct;
+        } else {
+          ExprSubstitutionMap smap = new ExprSubstitutionMap();
+          for (int i = 0; i < srcSids.size(); ++i) {
+            smap.put(
+                new SlotRef(globalState_.descTbl.getSlotDesc(srcSids.get(i))),
+                new SlotRef(globalState_.descTbl.getSlotDesc(destSids.get(i))));
+          }
+          try {
+            p = srcConjunct.trySubstitute(smap, this, false);
+          } catch (ImpalaException exc) {
+            // not an executable predicate; ignore
+            continue;
+          }
+          // Unset the id because this bound predicate itself is not registered, and
+          // to prevent callers from inadvertently marking the srcConjunct as assigned.
+          p.setId(null);
+          if (p instanceof BinaryPredicate) ((BinaryPredicate) p).setIsInferred();
+          LOG.trace("new pred: " + p.toSql() + " " + p.debugString());
+        }
+
+        if (markAssigned) {
+          // predicate assignment doesn't hold if:
+          // - the application against slotId doesn't transfer the value back to its
+          //   originating slot
+          // - the original predicate is on an OJ'd table but doesn't originate from
+          //   that table's OJ clause's ON clause (if it comes from anywhere but that
+          //   ON clause, it needs to be evaluated directly by the join node that
+          //   materializes the OJ'd table)
+          boolean reverseValueTransfer = true;
+          for (int i = 0; i < srcSids.size(); ++i) {
+            if (!hasValueTransfer(destSids.get(i), srcSids.get(i))) {
+              reverseValueTransfer = false;
+              break;
+            }
+          }
+
+          // Check if either srcConjunct or the generated predicate needs to be evaluated
+          // at a join node (IMPALA-2018).
+          boolean evalByJoin =
+              (evalByJoin(srcConjunct)
+               && (globalState_.ojClauseByConjunct.get(srcConjunct.getId())
+                != globalState_.outerJoinedTupleIds.get(srcTid)))
+              || (evalByJoin(p)
+                  && (globalState_.ojClauseByConjunct.get(p.getId())
+                   != globalState_.outerJoinedTupleIds.get(destTid)));
+
+          // mark all bound predicates including duplicate ones
+          if (reverseValueTransfer && !evalByJoin) markConjunctAssigned(srcConjunct);
+        }
+
+        // check if we already created this predicate
+        if (!result.contains(p)) result.add(p);
+      }
+    }
+    return result;
+  }
+
+  public ArrayList<Expr> getBoundPredicates(TupleId destTid) {
+    return getBoundPredicates(destTid, new HashSet<SlotId>(), true);
+  }
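+
+  // Illustrative sketch (hypothetical columns): given the registered conjunct
+  // "t1.x = 10" and a mutual value transfer between t1.x and t2.y, calling
+  // getBoundPredicates() for t2's tuple id may return the inferred predicate
+  // "t2.y = 10", obtained by substituting the source slot with its equivalent
+  // destination slot.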
+
+  /**
+   * Modifies the analysis state associated with the rhs table ref of an outer join
+   * to accommodate a join inversion that changes the rhs table ref of the join from
+   * oldRhsTbl to newRhsTbl.
+   * TODO: Revisit this function and how outer joins are inverted. This function
+   * should not be necessary because the semantics of an inverted outer join do
+   * not change. This function will naturally become obsolete when we can transform
+   * outer joins with otherPredicates into inner joins.
+   */
+  public void invertOuterJoinState(TableRef oldRhsTbl, TableRef newRhsTbl) {
+    Preconditions.checkState(oldRhsTbl.getJoinOp().isOuterJoin());
+    // Invert analysis state for an outer join.
+    List<ExprId> conjunctIds =
+        globalState_.conjunctsByOjClause.remove(oldRhsTbl.getId());
+    if (conjunctIds != null) {
+      globalState_.conjunctsByOjClause.put(newRhsTbl.getId(), conjunctIds);
+      for (ExprId eid: conjunctIds) {
+        globalState_.ojClauseByConjunct.put(eid, newRhsTbl);
+      }
+    } else {
+      // An outer join is allowed not to have an On-clause if the rhs table ref is
+      // correlated or relative.
+      Preconditions.checkState(oldRhsTbl.isCorrelated() || oldRhsTbl.isRelative());
+    }
+    for (Map.Entry<TupleId, TableRef> e: globalState_.outerJoinedTupleIds.entrySet()) {
+      if (e.getValue() == oldRhsTbl) e.setValue(newRhsTbl);
+    }
+  }
+
+  /**
+   * For each equivalence class, adds/removes predicates from conjuncts such that it
+   * contains a minimum set of <lhsSlot> = <rhsSlot> predicates that establish the known
+   * equivalences between slots in lhsTids and rhsTids which must be disjoint.
+   * Preserves original conjuncts when possible. Assumes that predicates for establishing
+   * equivalences among slots in only lhsTids and only rhsTids have already been
+   * established. This function adds the remaining predicates to "connect" the disjoint
+   * equivalent slot sets of lhsTids and rhsTids.
+   * The intent of this function is to enable construction of a minimum spanning tree
+   * to cover the known slot equivalences. This function should be called for join
+   * nodes during plan generation to (1) remove redundant join predicates, and (2)
+   * establish equivalences among slots materialized at that join node.
+   * TODO: Consider optimizing for the cheapest minimum set of predicates.
+   * TODO: Consider caching the DisjointSet during plan generation instead of
+   * re-creating it here on every invocation.
+   */
+  public <T extends Expr> void createEquivConjuncts(List<TupleId> lhsTids,
+      List<TupleId> rhsTids, List<T> conjuncts) {
+    Preconditions.checkState(Collections.disjoint(lhsTids, rhsTids));
+
+    // Equivalence classes only containing slots belonging to lhsTids.
+    Map<EquivalenceClassId, List<SlotId>> lhsEquivClasses =
+        getEquivClasses(lhsTids);
+
+    // Equivalence classes only containing slots belonging to rhsTids.
+    Map<EquivalenceClassId, List<SlotId>> rhsEquivClasses =
+        getEquivClasses(rhsTids);
+
+    // Maps from a slot id to its set of equivalent slots. Used to track equivalences
+    // that have been established by predicates assigned/generated to plan nodes
+    // materializing lhsTids as well as the given conjuncts.
+    DisjointSet<SlotId> partialEquivSlots = new DisjointSet<SlotId>();
+    // Add the partial equivalences to the partialEquivSlots map. The equivalent-slot
+    // sets of slots from lhsTids are disjoint from those of slots from rhsTids.
+    // We need to 'connect' the disjoint slot sets by constructing a new predicate
+    // for each equivalence class (unless there is already one in 'conjuncts').
+    for (List<SlotId> partialEquivClass: lhsEquivClasses.values()) {
+      partialEquivSlots.bulkUnion(partialEquivClass);
+    }
+    for (List<SlotId> partialEquivClass: rhsEquivClasses.values()) {
+      partialEquivSlots.bulkUnion(partialEquivClass);
+    }
+
+    // Set of outer-joined slots referenced by conjuncts.
+    Set<SlotId> outerJoinedSlots = Sets.newHashSet();
+
+    // Update partialEquivSlots based on equality predicates in 'conjuncts'. Removes
+    // redundant conjuncts, unless they reference outer-joined slots (see below).
+    Iterator<T> conjunctIter = conjuncts.iterator();
+    while (conjunctIter.hasNext()) {
+      Expr conjunct = conjunctIter.next();
+      Pair<SlotId, SlotId> eqSlots = BinaryPredicate.getEqSlots(conjunct);
+      if (eqSlots == null) continue;
+      EquivalenceClassId firstEqClassId = getEquivClassId(eqSlots.first);
+      EquivalenceClassId secondEqClassId = getEquivClassId(eqSlots.second);
+      // slots may not be in the same eq class due to outer joins
+      if (!firstEqClassId.equals(secondEqClassId)) continue;
+
+      // Retain an otherwise redundant predicate if it references a slot of an
+      // outer-joined tuple that is not already referenced by another join predicate
+      // to maintain that the rows must satisfy outer-joined-slot IS NOT NULL
+      // (otherwise NULL tuples from outer joins could survive).
+      // TODO: Consider better fixes for outer-joined slots: (1) Create IS NOT NULL
+      // predicates and place them at the lowest possible plan node. (2) Convert outer
+      // joins into inner joins (or full outer joins into left/right outer joins).
+      boolean filtersOuterJoinNulls = false;
+      if (isOuterJoined(eqSlots.first)
+          && lhsTids.contains(getTupleId(eqSlots.first))
+          && !outerJoinedSlots.contains(eqSlots.first)) {
+        outerJoinedSlots.add(eqSlots.first);
+        filtersOuterJoinNulls = true;
+      }
+      if (isOuterJoined(eqSlots.second)
+          && lhsTids.contains(getTupleId(eqSlots.second))
+          && !outerJoinedSlots.contains(eqSlots.second)) {
+        outerJoinedSlots.add(eqSlots.second);
+        filtersOuterJoinNulls = true;
+      }
+      // retain conjunct if it connects two formerly unconnected equiv classes or
+      // it is required for outer-join semantics
+      if (!partialEquivSlots.union(eqSlots.first, eqSlots.second)
+          && !filtersOuterJoinNulls) {
+        conjunctIter.remove();
+      }
+    }
+
+    // For each equivalence class, construct a new predicate to 'connect' the disjoint
+    // slot sets.
+    for (Map.Entry<EquivalenceClassId, List<SlotId>> rhsEquivClass:
+      rhsEquivClasses.entrySet()) {
+      List<SlotId> lhsSlots = lhsEquivClasses.get(rhsEquivClass.getKey());
+      if (lhsSlots == null) continue;
+      List<SlotId> rhsSlots = rhsEquivClass.getValue();
+      Preconditions.checkState(!lhsSlots.isEmpty() && !rhsSlots.isEmpty());
+
+      if (!partialEquivSlots.union(lhsSlots.get(0), rhsSlots.get(0))) continue;
+      // Do not create a new predicate from slots that are full outer joined because that
+      // predicate may be incorrectly assigned to a node below the associated full outer
+      // join.
+      if (isFullOuterJoined(lhsSlots.get(0)) || isFullOuterJoined(rhsSlots.get(0))) {
+        continue;
+      }
+      T newEqPred = (T) createInferredEqPred(lhsSlots.get(0), rhsSlots.get(0));
+      if (!hasMutualValueTransfer(lhsSlots.get(0), rhsSlots.get(0))) continue;
+      conjuncts.add(newEqPred);
+    }
+  }
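+
+  // Illustrative sketch (hypothetical slots): if lhs slot a.id and rhs slot b.id are
+  // in the same equivalence class and "a.id = b.id" is already in 'conjuncts', that
+  // predicate is kept to connect the two sides; otherwise a single inferred equality
+  // predicate between one lhs slot and one rhs slot of the class is added, while
+  // redundant equalities within an already-connected class are removed.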
+
+  /**
+   * For each equivalence class, adds/removes predicates from conjuncts such that
+   * it contains a minimum set of <slot> = <slot> predicates that establish
+   * the known equivalences between slots belonging to tid. Preserves original
+   * conjuncts when possible.
+   * The intent of this function is to enable construction of a minimum spanning tree
+   * to cover the known slot equivalences. This function should be called to add
+   * conjuncts to plan nodes that materialize a new tuple, e.g., scans and aggregations.
+   * Does not enforce equivalence between slots in ignoreSlots. Equivalences (if any)
+   * among slots in ignoreSlots are assumed to have already been enforced.
+   * TODO: Consider optimizing for the cheapest minimum set of predicates.
+   */
+  public <T extends Expr> void createEquivConjuncts(TupleId tid, List<T> conjuncts,
+      Set<SlotId> ignoreSlots) {
+    // Maps from a slot id to its set of equivalent slots. Used to track equivalences
+    // that have been established by 'conjuncts' and the 'ignoreSlots'.
+    DisjointSet<SlotId> partialEquivSlots = new DisjointSet<SlotId>();
+
+    // Treat ignored slots as already connected. Add the ignored slots at this point
+    // such that redundant conjuncts are removed.
+    partialEquivSlots.bulkUnion(ignoreSlots);
+    partialEquivSlots.checkConsistency();
+
+    // Update partialEquivSlots based on equality predicates in 'conjuncts'. Removes
+    // redundant conjuncts, unless they reference outer-joined slots (see below).
+    Iterator<T> conjunctIter = conjuncts.iterator();
+    while (conjunctIter.hasNext()) {
+      Expr conjunct = conjunctIter.next();
+      Pair<SlotId, SlotId> eqSlots = BinaryPredicate.getEqSlots(conjunct);
+      if (eqSlots == null) continue;
+      EquivalenceClassId firstEqClassId = getEquivClassId(eqSlots.first);
+      EquivalenceClassId secondEqClassId = getEquivClassId(eqSlots.second);
+      // slots may not be in the same eq class due to outer joins
+      if (!firstEqClassId.equals(secondEqClassId)) continue;
+      // update equivalences and remove redundant conjuncts
+      if (!partialEquivSlots.union(eqSlots.first, eqSlots.second)) conjunctIter.remove();
+    }
+    // Suppose conjuncts had these predicates belonging to equivalence classes e1 and e2:
+    // e1: s1 = s2, s3 = s4, s3 = s5
+    // e2: s10 = s11
+    // partialEquivSlots should contain the following entries at this point:
+    // s1 -> {s1, s2}
+    // s2 -> {s1, s2}
+    // s3 -> {s3, s4, s5}
+    // s4 -> {s3, s4, s5}
+    // s5 -> {s3, s4, s5}
+    // s10 -> {s10, s11}
+    // s11 -> {s10, s11}
+    // Assuming e1 = {s1, s2, s3, s4, s5} we need to generate one additional equality
+    // predicate to "connect" {s1, s2} and {s3, s4, s5}.
+
+    // These are the equivalences that need to be established by constructing conjuncts
+    // to form a minimum spanning tree.
+    Map<EquivalenceClassId, List<SlotId>> targetEquivClasses =
+        getEquivClasses(Lists.newArrayList(tid));
+    for (Map.Entry<EquivalenceClassId, List<SlotId>> targetEquivClass:
+      targetEquivClasses.entrySet()) {
+      // Loop over all pairs of equivalent slots and merge their disjoint slot sets,
+      // creating missing equality predicates as necessary.
+      List<SlotId> slotIds = targetEquivClass.getValue();
+      boolean done = false;
+      for (int i = 1; i < slotIds.size(); ++i) {
+        SlotId rhs = slotIds.get(i);
+        for (int j = 0; j < i; ++j) {
+          SlotId lhs = slotIds.get(j);
+          if (!partialEquivSlots.union(lhs, rhs)) continue;
+          if (!hasMutualValueTransfer(lhs, rhs)) continue;
+          conjuncts.add((T) createInferredEqPred(lhs, rhs));
+          // Check for early termination.
+          if (partialEquivSlots.get(lhs).size() == slotIds.size()) {
+            done = true;
+            break;
+          }
+        }
+        if (done) break;
+      }
+    }
+  }
+
+  public <T extends Expr> void createEquivConjuncts(TupleId tid, List<T> conjuncts) {
+    createEquivConjuncts(tid, conjuncts, new HashSet<SlotId>());
+  }
+
+  /**
+   * Returns a map of partial equivalence classes that only contains slot ids belonging
+   * to the given tuple ids. Only contains equivalence classes with more than one member.
+   */
+  private Map<EquivalenceClassId, List<SlotId>> getEquivClasses(List<TupleId> tids) {
+    Map<EquivalenceClassId, List<SlotId>> result = Maps.newHashMap();
+    for (TupleId tid: tids) {
+      for (SlotDescriptor slotDesc: getTupleDesc(tid).getSlots()) {
+        EquivalenceClassId eqClassId = getEquivClassId(slotDesc.getId());
+        // Ignore equivalence classes that are empty or only have a single member.
+        if (globalState_.equivClassMembers.get(eqClassId).size() <= 1) continue;
+        List<SlotId> slotIds = result.get(eqClassId);
+        if (slotIds == null) {
+          slotIds = Lists.newArrayList();
+          result.put(eqClassId, slotIds);
+        }
+        slotIds.add(slotDesc.getId());
+      }
+    }
+    return result;
+  }
+
+  /**
+   * Returns a list of slot mappings from srcTid to destTid for the purpose of predicate
+   * propagation. Each mapping assigns every slot in srcSids to an equivalent slot in
+   * destTid. Does not generate all possible mappings, but limits the results to
+   * useful and/or non-redundant mappings, i.e., those mappings that would improve
+   * the performance of query execution.
+   */
+  private List<List<SlotId>> getEquivDestSlotIds(TupleId srcTid, List<SlotId> srcSids,
+      TupleId destTid, Set<SlotId> ignoreSlots) {
+    List<List<SlotId>> allDestSids = Lists.newArrayList();
+    TupleDescriptor destTupleDesc = getTupleDesc(destTid);
+    if (srcSids.size() == 1) {
+      // Generate all mappings to propagate predicates of the form <slot> <op> <constant>
+      // to as many destination slots as possible.
+      // TODO: If srcTid == destTid we could limit the mapping to partition
+      // columns because mappings to non-partition columns do not provide
+      // a performance benefit.
+      SlotId srcSid = srcSids.get(0);
+      for (SlotDescriptor destSlot: destTupleDesc.getSlots()) {
+        if (ignoreSlots.contains(destSlot.getId())) continue;
+        if (hasValueTransfer(srcSid, destSlot.getId())) {
+          allDestSids.add(Lists.newArrayList(destSlot.getId()));
+        }
+      }
+    } else if (srcTid.equals(destTid)) {
+      // Multiple source slot ids and srcTid == destTid. Inter-tuple transfers are
+      // already expressed by the original conjuncts. Any mapping would be redundant.
+      // Still add srcSids to the result because we rely on getBoundPredicates() to
+      // include predicates that can safely be evaluated below an outer join, but must
+      // also be evaluated by the join itself (evalByJoin() == true).
+      allDestSids.add(srcSids);
+    } else {
+      // Multiple source slot ids and srcTid != destTid. Pick the first mapping
+      // where each srcSid is mapped to a different destSid to avoid generating
+      // redundant and/or trivial predicates.
+      // TODO: This approach is not guaranteed to find the best slot mapping
+      // (e.g., against partition columns) or all non-redundant mappings.
+      // The limitations are shown in predicate-propagation.test.
+      List<SlotId> destSids = Lists.newArrayList();
+      for (SlotId srcSid: srcSids) {
+        for (SlotDescriptor destSlot: destTupleDesc.getSlots()) {
+          if (ignoreSlots.contains(destSlot.getId())) continue;
+          if (hasValueTransfer(srcSid, destSlot.getId())
+              && !destSids.contains(destSlot.getId())) {
+            destSids.add(destSlot.getId());
+            break;
+          }
+        }
+      }
+      if (destSids.size() == srcSids.size()) allDestSids.add(destSids);
+    }
+    return allDestSids;
+  }
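+
+  // Illustrative sketch: for a single-slot source conjunct such as "t1.x = 10"
+  // (hypothetical), each destination slot with a value transfer from t1.x yields its
+  // own mapping; for multi-slot conjuncts at most one mapping with pairwise distinct
+  // destination slots is produced.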
+
+  /**
+   * Returns true if the equivalence class identified by 'eqClassId' contains
+   * a slot belonging to an outer-joined tuple.
+   */
+  private boolean hasOuterJoinedTuple(EquivalenceClassId eqClassId) {
+    ArrayList<SlotId> eqClass = globalState_.equivClassMembers.get(eqClassId);
+    for (SlotId s: eqClass) {
+      if (isOuterJoined(getTupleId(s))) return true;
+    }
+    return false;
+  }
+
+  /**
+   * Returns true if 'p' evaluates to true when all its referenced slots are NULL,
+   * false otherwise.
+   * TODO: Can we avoid dealing with the exceptions thrown by analysis and eval?
+   */
+  public boolean isTrueWithNullSlots(Expr p) {
+    // Construct predicate with all SlotRefs substituted by NullLiterals.
+    List<SlotRef> slotRefs = Lists.newArrayList();
+    p.collect(Predicates.instanceOf(SlotRef.class), slotRefs);
+
+    // Map for substituting SlotRefs with NullLiterals.
+    ExprSubstitutionMap nullSmap = new ExprSubstitutionMap();
+    for (SlotRef slotRef: slotRefs) {
+      // Preserve the original SlotRef type to ensure all substituted
+      // subexpressions in the predicate have the same return type and
+      // function signature as in the original predicate.
+      nullSmap.put(slotRef.clone(), NullLiteral.create(slotRef.getType()));
+    }
+    Expr nullTuplePred = p.substitute(nullSmap, this, false);
+    try {
+      return FeSupport.EvalPredicate(nullTuplePred, getQueryCtx());
+    } catch (InternalException e) {
+      Preconditions.checkState(false, "Failed to evaluate generated predicate: "
+          + nullTuplePred.toSql() + "." + e.getMessage());
+    }
+    return true;
+  }
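+
+  // Illustrative example: for "c IS NULL" the substituted predicate "NULL IS NULL"
+  // evaluates to true, so this returns true and the predicate is not safe to
+  // propagate below an outer join; for "c = 10" the substituted predicate evaluates
+  // to NULL (not true), so this returns false.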
+
+  public TupleId getTupleId(SlotId slotId) {
+    return globalState_.descTbl.getSlotDesc(slotId).getParent().getId();
+  }
+
+  public void registerValueTransfer(SlotId id1, SlotId id2) {
+    globalState_.registeredValueTransfers.add(new Pair(id1, id2));
+  }
+
+  public boolean isOuterJoined(TupleId tid) {
+    return globalState_.outerJoinedTupleIds.containsKey(tid);
+  }
+
+  public boolean isOuterJoined(SlotId sid) {
+    return isOuterJoined(getTupleId(sid));
+  }
+
+  public boolean isSemiJoined(TupleId tid) {
+    return globalState_.semiJoinedTupleIds.containsKey(tid);
+  }
+
+  public boolean isAntiJoinedConjunct(Expr e) {
+    return getAntiJoinRef(e) != null;
+  }
+
+  public TableRef getAntiJoinRef(Expr e) {
+    TableRef tblRef = globalState_.sjClauseByConjunct.get(e.getId());
+    if (tblRef == null) return null;
+    return (tblRef.getJoinOp().isAntiJoin()) ? tblRef : null;
+  }
+
+  public boolean isFullOuterJoined(TupleId tid) {
+    return globalState_.fullOuterJoinedTupleIds.containsKey(tid);
+  }
+
+  public boolean isFullOuterJoined(SlotId sid) {
+    return isFullOuterJoined(getTupleId(sid));
+  }
+
+  public boolean isVisible(TupleId tid) {
+    return tid == visibleSemiJoinedTupleId_ || !isSemiJoined(tid);
+  }
+
+  public boolean containsOuterJoinedTid(List<TupleId> tids) {
+    for (TupleId tid: tids) {
+      if (isOuterJoined(tid)) return true;
+    }
+    return false;
+  }
+
+  /**
+   * Populate globalState.valueTransfer based on the registered equi-join predicates
+   * of the form <slotref> = <slotref>.
+   */
+  public void computeEquivClasses() {
+    globalState_.valueTransferGraph = new ValueTransferGraph();
+    globalState_.valueTransferGraph.computeValueTransfers();
+
+    // we start out by assigning each slot to its own equiv class
+    int numSlots = globalState_.descTbl.getMaxSlotId().asInt() + 1;
+    for (int i = 0; i < numSlots; ++i) {
+      EquivalenceClassId id = globalState_.equivClassIdGenerator.getNextId();
+      globalState_.equivClassMembers.put(id, Lists.newArrayList(new SlotId(i)));
+    }
+
+    // merge two classes if there is a value transfer between all members of the
+    // combined class; do this until there's nothing left to merge
+    boolean merged;
+    do {
+      merged = false;
+      for (Map.Entry<EquivalenceClassId, ArrayList<SlotId>> e1:
+          globalState_.equivClassMembers.entrySet()) {
+        for (Map.Entry<EquivalenceClassId, ArrayList<SlotId>> e2:
+            globalState_.equivClassMembers.entrySet()) {
+          if (e1.getKey() == e2.getKey()) continue;
+          List<SlotId> class1Members = e1.getValue();
+          if (class1Members.isEmpty()) continue;
+          List<SlotId> class2Members = e2.getValue();
+          if (class2Members.isEmpty()) continue;
+
+          // check whether we can transfer values between all members
+          boolean canMerge = true;
+          for (SlotId class1Slot: class1Members) {
+            for (SlotId class2Slot: class2Members) {
+              if (!hasValueTransfer(class1Slot, class2Slot)
+                  && !hasValueTransfer(class2Slot, class1Slot)) {
+                canMerge = false;
+                break;
+              }
+            }
+            if (!canMerge) break;
+          }
+          if (!canMerge) continue;
+
+          // merge classes 1 and 2 by transferring 2 into 1
+          class1Members.addAll(class2Members);
+          class2Members.clear();
+          merged = true;
+        }
+      }
+    } while (merged);
+
+    // populate equivClassSmap
+  

<TRUNCATED>


[35/61] [partial] incubator-impala git commit: IMPALA-3786: Replace "cloudera" with "apache" (part 1)

Posted by kw...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/TimestampArithmeticExpr.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/TimestampArithmeticExpr.java b/fe/src/main/java/com/cloudera/impala/analysis/TimestampArithmeticExpr.java
deleted file mode 100644
index 5049064..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/TimestampArithmeticExpr.java
+++ /dev/null
@@ -1,215 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.HashMap;
-import java.util.Map;
-
-import com.cloudera.impala.analysis.ArithmeticExpr.Operator;
-import com.cloudera.impala.catalog.Function.CompareMode;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TExprNode;
-import com.cloudera.impala.thrift.TExprNodeType;
-import com.google.common.base.Preconditions;
-
-/**
- * Describes the addition and subtraction of time units from timestamps.
- * Arithmetic expressions on timestamps are syntactic sugar.
- * They are executed as function call exprs in the BE.
- */
-public class TimestampArithmeticExpr extends Expr {
-
-  // Time units supported in timestamp arithmetic.
-  public static enum TimeUnit {
-    YEAR("YEAR"),
-    MONTH("MONTH"),
-    WEEK("WEEK"),
-    DAY("DAY"),
-    HOUR("HOUR"),
-    MINUTE("MINUTE"),
-    SECOND("SECOND"),
-    MILLISECOND("MILLISECOND"),
-    MICROSECOND("MICROSECOND"),
-    NANOSECOND("NANOSECOND");
-
-    private final String description_;
-
-    private TimeUnit(String description) {
-      this.description_ = description;
-    }
-
-    @Override
-    public String toString() {
-      return description_;
-    }
-  }
-
-  private static Map<String, TimeUnit> TIME_UNITS_MAP = new HashMap<String, TimeUnit>();
-  static {
-    for (TimeUnit timeUnit : TimeUnit.values()) {
-      TIME_UNITS_MAP.put(timeUnit.toString(), timeUnit);
-      TIME_UNITS_MAP.put(timeUnit.toString() + "S", timeUnit);
-    }
-  }
-
-  // Set for function call-like arithmetic.
-  private final String funcName_;
-  private ArithmeticExpr.Operator op_;
-
-  // Keep the original string passed in the c'tor to resolve
-  // ambiguities with other uses of IDENT during query parsing.
-  private final String timeUnitIdent_;
-  private TimeUnit timeUnit_;
-
-  // Indicates an expr where the interval comes first, e.g., 'interval b year + a'.
-  private final boolean intervalFirst_;
-
-  // C'tor for function-call like arithmetic, e.g., 'date_add(a, interval b year)'.
-  public TimestampArithmeticExpr(String funcName, Expr e1, Expr e2,
-      String timeUnitIdent) {
-    this.funcName_ = funcName.toLowerCase();
-    this.timeUnitIdent_ = timeUnitIdent;
-    this.intervalFirst_ = false;
-    children_.add(e1);
-    children_.add(e2);
-  }
-
-  // C'tor for non-function-call like arithmetic, e.g., 'a + interval b year'.
-  // e1 always refers to the timestamp to be added/subtracted from, and e2
-  // to the time value (even in the interval-first case).
-  public TimestampArithmeticExpr(ArithmeticExpr.Operator op, Expr e1, Expr e2,
-      String timeUnitIdent, boolean intervalFirst) {
-    Preconditions.checkState(op == Operator.ADD || op == Operator.SUBTRACT);
-    this.funcName_ = null;
-    this.op_ = op;
-    this.timeUnitIdent_ = timeUnitIdent;
-    this.intervalFirst_ = intervalFirst;
-    children_.add(e1);
-    children_.add(e2);
-  }
-
-  /**
-   * Copy c'tor used in clone().
-   */
-  protected TimestampArithmeticExpr(TimestampArithmeticExpr other) {
-    super(other);
-    funcName_ = other.funcName_;
-    op_ = other.op_;
-    timeUnitIdent_ = other.timeUnitIdent_;
-    timeUnit_ = other.timeUnit_;
-    intervalFirst_ = other.intervalFirst_;
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    if (isAnalyzed_) return;
-    super.analyze(analyzer);
-
-    if (funcName_ != null) {
-      // Set op based on funcName for function-call like version.
-      if (funcName_.equals("date_add")) {
-        op_ = ArithmeticExpr.Operator.ADD;
-      } else if (funcName_.equals("date_sub")) {
-        op_ = ArithmeticExpr.Operator.SUBTRACT;
-      } else {
-        throw new AnalysisException("Encountered function name '" + funcName_ +
-            "' in timestamp arithmetic expression '" + toSql() + "'. " +
-            "Expected function name 'DATE_ADD' or 'DATE_SUB'.");
-      }
-    }
-
-    timeUnit_ = TIME_UNITS_MAP.get(timeUnitIdent_.toUpperCase());
-    if (timeUnit_ == null) {
-      throw new AnalysisException("Invalid time unit '" + timeUnitIdent_ +
-          "' in timestamp arithmetic expression '" + toSql() + "'.");
-    }
-
-    // The first child must return a timestamp or null.
-    if (!getChild(0).getType().isTimestamp() && !getChild(0).getType().isNull()) {
-      throw new AnalysisException("Operand '" + getChild(0).toSql() +
-          "' of timestamp arithmetic expression '" + toSql() + "' returns type '" +
-          getChild(0).getType().toSql() + "'. Expected type 'TIMESTAMP'.");
-    }
-
-    // The second child must be an integer type.
-    if (!getChild(1).getType().isIntegerType() &&
-        !getChild(1).getType().isNull()) {
-      throw new AnalysisException("Operand '" + getChild(1).toSql() +
-          "' of timestamp arithmetic expression '" + toSql() + "' returns type '" +
-          getChild(1).getType().toSql() + "'. Expected an integer type.");
-    }
-
-    String funcOpName = String.format("%sS_%s",  timeUnit_,
-        (op_ == ArithmeticExpr.Operator.ADD) ? "ADD" : "SUB");
-    // For the month interval, use the invisible special-case implementation.
-    // "ADD_MONTHS(t, m)" by definition is different from "t + INTERVAL m MONTHS".
-    if (timeUnit_ == TimeUnit.MONTH) funcOpName += "_INTERVAL";
-
-    fn_ = getBuiltinFunction(analyzer, funcOpName.toLowerCase(),
-         collectChildReturnTypes(), CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
-    castForFunctionCall(false);
-
-    Preconditions.checkNotNull(fn_);
-    Preconditions.checkState(fn_.getReturnType().isTimestamp());
-    type_ = fn_.getReturnType();
-    if (hasChildCosts()) evalCost_ = getChildCosts() + TIMESTAMP_ARITHMETIC_COST;
-  }
-
-  @Override
-  protected void toThrift(TExprNode msg) {
-    msg.node_type = TExprNodeType.FUNCTION_CALL;
-  }
-
-  public String getTimeUnitIdent() { return timeUnitIdent_; }
-  public TimeUnit getTimeUnit() { return timeUnit_; }
-  public ArithmeticExpr.Operator getOp() { return op_; }
-
-  @Override
-  public String toSqlImpl() {
-    StringBuilder strBuilder = new StringBuilder();
-    if (funcName_ != null) {
-      // Function-call like version.
-      strBuilder.append(funcName_.toUpperCase() + "(");
-      strBuilder.append(getChild(0).toSql() + ", ");
-      strBuilder.append("INTERVAL ");
-      strBuilder.append(getChild(1).toSql());
-      strBuilder.append(" " + timeUnitIdent_);
-      strBuilder.append(")");
-      return strBuilder.toString();
-    }
-    if (intervalFirst_) {
-      // Non-function-call like version with interval as first operand.
-      strBuilder.append("INTERVAL ");
-      strBuilder.append(getChild(1).toSql() + " ");
-      strBuilder.append(timeUnitIdent_);
-      strBuilder.append(" " + op_.toString() + " ");
-      strBuilder.append(getChild(0).toSql());
-    } else {
-      // Non-function-call like version with interval as second operand.
-      strBuilder.append(getChild(0).toSql());
-      strBuilder.append(" " + op_.toString() + " ");
-      strBuilder.append("INTERVAL ");
-      strBuilder.append(getChild(1).toSql() + " ");
-      strBuilder.append(timeUnitIdent_);
-    }
-    return strBuilder.toString();
-  }
-
-  @Override
-  public Expr clone() { return new TimestampArithmeticExpr(this); }
-}
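
To make the builtin-name mapping in TimestampArithmeticExpr.analyze() above concrete, here is a compact sketch using plain strings in place of the TimeUnit and ArithmeticExpr.Operator enums. TimestampFnNameSketch and builtinName are invented names, but the string construction mirrors the deleted code.

public class TimestampFnNameSketch {
  static String builtinName(String timeUnit, boolean isAdd) {
    String name = String.format("%sS_%s", timeUnit, isAdd ? "ADD" : "SUB");
    // MONTH uses the interval-specific variant because ADD_MONTHS(t, m) is defined
    // differently from t + INTERVAL m MONTHS.
    if (timeUnit.equals("MONTH")) name += "_INTERVAL";
    return name.toLowerCase();
  }

  public static void main(String[] args) {
    System.out.println(builtinName("DAY", true));    // days_add
    System.out.println(builtinName("HOUR", false));  // hours_sub
    System.out.println(builtinName("MONTH", true));  // months_add_interval
  }
}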

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/ToSqlUtils.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/ToSqlUtils.java b/fe/src/main/java/com/cloudera/impala/analysis/ToSqlUtils.java
deleted file mode 100644
index 851e6ba..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/ToSqlUtils.java
+++ /dev/null
@@ -1,348 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import com.cloudera.impala.catalog.KuduTable;
-import org.antlr.runtime.ANTLRStringStream;
-import org.antlr.runtime.Token;
-import org.apache.commons.lang.StringEscapeUtils;
-import org.apache.hadoop.hive.metastore.TableType;
-import org.apache.hadoop.hive.ql.parse.HiveLexer;
-
-import com.cloudera.impala.catalog.CatalogException;
-import com.cloudera.impala.catalog.Column;
-import com.cloudera.impala.catalog.Function;
-import com.cloudera.impala.catalog.HBaseTable;
-import com.cloudera.impala.catalog.HdfsCompression;
-import com.cloudera.impala.catalog.HdfsFileFormat;
-import com.cloudera.impala.catalog.RowFormat;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.catalog.View;
-import com.cloudera.impala.common.PrintUtils;
-import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Strings;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-
-/**
- * Contains utility methods for creating SQL strings, for example,
- * for creating identifier strings that are compatible with Hive or Impala.
- */
-public class ToSqlUtils {
-  // Table properties to hide when generating the toSql() statement
-  // EXTERNAL and comment are hidden because they are part of the toSql result, e.g.,
-  // "CREATE EXTERNAL TABLE <name> ... COMMENT <comment> ..."
-  private static final ImmutableSet<String> HIDDEN_TABLE_PROPERTIES =
-      ImmutableSet.of("EXTERNAL", "comment");
-
-  /**
-   * Given an unquoted identifier string, returns an identifier lexable by
-   * Impala and Hive, possibly by enclosing the original identifier in "`" quotes.
-   * For example, Hive cannot parse its own auto-generated column
-   * names "_c0", "_c1" etc. unless they are quoted. Impala and Hive keywords
-   * must also be quoted.
-   *
-   * Impala's lexer recognizes a superset of the unquoted identifiers that Hive can.
-   * At the same time, Impala's and Hive's list of keywords differ.
-   * This method always returns an identifier that Impala and Hive can recognize,
-   * although for some identifiers the quotes may not be strictly necessary for
-   * one or the other system.
-   */
-  public static String getIdentSql(String ident) {
-    boolean hiveNeedsQuotes = true;
-    HiveLexer hiveLexer = new HiveLexer(new ANTLRStringStream(ident));
-    try {
-      Token t = hiveLexer.nextToken();
-      // Check that the lexer recognizes an identifier and then EOF.
-      boolean identFound = t.getType() == HiveLexer.Identifier;
-      t = hiveLexer.nextToken();
-      // No enclosing quotes are necessary for Hive.
-      hiveNeedsQuotes = !(identFound && t.getType() == HiveLexer.EOF);
-    } catch (Exception e) {
-      // Ignore exception and just quote the identifier to be safe.
-    }
-    boolean isImpalaKeyword = SqlScanner.isKeyword(ident.toUpperCase());
-    // Impala's scanner recognizes the ".123" portion of "db.123_tbl" as a decimal,
-    // so while the quoting is not necessary for the given identifier itself, the quotes
-    // are needed if this identifier will be preceded by a ".".
-    boolean startsWithNumber = false;
-    if (!hiveNeedsQuotes && !isImpalaKeyword) {
-      try {
-        Integer.parseInt(ident.substring(0, 1));
-        startsWithNumber = true;
-      } catch (NumberFormatException e) {
-        // Ignore exception, identifier does not start with number.
-      }
-    }
-    if (hiveNeedsQuotes || isImpalaKeyword || startsWithNumber) return "`" + ident + "`";
-    return ident;
-  }
-
-  public static List<String> getIdentSqlList(List<String> identList) {
-    List<String> identSqlList = Lists.newArrayList();
-    for (String ident: identList) {
-      identSqlList.add(getIdentSql(ident));
-    }
-    return identSqlList;
-  }
-
-  public static String getPathSql(List<String> path) {
-    StringBuilder result = new StringBuilder();
-    for (String p: path) {
-      if (result.length() > 0) result.append(".");
-      result.append(getIdentSql(p));
-    }
-    return result.toString();
-  }
-
-  /**
-   * Returns the "CREATE TABLE" SQL string corresponding to the given CreateTableStmt
-   * statement.
-   */
-  public static String getCreateTableSql(CreateTableStmt stmt) {
-    ArrayList<String> colsSql = Lists.newArrayList();
-    for (ColumnDef col: stmt.getColumnDefs()) {
-      colsSql.add(col.toString());
-    }
-    ArrayList<String> partitionColsSql = Lists.newArrayList();
-    for (ColumnDef col: stmt.getPartitionColumnDefs()) {
-      partitionColsSql.add(col.toString());
-    }
-    // TODO: Pass the correct compression, if applicable.
-    return getCreateTableSql(stmt.getDb(), stmt.getTbl(), stmt.getComment(), colsSql,
-        partitionColsSql, stmt.getTblProperties(), stmt.getSerdeProperties(),
-        stmt.isExternal(), stmt.getIfNotExists(), stmt.getRowFormat(),
-        HdfsFileFormat.fromThrift(stmt.getFileFormat()), HdfsCompression.NONE, null,
-        stmt.getLocation());
-  }
-
-  /**
-   * Returns the "CREATE TABLE" SQL string corresponding to the given
-   * CreateTableAsSelectStmt statement.
-   */
-  public static String getCreateTableSql(CreateTableAsSelectStmt stmt) {
-    CreateTableStmt innerStmt = stmt.getCreateStmt();
-    // Only add partition column labels to output. Table columns must not be specified as
-    // they are deduced from the select statement.
-    ArrayList<String> partitionColsSql = Lists.newArrayList();
-    for (ColumnDef col: innerStmt.getPartitionColumnDefs()) {
-      partitionColsSql.add(col.getColName());
-    }
-    // TODO: Pass the correct compression, if applicable.
-    String createTableSql = getCreateTableSql(innerStmt.getDb(), innerStmt.getTbl(),
-        innerStmt.getComment(), null, partitionColsSql, innerStmt.getTblProperties(),
-        innerStmt.getSerdeProperties(), innerStmt.isExternal(),
-        innerStmt.getIfNotExists(), innerStmt.getRowFormat(),
-        HdfsFileFormat.fromThrift(innerStmt.getFileFormat()), HdfsCompression.NONE, null,
-        innerStmt.getLocation());
-    return createTableSql + " AS " + stmt.getQueryStmt().toSql();
-  }
-
-  /**
-   * Returns a "CREATE TABLE" or "CREATE VIEW" statement that creates the specified
-   * table.
-   */
-  public static String getCreateTableSql(Table table) throws CatalogException {
-    Preconditions.checkNotNull(table);
-    if (table instanceof View) return getCreateViewSql((View)table);
-    org.apache.hadoop.hive.metastore.api.Table msTable = table.getMetaStoreTable();
-    HashMap<String, String> properties = Maps.newHashMap(msTable.getParameters());
-    boolean isExternal = msTable.getTableType() != null &&
-        msTable.getTableType().equals(TableType.EXTERNAL_TABLE.toString());
-    String comment = properties.get("comment");
-    for (String hiddenProperty: HIDDEN_TABLE_PROPERTIES) {
-      properties.remove(hiddenProperty);
-    }
-    ArrayList<String> colsSql = Lists.newArrayList();
-    ArrayList<String> partitionColsSql = Lists.newArrayList();
-    boolean isHbaseTable = table instanceof HBaseTable;
-    for (int i = 0; i < table.getColumns().size(); i++) {
-      if (!isHbaseTable && i < table.getNumClusteringCols()) {
-        partitionColsSql.add(columnToSql(table.getColumns().get(i)));
-      } else {
-        colsSql.add(columnToSql(table.getColumns().get(i)));
-      }
-    }
-    RowFormat rowFormat = RowFormat.fromStorageDescriptor(msTable.getSd());
-    HdfsFileFormat format = HdfsFileFormat.fromHdfsInputFormatClass(
-        msTable.getSd().getInputFormat());
-    HdfsCompression compression = HdfsCompression.fromHdfsInputFormatClass(
-        msTable.getSd().getInputFormat());
-    String location = isHbaseTable ? null : msTable.getSd().getLocation();
-    Map<String, String> serdeParameters = msTable.getSd().getSerdeInfo().getParameters();
-
-    String storageHandlerClassName = table.getStorageHandlerClassName();
-    if (table instanceof KuduTable) {
-      // Kudu tables don't use LOCATION syntax
-      location = null;
-      format = null;
-      // Kudu tables cannot use the Hive DDL syntax for the storage handler
-      storageHandlerClassName = null;
-    }
-    HdfsUri tableLocation = location == null ? null : new HdfsUri(location);
-    return getCreateTableSql(table.getDb().getName(), table.getName(), comment, colsSql,
-        partitionColsSql, properties, serdeParameters, isExternal, false, rowFormat,
-        format, compression, storageHandlerClassName, tableLocation);
-  }
-
-  /**
-   * Returns a "CREATE TABLE" string that creates the table with the specified properties.
-   * The tableName must not be null. If columnsSql is null, the schema syntax will
-   * not be generated.
-   */
-  public static String getCreateTableSql(String dbName, String tableName,
-      String tableComment, List<String> columnsSql, List<String> partitionColumnsSql,
-      Map<String, String> tblProperties, Map<String, String> serdeParameters,
-      boolean isExternal, boolean ifNotExists, RowFormat rowFormat,
-      HdfsFileFormat fileFormat, HdfsCompression compression, String storageHandlerClass,
-      HdfsUri location) {
-    Preconditions.checkNotNull(tableName);
-    StringBuilder sb = new StringBuilder("CREATE ");
-    if (isExternal) sb.append("EXTERNAL ");
-    sb.append("TABLE ");
-    if (ifNotExists) sb.append("IF NOT EXISTS ");
-    if (dbName != null) sb.append(dbName + ".");
-    sb.append(tableName);
-    if (columnsSql != null) {
-      sb.append(" (\n  ");
-      sb.append(Joiner.on(", \n  ").join(columnsSql));
-      sb.append("\n)");
-    }
-    sb.append("\n");
-    if (tableComment != null) sb.append(" COMMENT '" + tableComment + "'\n");
-
-    if (partitionColumnsSql != null && partitionColumnsSql.size() > 0) {
-      sb.append(String.format("PARTITIONED BY (\n  %s\n)\n",
-          Joiner.on(", \n  ").join(partitionColumnsSql)));
-    }
-
-    if (rowFormat != null && !rowFormat.isDefault()) {
-      sb.append("ROW FORMAT DELIMITED");
-      if (rowFormat.getFieldDelimiter() != null) {
-        String fieldDelim = StringEscapeUtils.escapeJava(rowFormat.getFieldDelimiter());
-        sb.append(" FIELDS TERMINATED BY '" + fieldDelim + "'");
-      }
-      if (rowFormat.getEscapeChar() != null) {
-        String escapeChar = StringEscapeUtils.escapeJava(rowFormat.getEscapeChar());
-        sb.append(" ESCAPED BY '" + escapeChar + "'");
-      }
-      if (rowFormat.getLineDelimiter() != null) {
-        String lineDelim = StringEscapeUtils.escapeJava(rowFormat.getLineDelimiter());
-        sb.append(" LINES TERMINATED BY '" + lineDelim + "'");
-      }
-      sb.append("\n");
-    }
-
-    if (storageHandlerClass == null) {
-      // TODO: Remove this special case when we have the LZO_TEXT writer
-      // We must handle LZO_TEXT specially because Impala does not yet support creating
-      // tables with this row format. In this case, we cannot output "WITH
-      // SERDEPROPERTIES" because Hive does not support it with "STORED AS". For any
-      // other HdfsFileFormat we want to output the serdeproperties because it is
-      // supported by Impala.
-      if (compression != HdfsCompression.LZO &&
-          compression != HdfsCompression.LZO_INDEX &&
-          serdeParameters != null && !serdeParameters.isEmpty()) {
-        sb.append(
-            "WITH SERDEPROPERTIES " + propertyMapToSql(serdeParameters) + "\n");
-      }
-
-      if (fileFormat != null) {
-        sb.append("STORED AS " + fileFormat.toSql(compression) + "\n");
-      }
-    } else {
-      // If the storageHandlerClass is set, then we will generate the proper Hive DDL
-      // because we do not yet support creating HBase tables via Impala.
-      sb.append("STORED BY '" + storageHandlerClass + "'\n");
-      if (serdeParameters != null && !serdeParameters.isEmpty()) {
-        sb.append(
-            "WITH SERDEPROPERTIES " + propertyMapToSql(serdeParameters) + "\n");
-      }
-    }
-    if (location != null) {
-      sb.append("LOCATION '" + location.toString() + "'\n");
-    }
-    if (tblProperties != null && !tblProperties.isEmpty()) {
-      sb.append("TBLPROPERTIES " + propertyMapToSql(tblProperties));
-    }
-    return sb.toString();
-  }
-
-  public static String getCreateFunctionSql(List<Function> functions) {
-    Preconditions.checkNotNull(functions);
-    StringBuilder sb = new StringBuilder();
-    for (Function fn: functions) {
-      sb.append(fn.toSql(false));
-    }
-    return sb.toString();
-  }
-
-  public static String getCreateViewSql(View view) {
-    StringBuffer sb = new StringBuffer();
-    sb.append("CREATE VIEW ");
-    // Use toSql() to ensure that the table name and query statement are normalized
-    // and identifiers are quoted.
-    sb.append(view.getTableName().toSql());
-    sb.append(" AS\n");
-    sb.append(view.getQueryStmt().toSql());
-    return sb.toString();
-  }
-
-  private static String columnToSql(Column col) {
-    StringBuilder sb = new StringBuilder(col.getName());
-    if (col.getType() != null) sb.append(" " + col.getType().toSql());
-    if (!Strings.isNullOrEmpty(col.getComment())) {
-      sb.append(String.format(" COMMENT '%s'", col.getComment()));
-    }
-    return sb.toString();
-  }
-
-  private static String propertyMapToSql(Map<String, String> propertyMap) {
-    List<String> properties = Lists.newArrayList();
-    for (Map.Entry<String, String> entry: propertyMap.entrySet()) {
-      properties.add(String.format("'%s'='%s'", entry.getKey(),
-          // Properties may contain characters that need to be escaped.
-          // e.g. If the row format escape delimiter is '\', the map of serde properties
-          // from the metastore table will contain 'escape.delim' => '\', which is not
-          // properly escaped.
-          StringEscapeUtils.escapeJava(entry.getValue())));
-    }
-    return "(" + Joiner.on(", ").join(properties) + ")";
-  }
-
-  /**
-   * Returns a SQL representation of the given list of hints. Uses the end-of-line
-   * commented plan hint style such that hinted views created by Impala are readable by
-   * Hive (parsed as a comment by Hive).
-   */
-  public static String getPlanHintsSql(List<String> hints) {
-    if (hints == null || hints.isEmpty()) return "";
-    StringBuilder sb = new StringBuilder();
-    sb.append("\n-- +");
-    sb.append(Joiner.on(",").join(hints));
-    sb.append("\n");
-    return sb.toString();
-  }
-}
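
The quoting decision in ToSqlUtils.getIdentSql() above combines three checks. A simplified sketch follows; the two boolean parameters are stand-ins for the HiveLexer probe and SqlScanner.isKeyword(), and only the leading-digit check is reproduced directly (IdentQuoteSketch and quoteIfNeeded are invented names).

public class IdentQuoteSketch {
  static String quoteIfNeeded(String ident, boolean hiveCanLexIt, boolean isImpalaKeyword) {
    // Quotes are needed when the identifier starts with a digit, since a preceding "."
    // would otherwise make the scanner read ".123" as a decimal literal.
    boolean startsWithDigit = !ident.isEmpty() && Character.isDigit(ident.charAt(0));
    if (!hiveCanLexIt || isImpalaKeyword || startsWithDigit) return "`" + ident + "`";
    return ident;
  }

  public static void main(String[] args) {
    System.out.println(quoteIfNeeded("_c0", false, false));     // `_c0`   (Hive can't lex it)
    System.out.println(quoteIfNeeded("123_tbl", true, false));  // `123_tbl`
    System.out.println(quoteIfNeeded("sales", true, false));    // sales
  }
}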

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/TruncateStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/TruncateStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/TruncateStmt.java
deleted file mode 100644
index 0ea930e..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/TruncateStmt.java
+++ /dev/null
@@ -1,82 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TTableName;
-import com.cloudera.impala.thrift.TTruncateParams;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Joiner;
-import com.google.common.collect.Lists;
-
-/**
- * Representation of a TRUNCATE statement.
- * Acceptable syntax:
- *
- * TRUNCATE [TABLE] [IF EXISTS] [database.]table
- *
- */
-public class TruncateStmt extends StatementBase {
-  private TableName tableName_;
-  private final boolean ifExists_;
-
-  // Set in analyze().
-  private Table table_;
-
-  public TruncateStmt(TableName tableName, boolean ifExists) {
-    Preconditions.checkNotNull(tableName);
-    tableName_ = tableName;
-    table_ = null;
-    ifExists_ = ifExists;
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    tableName_ = analyzer.getFqTableName(tableName_);
-    try {
-      table_ = analyzer.getTable(tableName_, Privilege.INSERT);
-    } catch (AnalysisException e) {
-      if (ifExists_ && analyzer.getMissingTbls().isEmpty()) return;
-      throw e;
-    }
-    // We only support truncating hdfs tables now.
-    if (!(table_ instanceof HdfsTable)) {
-      throw new AnalysisException(String.format(
-          "TRUNCATE TABLE not supported on non-HDFS table: %s", table_.getFullName()));
-    }
-  }
-
-  @Override
-  public String toSql() {
-    return "TRUNCATE TABLE " + (ifExists_ ? " IF EXISTS " : "") + tableName_;
-  }
-
-  public TTruncateParams toThrift() {
-    TTruncateParams params = new TTruncateParams();
-    params.setTable_name(tableName_.toThrift());
-    params.setIf_exists(ifExists_);
-    return params;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/TupleDescriptor.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/TupleDescriptor.java b/fe/src/main/java/com/cloudera/impala/analysis/TupleDescriptor.java
deleted file mode 100644
index 145a10b..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/TupleDescriptor.java
+++ /dev/null
@@ -1,310 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.commons.lang.StringUtils;
-
-import com.cloudera.impala.catalog.ColumnStats;
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.catalog.StructType;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.catalog.View;
-import com.cloudera.impala.thrift.TTupleDescriptor;
-import com.google.common.base.Joiner;
-import com.google.common.base.Objects;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-/**
- * A collection of slots that are organized in a CPU-friendly memory layout. A slot is
- * a typed placeholder for a single value operated on at runtime. A slot can be named or
- * anonymous. A named slot corresponds directly to a column or field that can be directly
- * referenced in a query by its name. An anonymous slot represents an intermediate value
- * produced during query execution, e.g., aggregation output.
- * A tuple descriptor has an associated type and a list of slots. Its type is a struct
- * that contains as its fields the list of all named slots covered by this tuple.
- * The list of slots tracks the named slots that are actually referenced in a query, as
- * well as all anonymous slots. Although not required, a tuple descriptor typically
- * only has named or anonymous slots and not a mix of both.
- *
- * For example, every table reference has a corresponding tuple descriptor. The columns
- * of the table are represented by the tuple descriptor's type (struct type with one
- * field per column). The list of slots tracks which of the table's columns are actually
- * referenced. A similar explanation applies for collection references.
- *
- * A tuple descriptor may be materialized or non-materialized. A non-materialized tuple
- * descriptor acts as a placeholder for 'virtual' table references such as inline views,
- * and must not be materialized at runtime.
- */
-public class TupleDescriptor {
-  private final TupleId id_;
-  private final String debugName_;  // debug-only
-  private final ArrayList<SlotDescriptor> slots_ = Lists.newArrayList();
-
-  // Resolved path to the collection corresponding to this tuple descriptor, if any.
-  // Only set for materialized tuples.
-  private Path path_;
-
-  // Type of this tuple descriptor. Used for slot/table resolution in analysis.
-  private StructType type_;
-
-  // All legal aliases of this tuple.
-  private String[] aliases_;
-
-  // If true, requires that aliases_.length() == 1. However, aliases_.length() == 1
-  // does not imply an explicit alias because nested collection refs have only a
-  // single implicit alias.
-  private boolean hasExplicitAlias_;
-
-  // If false, this tuple doesn't need to be materialized.
-  private boolean isMaterialized_ = true;
-
-  // If true, computeMemLayout() has been called and we can't add any additional slots.
-  private boolean hasMemLayout_ = false;
-
-  private int byteSize_;  // of all slots plus null indicators
-  private int numNullBytes_;
-  private float avgSerializedSize_;  // in bytes; includes serialization overhead
-
-  public TupleDescriptor(TupleId id, String debugName) {
-    id_ = id;
-    path_ = null;
-    debugName_ = debugName;
-  }
-
-  public void addSlot(SlotDescriptor desc) {
-    Preconditions.checkState(!hasMemLayout_);
-    slots_.add(desc);
-  }
-
-  public TupleId getId() { return id_; }
-  public ArrayList<SlotDescriptor> getSlots() { return slots_; }
-
-  public ArrayList<SlotDescriptor> getMaterializedSlots() {
-    ArrayList<SlotDescriptor> result = Lists.newArrayList();
-    for (SlotDescriptor slot: slots_) {
-      if (slot.isMaterialized()) result.add(slot);
-    }
-    return result;
-  }
-
-  public Table getTable() {
-    if (path_ == null) return null;
-    return path_.getRootTable();
-  }
-
-  public TableName getTableName() {
-    Table t = getTable();
-    return (t == null) ? null : t.getTableName();
-  }
-
-  public void setPath(Path p) {
-    Preconditions.checkNotNull(p);
-    Preconditions.checkState(p.isResolved());
-    Preconditions.checkState(p.destType().isCollectionType());
-    path_ = p;
-    if (p.destTable() != null) {
-      // Do not use Path.getTypeAsStruct() to only allow implicit path resolutions,
-      // because this tuple desc belongs to a base table ref.
-      type_ = (StructType) p.destTable().getType().getItemType();
-    } else {
-      // Also allow explicit path resolutions.
-      type_ = Path.getTypeAsStruct(p.destType());
-    }
-  }
-
-  public Path getPath() { return path_; }
-  public void setType(StructType type) { type_ = type; }
-  public StructType getType() { return type_; }
-  public int getByteSize() { return byteSize_; }
-  public float getAvgSerializedSize() { return avgSerializedSize_; }
-  public boolean isMaterialized() { return isMaterialized_; }
-  public void setIsMaterialized(boolean value) { isMaterialized_ = value; }
-  public boolean hasMemLayout() { return hasMemLayout_; }
-  public void setAliases(String[] aliases, boolean hasExplicitAlias) {
-    aliases_ = aliases;
-    hasExplicitAlias_ = hasExplicitAlias;
-  }
-  public boolean hasExplicitAlias() { return hasExplicitAlias_; }
-  public String getAlias() { return (aliases_ != null) ? aliases_[0] : null; }
-  public TableName getAliasAsName() {
-    return (aliases_ != null) ? new TableName(null, aliases_[0]) : null;
-  }
-
-  public TupleDescriptor getRootDesc() {
-    if (path_ == null) return null;
-    return path_.getRootDesc();
-  }
-
-  public String debugString() {
-    String tblStr = (getTable() == null ? "null" : getTable().getFullName());
-    List<String> slotStrings = Lists.newArrayList();
-    for (SlotDescriptor slot : slots_) {
-      slotStrings.add(slot.debugString());
-    }
-    return Objects.toStringHelper(this)
-        .add("id", id_.asInt())
-        .add("name", debugName_)
-        .add("tbl", tblStr)
-        .add("byte_size", byteSize_)
-        .add("is_materialized", isMaterialized_)
-        .add("slots", "[" + Joiner.on(", ").join(slotStrings) + "]")
-        .toString();
-  }
-
-  /**
-   * Checks that this tuple is materialized and has a mem layout. Throws if this tuple
-   * is not executable, i.e., if one of those conditions is not met.
-   */
-  public void checkIsExecutable() {
-    Preconditions.checkState(isMaterialized_, String.format(
-        "Illegal reference to non-materialized tuple: debugname=%s alias=%s tid=%s",
-        debugName_, StringUtils.defaultIfEmpty(getAlias(), "n/a"), id_));
-    Preconditions.checkState(hasMemLayout_, String.format(
-        "Missing memory layout for tuple: debugname=%s alias=%s tid=%s",
-        debugName_, StringUtils.defaultIfEmpty(getAlias(), "n/a"), id_));
-  }
-
-  /**
-   * Materialize all slots.
-   */
-  public void materializeSlots() {
-    for (SlotDescriptor slot: slots_) {
-      slot.setIsMaterialized(true);
-    }
-  }
-
-  public TTupleDescriptor toThrift() {
-    TTupleDescriptor ttupleDesc =
-        new TTupleDescriptor(id_.asInt(), byteSize_, numNullBytes_);
-    // do not set the table id or tuple path for views
-    if (getTable() != null && !(getTable() instanceof View)) {
-      ttupleDesc.setTableId(getTable().getId().asInt());
-      Preconditions.checkNotNull(path_);
-      ttupleDesc.setTuplePath(path_.getAbsolutePath());
-    }
-    return ttupleDesc;
-  }
-
-  public void computeMemLayout() {
-    if (hasMemLayout_) return;
-    hasMemLayout_ = true;
-
-    // sort slots by size
-    Map<Integer, List<SlotDescriptor>> slotsBySize =
-        new HashMap<Integer, List<SlotDescriptor>>();
-
-    // populate slotsBySize; also compute avgSerializedSize
-    int numNullableSlots = 0;
-    for (SlotDescriptor d: slots_) {
-      if (!d.isMaterialized()) continue;
-      ColumnStats stats = d.getStats();
-      if (stats.hasAvgSerializedSize()) {
-        avgSerializedSize_ += d.getStats().getAvgSerializedSize();
-      } else {
-        // TODO: for computed slots, try to come up with stats estimates
-        avgSerializedSize_ += d.getType().getSlotSize();
-      }
-      if (!slotsBySize.containsKey(d.getType().getSlotSize())) {
-        slotsBySize.put(d.getType().getSlotSize(), new ArrayList<SlotDescriptor>());
-      }
-      slotsBySize.get(d.getType().getSlotSize()).add(d);
-      if (d.getIsNullable()) ++numNullableSlots;
-    }
-    // we shouldn't have anything of size <= 0
-    Preconditions.checkState(!slotsBySize.containsKey(0));
-    Preconditions.checkState(!slotsBySize.containsKey(-1));
-
-    // assign offsets to slots in order of ascending size
-    numNullBytes_ = (numNullableSlots + 7) / 8;
-    int offset = numNullBytes_;
-    int nullIndicatorByte = 0;
-    int nullIndicatorBit = 0;
-    // slotIdx is the index into the resulting tuple struct.  The first (smallest) field
-    // is 0, next is 1, etc.
-    int slotIdx = 0;
-    List<Integer> sortedSizes = new ArrayList<Integer>(slotsBySize.keySet());
-    Collections.sort(sortedSizes);
-    for (int slotSize: sortedSizes) {
-      if (slotsBySize.get(slotSize).isEmpty()) continue;
-      if (slotSize > 1) {
-        // insert padding
-        int alignTo = Math.min(slotSize, 8);
-        offset = (offset + alignTo - 1) / alignTo * alignTo;
-      }
-
-      for (SlotDescriptor d: slotsBySize.get(slotSize)) {
-        Preconditions.checkState(d.isMaterialized());
-        d.setByteSize(slotSize);
-        d.setByteOffset(offset);
-        d.setSlotIdx(slotIdx++);
-        offset += slotSize;
-
-        // assign null indicator
-        if (d.getIsNullable()) {
-          d.setNullIndicatorByte(nullIndicatorByte);
-          d.setNullIndicatorBit(nullIndicatorBit);
-          nullIndicatorBit = (nullIndicatorBit + 1) % 8;
-          if (nullIndicatorBit == 0) ++nullIndicatorByte;
-        } else {
-          // Non-nullable slots will have 0 for the byte offset and -1 for the bit mask
-          d.setNullIndicatorBit(-1);
-          d.setNullIndicatorByte(0);
-        }
-      }
-    }
-
-    this.byteSize_ = offset;
-  }
-
-  /**
-   * Return true if the slots being materialized are all partition columns.
-   */
-  public boolean hasClusteringColsOnly() {
-    Table table = getTable();
-    if (!(table instanceof HdfsTable) || table.getNumClusteringCols() == 0) return false;
-
-    HdfsTable hdfsTable = (HdfsTable)table;
-    for (SlotDescriptor slotDesc: getSlots()) {
-      if (!slotDesc.isMaterialized()) continue;
-      if (slotDesc.getColumn() == null ||
-          slotDesc.getColumn().getPosition() >= hdfsTable.getNumClusteringCols()) {
-        return false;
-      }
-    }
-    return true;
-  }
-
-  /**
-   * Returns true if tuples of type 'this' can be assigned to tuples of type 'desc'
-   * (checks that both have the same number of slots and that slots are of the same type)
-   */
-  public boolean isCompatible(TupleDescriptor desc) {
-    if (slots_.size() != desc.slots_.size()) return false;
-    for (int i = 0; i < slots_.size(); ++i) {
-      if (!slots_.get(i).getType().equals(desc.slots_.get(i).getType())) return false;
-    }
-    return true;
-  }
-}
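
The padding arithmetic in TupleDescriptor.computeMemLayout() above is easiest to see on a concrete tuple. The sketch below walks three materialized slots of sizes 1, 4 and 8 bytes, two of them nullable, through the same formulas; MemLayoutSketch is an invented name and the slot sizes are an assumed example, not taken from a real table.

public class MemLayoutSketch {
  public static void main(String[] args) {
    int[] slotSizes = {1, 4, 8};                    // already sorted ascending
    int numNullableSlots = 2;
    int numNullBytes = (numNullableSlots + 7) / 8;  // 1 null-indicator byte
    int offset = numNullBytes;
    for (int size : slotSizes) {
      if (size > 1) {
        int alignTo = Math.min(size, 8);
        offset = (offset + alignTo - 1) / alignTo * alignTo;  // insert padding
      }
      System.out.println("slot of size " + size + " at offset " + offset);
      offset += size;
    }
    System.out.println("tuple byte size = " + offset);
    // Prints offsets 1, 4 and 8, and a total tuple size of 16 bytes.
  }
}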

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/TupleId.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/TupleId.java b/fe/src/main/java/com/cloudera/impala/analysis/TupleId.java
deleted file mode 100644
index 918671c..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/TupleId.java
+++ /dev/null
@@ -1,40 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import com.cloudera.impala.common.Id;
-import com.cloudera.impala.common.IdGenerator;
-
-/**
- * Tuple identifier unique within a single query.
- */
-public class TupleId extends Id<TupleId> {
-  // Construction only allowed via an IdGenerator.
-  protected TupleId(int id) {
-    super(id);
-  }
-
-  public static IdGenerator<TupleId> createGenerator() {
-    return new IdGenerator<TupleId>() {
-      @Override
-      public TupleId getNextId() { return new TupleId(nextId_++); }
-      @Override
-      public TupleId getMaxId() { return new TupleId(nextId_ - 1); }
-    };
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/TupleIsNullPredicate.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/TupleIsNullPredicate.java b/fe/src/main/java/com/cloudera/impala/analysis/TupleIsNullPredicate.java
deleted file mode 100644
index 5a07328..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/TupleIsNullPredicate.java
+++ /dev/null
@@ -1,185 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.List;
-import java.util.Set;
-
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.thrift.TExprNode;
-import com.cloudera.impala.thrift.TExprNodeType;
-import com.cloudera.impala.thrift.TTupleIsNullPredicate;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Sets;
-
-/**
- * Internal expr that returns true if all of the given tuples are NULL, otherwise false.
- * Used to make exprs originating from an inline view nullable in an outer join.
- * The given tupleIds must be materialized but not necessarily nullable at the
- * appropriate PlanNode. It is important not to require nullability of the tuples
- * because some exprs may be wrapped in a TupleIsNullPredicate that contain
- * SlotRefs on non-nullable tuples, e.g., an expr in the On-clause of an outer join
- * that refers to an outer-joined inline view (see IMPALA-904).
- */
-public class TupleIsNullPredicate extends Predicate {
-  private final Set<TupleId> tupleIds_;
-  private Analyzer analyzer_;
-
-  public TupleIsNullPredicate(List<TupleId> tupleIds) {
-    Preconditions.checkState(tupleIds != null && !tupleIds.isEmpty());
-    this.tupleIds_ = Sets.newHashSet(tupleIds);
-  }
-
-  /**
-   * Copy c'tor used in clone().
-   */
-  protected TupleIsNullPredicate(TupleIsNullPredicate other) {
-    super(other);
-    tupleIds_ = Sets.newHashSet(other.tupleIds_);
-    analyzer_ = other.analyzer_;
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    if (isAnalyzed_) return;
-    super.analyze(analyzer);
-    analyzer_ = analyzer;
-    evalCost_ = tupleIds_.size() * IS_NULL_COST;
-  }
-
-  @Override
-  protected void toThrift(TExprNode msg) {
-    msg.node_type = TExprNodeType.TUPLE_IS_NULL_PRED;
-    msg.tuple_is_null_pred = new TTupleIsNullPredicate();
-    Preconditions.checkNotNull(analyzer_);
-    for (TupleId tid: tupleIds_) {
-      // Check that all referenced tuples are materialized.
-      TupleDescriptor tupleDesc = analyzer_.getTupleDesc(tid);
-      Preconditions.checkNotNull(tupleDesc, "Unknown tuple id: " + tid.toString());
-      Preconditions.checkState(tupleDesc.isMaterialized(),
-          String.format("Illegal reference to non-materialized tuple: tid=%s", tid));
-      msg.tuple_is_null_pred.addToTuple_ids(tid.asInt());
-    }
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (!super.equals(o)) return false;
-    TupleIsNullPredicate other = (TupleIsNullPredicate) o;
-    return other.tupleIds_.containsAll(tupleIds_) &&
-        tupleIds_.containsAll(other.tupleIds_);
-  }
-
-  @Override
-  protected String toSqlImpl() { return "TupleIsNull()"; }
-
-  public Set<TupleId> getTupleIds() { return tupleIds_; }
-
-  @Override
-  public boolean isBoundByTupleIds(List<TupleId> tids) {
-    return tids.containsAll(tupleIds_);
-  }
-
-  @Override
-  public boolean isConstant() { return false; }
-
-  /**
-   * Makes each input expr nullable, if necessary, by wrapping it as follows:
-   * IF(TupleIsNull(tids), NULL, expr)
-   *
-   * The given tids must be materialized. The given inputExprs are expected to be bound
-   * by tids once fully substituted against base tables. However, inputExprs may not yet
-   * be fully substituted at this point.
-   *
-   * Returns a new list with the nullable exprs.
-   */
-  public static List<Expr> wrapExprs(List<Expr> inputExprs,
-      List<TupleId> tids, Analyzer analyzer) throws InternalException {
-    // Assert that all tids are materialized.
-    for (TupleId tid: tids) {
-      TupleDescriptor tupleDesc = analyzer.getTupleDesc(tid);
-      Preconditions.checkState(tupleDesc.isMaterialized());
-    }
-    // Perform the wrapping.
-    List<Expr> result = Lists.newArrayListWithCapacity(inputExprs.size());
-    for (Expr e: inputExprs) {
-      result.add(wrapExpr(e, tids, analyzer));
-    }
-    return result;
-  }
-
-  /**
-   * Returns a new analyzed conditional expr 'IF(TupleIsNull(tids), NULL, expr)',
-   * if required to make expr nullable. Otherwise, returns expr.
-   */
-  public static Expr wrapExpr(Expr expr, List<TupleId> tids, Analyzer analyzer)
-      throws InternalException {
-    if (!requiresNullWrapping(expr, analyzer)) return expr;
-    List<Expr> params = Lists.newArrayList();
-    params.add(new TupleIsNullPredicate(tids));
-    params.add(new NullLiteral());
-    params.add(expr);
-    Expr ifExpr = new FunctionCallExpr("if", params);
-    ifExpr.analyzeNoThrow(analyzer);
-    return ifExpr;
-  }
-
-  /**
-   * Returns true if the given expr evaluates to a non-NULL value if all its contained
-   * SlotRefs evaluate to NULL, false otherwise.
-   * Throws an InternalException if expr evaluation in the BE failed.
-   */
-  private static boolean requiresNullWrapping(Expr expr, Analyzer analyzer)
-      throws InternalException {
-    Preconditions.checkNotNull(expr);
-    // If the expr is already wrapped in an IF(TupleIsNull(), NULL, expr)
-    // then it must definitely be wrapped again at this level.
-    // Do not try to execute expr because a TupleIsNullPredicate is not constant.
-    if (expr.contains(TupleIsNullPredicate.class)) return true;
-    // Wrap expr with an IS NOT NULL predicate.
-    Expr isNotNullLiteralPred = new IsNullPredicate(expr, true);
-    // analyze to insert casts, etc.
-    isNotNullLiteralPred.analyzeNoThrow(analyzer);
-    return analyzer.isTrueWithNullSlots(isNotNullLiteralPred);
-  }
-
-  /**
-   * Recursive function that replaces all 'IF(TupleIsNull(), NULL, e)' exprs in
-   * 'expr' with e and returns the modified expr.
-   */
-  public static Expr unwrapExpr(Expr expr)  {
-    if (expr instanceof FunctionCallExpr) {
-      FunctionCallExpr fnCallExpr = (FunctionCallExpr) expr;
-      List<Expr> params = fnCallExpr.getParams().exprs();
-      if (fnCallExpr.getFnName().getFunction().equals("if") &&
-          params.get(0) instanceof TupleIsNullPredicate &&
-          params.get(1) instanceof NullLiteral) {
-        return unwrapExpr(params.get(2));
-      }
-    }
-    for (int i = 0; i < expr.getChildren().size(); ++i) {
-      expr.setChild(i, unwrapExpr(expr.getChild(i)));
-    }
-    return expr;
-  }
-
-  @Override
-  public Expr clone() { return new TupleIsNullPredicate(this); }
-}
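
The rewrite performed by TupleIsNullPredicate.wrapExpr() above can be pictured at the string level: an expr that would stay non-NULL even when every slot of the outer-joined side is NULL gets wrapped in a conditional. The sketch below uses plain strings for Expr and TupleId objects; NullWrapSketch, wrap and the example tuple id are invented for illustration.

public class NullWrapSketch {
  static String wrap(String expr, String tupleIds, boolean requiresWrapping) {
    if (!requiresWrapping) return expr;
    return "if(TupleIsNull(" + tupleIds + "), NULL, " + expr + ")";
  }

  public static void main(String[] args) {
    // A constant expr coming from an outer-joined inline view must become NULL on
    // unmatched rows, so it requires wrapping; a bare slot reference does not.
    System.out.println(wrap("1 + 1", "2", true));  // if(TupleIsNull(2), NULL, 1 + 1)
    System.out.println(wrap("v.c", "2", false));   // v.c
  }
}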

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/TypeDef.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/TypeDef.java b/fe/src/main/java/com/cloudera/impala/analysis/TypeDef.java
deleted file mode 100644
index bbbb1b4..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/TypeDef.java
+++ /dev/null
@@ -1,159 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.Set;
-
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
-
-import com.cloudera.impala.catalog.ArrayType;
-import com.cloudera.impala.catalog.MapType;
-import com.cloudera.impala.catalog.PrimitiveType;
-import com.cloudera.impala.catalog.ScalarType;
-import com.cloudera.impala.catalog.StructField;
-import com.cloudera.impala.catalog.StructType;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Sets;
-
-/**
- * Represents an anonymous type definition, e.g., used in DDL and CASTs.
- */
-public class TypeDef implements ParseNode {
-  private boolean isAnalyzed_;
-  private final Type parsedType_;
-
-  public TypeDef(Type parsedType) {
-    parsedType_ = parsedType;
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    if (isAnalyzed_) return;
-    // Check the max nesting depth before calling the recursive analyze() to avoid
-    // a stack overflow.
-    if (parsedType_.exceedsMaxNestingDepth()) {
-      throw new AnalysisException(String.format(
-          "Type exceeds the maximum nesting depth of %s:\n%s",
-          Type.MAX_NESTING_DEPTH, parsedType_.toSql()));
-    }
-    analyze(parsedType_, analyzer);
-    isAnalyzed_ = true;
-  }
-
-  private void analyze(Type type, Analyzer analyzer) throws AnalysisException {
-    if (!type.isSupported()) {
-      throw new AnalysisException("Unsupported data type: " + type.toSql());
-    }
-    if (type.isScalarType()) {
-      analyzeScalarType((ScalarType) type, analyzer);
-    } else if (type.isStructType()) {
-      analyzeStructType((StructType) type, analyzer);
-    } else if (type.isArrayType()) {
-      ArrayType arrayType = (ArrayType) type;
-      analyze(arrayType.getItemType(), analyzer);
-    } else {
-      Preconditions.checkState(type.isMapType());
-      analyzeMapType((MapType) type, analyzer);
-    }
-  }
-
-  private void analyzeScalarType(ScalarType scalarType, Analyzer analyzer)
-      throws AnalysisException {
-    PrimitiveType type = scalarType.getPrimitiveType();
-    switch (type) {
-      case CHAR:
-      case VARCHAR: {
-        String name;
-        int maxLen;
-        if (type == PrimitiveType.VARCHAR) {
-          name = "Varchar";
-          maxLen = ScalarType.MAX_VARCHAR_LENGTH;
-        } else if (type == PrimitiveType.CHAR) {
-          name = "Char";
-          maxLen = ScalarType.MAX_CHAR_LENGTH;
-        } else {
-          Preconditions.checkState(false);
-          return;
-        }
-        int len = scalarType.getLength();
-        if (len <= 0) {
-          throw new AnalysisException(name + " size must be > 0: " + len);
-        }
-        if (scalarType.getLength() > maxLen) {
-          throw new AnalysisException(
-              name + " size must be <= " + maxLen + ": " + len);
-        }
-        break;
-      }
-      case DECIMAL: {
-        int precision = scalarType.decimalPrecision();
-        int scale = scalarType.decimalScale();
-        if (precision > ScalarType.MAX_PRECISION) {
-          throw new AnalysisException("Decimal precision must be <= " +
-              ScalarType.MAX_PRECISION + ": " + precision);
-        }
-        if (precision == 0) {
-          throw new AnalysisException("Decimal precision must be > 0: " + precision);
-        }
-        if (scale > precision) {
-          throw new AnalysisException("Decimal scale (" + scale + ") must be <= " +
-              "precision (" + precision + ")");
-        }
-      }
-      default: break;
-    }
-  }
-
-  private void analyzeStructType(StructType structType, Analyzer analyzer)
-      throws AnalysisException {
-    // Check for duplicate field names.
-    Set<String> fieldNames = Sets.newHashSet();
-    for (StructField f: structType.getFields()) {
-      analyze(f.getType(), analyzer);
-      if (!fieldNames.add(f.getName().toLowerCase())) {
-        throw new AnalysisException(
-            String.format("Duplicate field name '%s' in struct '%s'",
-                f.getName(), toSql()));
-      }
-      // Check whether the column name meets the Metastore's requirements.
-      if (!MetaStoreUtils.validateName(f.getName().toLowerCase())) {
-        throw new AnalysisException("Invalid struct field name: " + f.getName());
-      }
-    }
-  }
-
-  private void analyzeMapType(MapType mapType, Analyzer analyzer)
-      throws AnalysisException {
-    analyze(mapType.getKeyType(), analyzer);
-    if (mapType.getKeyType().isComplexType()) {
-      throw new AnalysisException(
-          "Map type cannot have a complex-typed key: " + mapType.toSql());
-    }
-    analyze(mapType.getValueType(), analyzer);
-  }
-
-  public Type getType() { return parsedType_; }
-
-  @Override
-  public String toString() { return parsedType_.toSql(); }
-
-  @Override
-  public String toSql() { return parsedType_.toSql(); }
-}
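
The DECIMAL branch of TypeDef.analyzeScalarType() above enforces three constraints on precision and scale. A condensed sketch follows, with the precision cap passed in as a parameter (38, matching the precision ranges listed for DECIMAL in TypesUtil below); DecimalCheckSketch and check are invented names.

public class DecimalCheckSketch {
  static String check(int precision, int scale, int maxPrecision) {
    if (precision > maxPrecision) return "precision must be <= " + maxPrecision;
    if (precision == 0) return "precision must be > 0";
    if (scale > precision) return "scale must be <= precision";
    return "ok";
  }

  public static void main(String[] args) {
    System.out.println(check(10, 2, 38));  // ok
    System.out.println(check(40, 2, 38));  // precision must be <= 38
    System.out.println(check(5, 7, 38));   // scale must be <= precision
  }
}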

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/TypesUtil.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/TypesUtil.java b/fe/src/main/java/com/cloudera/impala/analysis/TypesUtil.java
deleted file mode 100644
index 58bb960..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/TypesUtil.java
+++ /dev/null
@@ -1,237 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.math.BigDecimal;
-
-import com.cloudera.impala.catalog.ScalarType;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.google.common.base.Preconditions;
-
-// Utility class for handling types.
-public class TypesUtil {
-  // The sql standard specifies that the scale after division is incremented
-  // by a system wide constant. Hive picked 4 so we will as well.
-  // TODO: how did they pick this?
-  static final int DECIMAL_DIVISION_SCALE_INCREMENT = 4;
-
-  /**
-   * [1-9] precision -> 4 bytes
-   * [10-18] precision -> 8 bytes
-   * [19-38] precision -> 16 bytes
-   * TODO: Support 12 byte decimal?
-   * For precision [20-28], we could support a 12 byte decimal but currently a 12
-   * byte decimal in the BE is not implemented.
-   */
-  public static int getDecimalSlotSize(ScalarType type) {
-    Preconditions.checkState(type.isDecimal() && !type.isWildcardDecimal());
-    if (type.decimalPrecision() <= 9) return 4;
-    if (type.decimalPrecision() <= 18) return 8;
-    return 16;
-  }
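To make the precision-to-slot-size mapping in the comment above concrete, here is a minimal standalone sketch in plain Java; the class and method names are made up for illustration and nothing here uses the Impala types themselves.

// Minimal sketch: map a DECIMAL precision to the 4/8/16-byte slot sizes
// described above. Illustration only, not part of the Impala code.
public class DecimalSlotSizeDemo {
  static int slotSize(int precision) {
    if (precision <= 9) return 4;    // fits a 32-bit representation
    if (precision <= 18) return 8;   // fits a 64-bit representation
    return 16;                       // 128-bit storage for up to 38 digits
  }

  public static void main(String[] args) {
    System.out.println(slotSize(9));   // 4
    System.out.println(slotSize(19));  // 16
  }
}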
-
-  /**
-   * Returns the smallest integer type that can store decType without loss
-   * of precision. decType must have scale == 0.
-   * In the case where the decimal can be bigger than BIGINT, we return
-   * BIGINT (and the execution will report it as an overflow).
-   */
-  public static ScalarType getContainingIntType(ScalarType decType) {
-    Preconditions.checkState(decType.isFullySpecifiedDecimal());
-    Preconditions.checkState(decType.decimalScale() == 0);
-    // TINYINT_MAX = 127
-    if (decType.decimalPrecision() <= 2) return Type.TINYINT;
-    // SMALLINT_MAX = 32767
-    if (decType.decimalPrecision() <= 4) return Type.SMALLINT;
-    // INT_MAX = 2147483647
-    if (decType.decimalPrecision() <= 9) return Type.INT;
-    return Type.BIGINT;
-  }
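The 2/4/9-digit thresholds above follow from the ranges of the integer types; the standalone sketch below uses plain strings in place of the Impala Type constants, purely as an illustration.

// Minimal sketch: smallest integer type whose range covers a scale-0 DECIMAL
// of the given precision; anything wider than 9 digits falls back to BIGINT,
// which can still overflow at runtime, as noted above.
public class ContainingIntTypeDemo {
  static String containingIntType(int precision) {
    if (precision <= 2) return "TINYINT";   // up to 99 fits in [-128, 127]
    if (precision <= 4) return "SMALLINT";  // up to 9999 fits in [-32768, 32767]
    if (precision <= 9) return "INT";       // up to 999999999 fits in 32 bits
    return "BIGINT";
  }

  public static void main(String[] args) {
    System.out.println(containingIntType(4));   // SMALLINT
    System.out.println(containingIntType(19));  // BIGINT
  }
}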
-
-  /**
-   * Returns the decimal type that can hold t1 and t2 without loss of precision.
-   * decimal(10, 2) && decimal(12, 2) -> decimal(12, 2)
-   * decimal(10, 5) && decimal(12, 3) -> decimal(14, 5)
-   * Either t1 or t2 can be a wildcard decimal (but not both).
-   */
-  public static ScalarType getDecimalAssignmentCompatibleType(
-      ScalarType t1, ScalarType t2) {
-    Preconditions.checkState(t1.isDecimal());
-    Preconditions.checkState(t2.isDecimal());
-    Preconditions.checkState(!(t1.isWildcardDecimal() && t2.isWildcardDecimal()));
-    if (t1.isWildcardDecimal()) return t2;
-    if (t2.isWildcardDecimal()) return t1;
-
-    Preconditions.checkState(t1.isFullySpecifiedDecimal());
-    Preconditions.checkState(t2.isFullySpecifiedDecimal());
-    if (t1.equals(t2)) return t1;
-    int s1 = t1.decimalScale();
-    int s2 = t2.decimalScale();
-    int p1 = t1.decimalPrecision();
-    int p2 = t2.decimalPrecision();
-    int digitsBefore = Math.max(p1 - s1, p2 - s2);
-    int digitsAfter = Math.max(s1, s2);
-    return ScalarType.createDecimalTypeInternal(
-        digitsBefore + digitsAfter, digitsAfter);
-  }
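The widening rule above keeps the larger digit count on each side of the decimal point. A standalone sketch, returning the result as a {precision, scale} pair (names here are illustrative only):

// Minimal sketch of the assignment-compatible rule for two fully specified
// decimals: take the max digits before and after the point.
public class DecimalCompatibleTypeDemo {
  static int[] compatible(int p1, int s1, int p2, int s2) {
    int digitsBefore = Math.max(p1 - s1, p2 - s2);
    int digitsAfter = Math.max(s1, s2);
    return new int[] { digitsBefore + digitsAfter, digitsAfter };
  }

  public static void main(String[] args) {
    // decimal(10,5) and decimal(12,3) -> decimal(14,5), as in the comment above.
    System.out.println(java.util.Arrays.toString(compatible(10, 5, 12, 3)));
  }
}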
-
-  /**
-   * Returns the necessary result type for t1 op t2. Throws an analysis exception
-   * if the operation does not make sense for the types.
-   */
-  public static Type getArithmeticResultType(Type t1, Type t2,
-      ArithmeticExpr.Operator op) throws AnalysisException {
-    Preconditions.checkState(t1.isNumericType() || t1.isNull());
-    Preconditions.checkState(t2.isNumericType() || t2.isNull());
-
-    if (t1.isNull() && t2.isNull()) return Type.NULL;
-
-    if (t1.isDecimal() || t2.isDecimal()) {
-      if (t1.isNull()) return t2;
-      if (t2.isNull()) return t1;
-
-      // For multiplications involving at least one floating point type we cast decimal to
-      // double in order to prevent decimals from overflowing.
-      if (op == ArithmeticExpr.Operator.MULTIPLY &&
-          (t1.isFloatingPointType() || t2.isFloatingPointType())) {
-        return Type.DOUBLE;
-      }
-
-      t1 = ((ScalarType) t1).getMinResolutionDecimal();
-      t2 = ((ScalarType) t2).getMinResolutionDecimal();
-      Preconditions.checkState(t1.isDecimal());
-      Preconditions.checkState(t2.isDecimal());
-      return getDecimalArithmeticResultType(t1, t2, op);
-    }
-
-    Type type = null;
-    switch (op) {
-      case MULTIPLY:
-      case ADD:
-      case SUBTRACT:
-        // If one of the types is null, use the compatible type without promotion.
-        // Otherwise, promote the compatible type to the next higher resolution type,
-        // to ensure that a <op> b won't overflow/underflow.
-        Type compatibleType =
-            ScalarType.getAssignmentCompatibleType(t1, t2, false);
-        Preconditions.checkState(compatibleType.isScalarType());
-        type = ((ScalarType) compatibleType).getNextResolutionType();
-        break;
-      case MOD:
-        type = ScalarType.getAssignmentCompatibleType(t1, t2, false);
-        break;
-      case DIVIDE:
-        type = Type.DOUBLE;
-        break;
-      default:
-        throw new AnalysisException("Invalid op: " + op);
-    }
-    Preconditions.checkState(type.isValid());
-    return type;
-  }
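A rough sketch of the non-decimal branch above: ADD/SUBTRACT/MULTIPLY promote the common type one resolution step so the operation cannot overflow, MOD keeps the common type, and DIVIDE always yields DOUBLE. The type ladder below is a simplification assumed for illustration, not the actual Impala type lattice.

import java.util.Arrays;
import java.util.List;

// Simplified promotion sketch; the ladder order and the "next" step are
// assumptions made for illustration only.
public class ArithmeticResultTypeSketch {
  static final List<String> LADDER =
      Arrays.asList("TINYINT", "SMALLINT", "INT", "BIGINT", "DOUBLE");

  static String compatible(String t1, String t2) {
    return LADDER.get(Math.max(LADDER.indexOf(t1), LADDER.indexOf(t2)));
  }

  static String next(String t) {
    return LADDER.get(Math.min(LADDER.indexOf(t) + 1, LADDER.size() - 1));
  }

  static String result(String op, String t1, String t2) {
    if (op.equals("DIVIDE")) return "DOUBLE";
    String common = compatible(t1, t2);
    return op.equals("MOD") ? common : next(common);  // ADD/SUBTRACT/MULTIPLY
  }

  public static void main(String[] args) {
    System.out.println(result("ADD", "INT", "SMALLINT"));       // BIGINT
    System.out.println(result("DIVIDE", "TINYINT", "TINYINT")); // DOUBLE
  }
}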
-
-  /**
-   * Returns the resulting type of (t1 op t2).
-   * These rules are mostly taken from the hive/sql server rules with some changes.
-   * http://blogs.msdn.com/b/sqlprogrammability/archive/2006/03/29/564110.aspx
-   *
-   * Changes:
-   *  - Multiply does not need +1 for the result precision.
-   *  - Divide scale truncation is different.
-   */
-  public static ScalarType getDecimalArithmeticResultType(Type t1, Type t2,
-      ArithmeticExpr.Operator op) throws AnalysisException {
-    Preconditions.checkState(t1.isFullySpecifiedDecimal());
-    Preconditions.checkState(t2.isFullySpecifiedDecimal());
-    ScalarType st1 = (ScalarType) t1;
-    ScalarType st2 = (ScalarType) t2;
-    int s1 = st1.decimalScale();
-    int s2 = st2.decimalScale();
-    int p1 = st1.decimalPrecision();
-    int p2 = st2.decimalPrecision();
-    int sMax = Math.max(s1, s2);
-
-    switch (op) {
-      case ADD:
-      case SUBTRACT:
-        return ScalarType.createDecimalTypeInternal(
-            sMax + Math.max(p1 - s1, p2 - s2) + 1, sMax);
-      case MULTIPLY:
-        return ScalarType.createDecimalTypeInternal(p1 + p2, s1 + s2);
-      case DIVIDE:
-        int resultScale = Math.max(DECIMAL_DIVISION_SCALE_INCREMENT, s1 + p2 + 1);
-        int resultPrecision = p1 - s1 + s2 + resultScale;
-        if (resultPrecision > ScalarType.MAX_PRECISION) {
-          // In this case, the desired resulting precision exceeds the maximum and
-          // we need to truncate in some way. We can either remove digits before or
-          // after the decimal and there is no right answer. This is an implementation
-          // detail and different databases will handle this differently.
-          // For simplicity, we will set the resulting scale to be the max of the input
-          // scales and use the maximum precision.
-          resultScale = Math.max(s1, s2);
-          resultPrecision = ScalarType.MAX_PRECISION;
-        }
-        return ScalarType.createDecimalTypeInternal(resultPrecision, resultScale);
-      case MOD:
-        return ScalarType.createDecimalTypeInternal(
-            Math.min(p1 - s1, p2 - s2) + sMax, sMax);
-      default:
-        throw new AnalysisException(
-            "Operation '" + op + "' is not allowed for decimal types.");
-    }
-  }
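The precision/scale formulas above can be exercised in isolation. The sketch below mirrors the ADD/SUBTRACT and DIVIDE cases, including the truncation applied when the ideal precision would exceed 38 digits; it is an illustration only, not the Impala implementation.

// Minimal sketch of the DECIMAL result-type rules described above.
public class DecimalArithmeticDemo {
  static final int MAX_PRECISION = 38;
  static final int DIV_SCALE_INCREMENT = 4;

  static int[] add(int p1, int s1, int p2, int s2) {
    int sMax = Math.max(s1, s2);
    return new int[] { sMax + Math.max(p1 - s1, p2 - s2) + 1, sMax };
  }

  static int[] divide(int p1, int s1, int p2, int s2) {
    int scale = Math.max(DIV_SCALE_INCREMENT, s1 + p2 + 1);
    int precision = p1 - s1 + s2 + scale;
    if (precision > MAX_PRECISION) {  // truncate as described above
      scale = Math.max(s1, s2);
      precision = MAX_PRECISION;
    }
    return new int[] { precision, scale };
  }

  public static void main(String[] args) {
    // decimal(10,2) + decimal(12,2) -> decimal(13,2)
    System.out.println(java.util.Arrays.toString(add(10, 2, 12, 2)));
    // decimal(10,2) / decimal(12,2) -> scale max(4, 2+12+1) = 15, precision 25
    System.out.println(java.util.Arrays.toString(divide(10, 2, 12, 2)));
  }
}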
-
-  /**
-   * Computes the ColumnType that can represent 'v' with no loss of resolution.
-   * The scale/precision in BigDecimal is not compatible with SQL decimal semantics
-   * (much more like significant figures and exponent).
-   * Returns null if the value cannot be represented.
-   */
-  public static Type computeDecimalType(BigDecimal v) {
-    // PlainString returns the string with no exponent. We walk it to compute
-    // the digits before and after.
-    // TODO: better way?
-    String str = v.toPlainString();
-    int digitsBefore = 0;
-    int digitsAfter = 0;
-    boolean decimalFound = false;
-    boolean leadingZeros = true;
-    for (int i = 0; i < str.length(); ++i) {
-      char c = str.charAt(i);
-      if (c == '-') continue;
-      if (c == '.') {
-        decimalFound = true;
-        continue;
-      }
-      if (decimalFound) {
-        ++digitsAfter;
-      } else {
-        // Strip out leading 0 before the decimal point. We want "0.1" to
-        // be parsed as ".1" (1 digit instead of 2).
-        if (c == '0' && leadingZeros) continue;
-        leadingZeros = false;
-        ++digitsBefore;
-      }
-    }
-    if (digitsAfter > ScalarType.MAX_SCALE) return null;
-    if (digitsBefore + digitsAfter > ScalarType.MAX_PRECISION) return null;
-    if (digitsBefore == 0 && digitsAfter == 0) digitsBefore = 1;
-    return ScalarType.createDecimalType(digitsBefore + digitsAfter, digitsAfter);
-  }
-}
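The digit-counting walk in computeDecimalType() above can be reproduced standalone. The sketch below assumes a maximum precision and scale of 38 and returns a {precision, scale} pair, or null when the literal does not fit; it is an illustration, not the original method.

import java.math.BigDecimal;

// Minimal sketch: count digits before/after the point in toPlainString(),
// ignoring the sign and leading zeros before the decimal point.
public class ComputeDecimalTypeDemo {
  static int[] computeDecimalType(BigDecimal v) {
    int before = 0, after = 0;
    boolean decimalFound = false, leadingZeros = true;
    for (char c : v.toPlainString().toCharArray()) {
      if (c == '-') continue;
      if (c == '.') { decimalFound = true; continue; }
      if (decimalFound) {
        ++after;
      } else {
        if (c == '0' && leadingZeros) continue;  // "0.1" counts as one digit
        leadingZeros = false;
        ++before;
      }
    }
    if (after > 38 || before + after > 38) return null;
    if (before == 0 && after == 0) before = 1;   // the literal 0
    return new int[] { before + after, after };
  }

  public static void main(String[] args) {
    System.out.println(java.util.Arrays.toString(
        computeDecimalType(new BigDecimal("0.1"))));      // [1, 1]
    System.out.println(java.util.Arrays.toString(
        computeDecimalType(new BigDecimal("-123.45"))));  // [5, 2]
  }
}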

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/UnionStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/UnionStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/UnionStmt.java
deleted file mode 100644
index 10758c7..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/UnionStmt.java
+++ /dev/null
@@ -1,607 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.catalog.ColumnStats;
-import com.cloudera.impala.common.AnalysisException;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-/**
- * Representation of a union with its list of operands, and optional order by and limit.
- * A union materializes its results, and its resultExprs are slotrefs into the
- * materialized tuple.
- * During analysis, the operands are normalized (separated into a single sequence of
- * DISTINCT followed by a single sequence of ALL operands) and unnested to the extent
- * possible. This also creates the AggregationInfo for DISTINCT operands.
- */
-public class UnionStmt extends QueryStmt {
-  private final static Logger LOG = LoggerFactory.getLogger(UnionStmt.class);
-
-  public static enum Qualifier {
-    ALL,
-    DISTINCT
-  }
-
-  /**
-   * Represents an operand to a union, created by the parser.
-   * Contains a query statement and the all/distinct qualifier
-   * of the union operator (null for the first queryStmt).
-   */
-  public static class UnionOperand {
-    // Qualifier as seen by the parser. Null for the first operand.
-    private final Qualifier originalQualifier_;
-
-    /////////////////////////////////////////
-    // BEGIN: Members that need to be reset()
-
-    private final QueryStmt queryStmt_;
-
-    // Effective qualifier. Possibly different from originalQualifier_ due
-    // to DISTINCT propagation.
-    private Qualifier qualifier_;
-
-    // Analyzer used for this operand. Set in analyze().
-    // We must preserve the conjuncts registered in the analyzer for partition pruning.
-    private Analyzer analyzer_;
-
-    // Map from UnionStmt's result slots to our resultExprs. Used during plan generation.
-    private final ExprSubstitutionMap smap_;
-
-    // END: Members that need to be reset()
-    /////////////////////////////////////////
-
-    public UnionOperand(QueryStmt queryStmt, Qualifier qualifier) {
-      queryStmt_ = queryStmt;
-      originalQualifier_ = qualifier;
-      qualifier_ = qualifier;
-      smap_ = new ExprSubstitutionMap();
-    }
-
-    public void analyze(Analyzer parent) throws AnalysisException {
-      if (isAnalyzed()) return;
-      analyzer_ = new Analyzer(parent);
-      queryStmt_.analyze(analyzer_);
-    }
-
-    public boolean isAnalyzed() { return analyzer_ != null; }
-    public QueryStmt getQueryStmt() { return queryStmt_; }
-    public Qualifier getQualifier() { return qualifier_; }
-    // Used for propagating DISTINCT.
-    public void setQualifier(Qualifier qualifier) { qualifier_ = qualifier; }
-    public Analyzer getAnalyzer() { return analyzer_; }
-    public ExprSubstitutionMap getSmap() { return smap_; }
-
-    public boolean hasAnalyticExprs() {
-      if (queryStmt_ instanceof SelectStmt) {
-        return ((SelectStmt) queryStmt_).hasAnalyticInfo();
-      } else {
-        Preconditions.checkState(queryStmt_ instanceof UnionStmt);
-        return ((UnionStmt) queryStmt_).hasAnalyticExprs();
-      }
-    }
-
-    /**
-     * C'tor for cloning.
-     */
-    private UnionOperand(UnionOperand other) {
-      queryStmt_ = other.queryStmt_.clone();
-      originalQualifier_ = other.originalQualifier_;
-      qualifier_ = other.qualifier_;
-      analyzer_ = other.analyzer_;
-      smap_ = other.smap_.clone();
-    }
-
-    public void reset() {
-      queryStmt_.reset();
-      qualifier_ = originalQualifier_;
-      analyzer_ = null;
-      smap_.clear();
-    }
-
-    @Override
-    public UnionOperand clone() { return new UnionOperand(this); }
-  }
-
-  /////////////////////////////////////////
-  // BEGIN: Members that need to be reset()
-
-  // before analysis, this contains the list of union operands derived verbatim
-  // from the query;
-  // after analysis, this contains all of distinctOperands followed by allOperands
-  protected final List<UnionOperand> operands_;
-
-  // filled during analyze(); contains all operands that need to go through
-  // distinct aggregation
-  protected final List<UnionOperand> distinctOperands_ = Lists.newArrayList();
-
-  // filled during analyze(); contains all operands that can be aggregated with
-  // a simple merge without duplicate elimination (also needs to merge the output
-  // of the DISTINCT operands)
-  protected final List<UnionOperand> allOperands_ = Lists.newArrayList();
-
-  protected AggregateInfo distinctAggInfo_;  // only set if we have DISTINCT ops
-
-  // Single tuple materialized by the union. Set in analyze().
-  protected TupleId tupleId_;
-
-  // set prior to unnesting
-  protected String toSqlString_ = null;
-
-  // true if any of the operands_ references an AnalyticExpr
-  private boolean hasAnalyticExprs_ = false;
-
-  // END: Members that need to be reset()
-  /////////////////////////////////////////
-
-  public UnionStmt(List<UnionOperand> operands,
-      ArrayList<OrderByElement> orderByElements, LimitElement limitElement) {
-    super(orderByElements, limitElement);
-    operands_ = operands;
-  }
-
-  /**
-   * C'tor for cloning.
-   */
-  protected UnionStmt(UnionStmt other) {
-    super(other.cloneOrderByElements(),
-        (other.limitElement_ == null) ? null : other.limitElement_.clone());
-    operands_ = Lists.newArrayList();
-    if (analyzer_ != null) {
-      for (UnionOperand o: other.distinctOperands_) distinctOperands_.add(o.clone());
-      for (UnionOperand o: other.allOperands_) allOperands_.add(o.clone());
-      operands_.addAll(distinctOperands_);
-      operands_.addAll(allOperands_);
-    } else {
-      for (UnionOperand operand: other.operands_) operands_.add(operand.clone());
-    }
-    analyzer_ = other.analyzer_;
-    distinctAggInfo_ =
-        (other.distinctAggInfo_ != null) ? other.distinctAggInfo_.clone() : null;
-    tupleId_ = other.tupleId_;
-    toSqlString_ = (other.toSqlString_ != null) ? new String(other.toSqlString_) : null;
-    hasAnalyticExprs_ = other.hasAnalyticExprs_;
-    withClause_ = (other.withClause_ != null) ? other.withClause_.clone() : null;
-  }
-
-  public List<UnionOperand> getOperands() { return operands_; }
-  public List<UnionOperand> getDistinctOperands() { return distinctOperands_; }
-  public boolean hasDistinctOps() { return !distinctOperands_.isEmpty(); }
-  public List<UnionOperand> getAllOperands() { return allOperands_; }
-  public boolean hasAllOps() { return !allOperands_.isEmpty(); }
-  public AggregateInfo getDistinctAggInfo() { return distinctAggInfo_; }
-  public boolean hasAnalyticExprs() { return hasAnalyticExprs_; }
-
-  public void removeAllOperands() {
-    operands_.removeAll(allOperands_);
-    allOperands_.clear();
-  }
-
-  /**
-   * Propagates DISTINCT from right to left, and checks that all
-   * union operands are union compatible, adding implicit casts if necessary.
-   */
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    if (isAnalyzed()) return;
-    try {
-      super.analyze(analyzer);
-    } catch (AnalysisException e) {
-      if (analyzer.getMissingTbls().isEmpty()) throw e;
-    }
-    Preconditions.checkState(operands_.size() > 0);
-
-    // Propagates DISTINCT from right to left
-    propagateDistinct();
-
-    // Make sure all operands return an equal number of exprs.
-    QueryStmt firstQuery = operands_.get(0).getQueryStmt();
-
-    try {
-      operands_.get(0).analyze(analyzer);
-    } catch (AnalysisException e) {
-      if (analyzer.getMissingTbls().isEmpty()) throw e;
-    }
-
-    List<List<Expr>> resultExprLists = Lists.newArrayList();
-    List<Expr> firstQueryExprs = firstQuery.getBaseTblResultExprs();
-    resultExprLists.add(firstQueryExprs);
-    for (int i = 1; i < operands_.size(); ++i) {
-      QueryStmt query = operands_.get(i).getQueryStmt();
-      try {
-        operands_.get(i).analyze(analyzer);
-        List<Expr> exprs = query.getBaseTblResultExprs();
-        if (firstQueryExprs.size() != exprs.size()) {
-          throw new AnalysisException("Operands have unequal number of columns:\n" +
-              "'" + queryStmtToSql(firstQuery) + "' has " +
-              firstQueryExprs.size() + " column(s)\n" +
-              "'" + queryStmtToSql(query) + "' has " + exprs.size() + " column(s)");
-        }
-        resultExprLists.add(exprs);
-      } catch (AnalysisException e) {
-        if (analyzer.getMissingTbls().isEmpty()) throw e;
-      }
-    }
-
-    if (!analyzer.getMissingTbls().isEmpty()) {
-      throw new AnalysisException("Found missing tables. Aborting analysis.");
-    }
-
-    // compute hasAnalyticExprs_
-    hasAnalyticExprs_ = false;
-    for (UnionOperand op: operands_) {
-      if (op.hasAnalyticExprs()) {
-        hasAnalyticExprs_ = true;
-        break;
-      }
-    }
-
-    analyzer.castToUnionCompatibleTypes(resultExprLists);
-
-    // Create tuple descriptor materialized by this UnionStmt,
-    // its resultExprs, and its sortInfo if necessary.
-    createMetadata(analyzer);
-    createSortInfo(analyzer);
-    toSqlString_ = toSql();
-
-    unnestOperands(analyzer);
-    if (evaluateOrderBy_) createSortTupleInfo(analyzer);
-    baseTblResultExprs_ = resultExprs_;
-  }
-
-  /**
-   * Marks the baseTblResultExprs of its operands as materialized, based on
-   * which of the output slots have been marked.
-   * Calls materializeRequiredSlots() on the operands themselves.
-   */
-  @Override
-  public void materializeRequiredSlots(Analyzer analyzer) {
-    TupleDescriptor tupleDesc = analyzer.getDescTbl().getTupleDesc(tupleId_);
-    if (!distinctOperands_.isEmpty()) {
-      // to keep things simple we materialize all grouping exprs = output slots,
-      // regardless of what's being referenced externally
-      for (SlotDescriptor slotDesc: tupleDesc.getSlots()) {
-        slotDesc.setIsMaterialized(true);
-      }
-    }
-
-    if (evaluateOrderBy_) {
-      sortInfo_.materializeRequiredSlots(analyzer, null);
-    }
-
-    // collect operands' result exprs
-    List<SlotDescriptor> outputSlots = tupleDesc.getSlots();
-    List<Expr> exprs = Lists.newArrayList();
-    for (int i = 0; i < outputSlots.size(); ++i) {
-      SlotDescriptor slotDesc = outputSlots.get(i);
-      if (!slotDesc.isMaterialized()) continue;
-      for (UnionOperand op: operands_) {
-        exprs.add(op.getQueryStmt().getBaseTblResultExprs().get(i));
-      }
-      if (distinctAggInfo_ != null) {
-        // also mark the corresponding slot in the distinct agg tuple as being
-        // materialized
-        distinctAggInfo_.getOutputTupleDesc().getSlots().get(i).setIsMaterialized(true);
-      }
-    }
-    materializeSlots(analyzer, exprs);
-
-    for (UnionOperand op: operands_) {
-      op.getQueryStmt().materializeRequiredSlots(analyzer);
-    }
-  }
-
-  /**
-   * Fills distinct-/allOperands and performs possible unnesting of UnionStmt
-   * operands in the process.
-   */
-  private void unnestOperands(Analyzer analyzer) throws AnalysisException {
-    if (operands_.size() == 1) {
-      // ValuesStmt for a single row.
-      allOperands_.add(operands_.get(0));
-      setOperandSmap(operands_.get(0), analyzer);
-      return;
-    }
-
-    // find index of first ALL operand
-    int firstUnionAllIdx = operands_.size();
-    for (int i = 1; i < operands_.size(); ++i) {
-      UnionOperand operand = operands_.get(i);
-      if (operand.getQualifier() == Qualifier.ALL) {
-        firstUnionAllIdx = (i == 1 ? 0 : i);
-        break;
-      }
-    }
-    // operands[0] is always implicitly ALL, so operands[1] can't be the
-    // first one
-    Preconditions.checkState(firstUnionAllIdx != 1);
-
-    // unnest DISTINCT operands
-    Preconditions.checkState(distinctOperands_.isEmpty());
-    for (int i = 0; i < firstUnionAllIdx; ++i) {
-      unnestOperand(distinctOperands_, Qualifier.DISTINCT, operands_.get(i));
-    }
-
-    // unnest ALL operands
-    Preconditions.checkState(allOperands_.isEmpty());
-    for (int i = firstUnionAllIdx; i < operands_.size(); ++i) {
-      unnestOperand(allOperands_, Qualifier.ALL, operands_.get(i));
-    }
-
-    operands_.clear();
-    operands_.addAll(distinctOperands_);
-    operands_.addAll(allOperands_);
-
-    // create unnested operands' smaps
-    for (UnionOperand operand: operands_) {
-      setOperandSmap(operand, analyzer);
-    }
-
-    // create distinctAggInfo, if necessary
-    if (!distinctOperands_.isEmpty()) {
-      // Aggregate produces exactly the same tuple as the original union stmt.
-      ArrayList<Expr> groupingExprs = Expr.cloneList(resultExprs_);
-      try {
-        distinctAggInfo_ =
-            AggregateInfo.create(groupingExprs, null,
-              analyzer.getDescTbl().getTupleDesc(tupleId_), analyzer);
-      } catch (AnalysisException e) {
-        // this should never happen
-        throw new AnalysisException("error creating agg info in UnionStmt.analyze()");
-      }
-    }
-  }
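The split performed by unnestOperands() can be pictured with plain strings standing in for operands. The sketch below only shows how the position of the first ALL qualifier divides the list into a DISTINCT prefix and an ALL suffix; the names are illustrative and nothing here touches the real classes.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Minimal sketch of the DISTINCT/ALL split; qualifiers.get(0) is null because
// the first operand carries no explicit qualifier.
public class UnnestOperandsSketch {
  public static void main(String[] args) {
    List<String> operands = Arrays.asList("A", "B", "C", "D");
    List<String> qualifiers = Arrays.asList(null, "DISTINCT", "ALL", "ALL");

    int firstAllIdx = operands.size();
    for (int i = 1; i < operands.size(); ++i) {
      if ("ALL".equals(qualifiers.get(i))) {
        firstAllIdx = (i == 1 ? 0 : i);  // operand 0 is implicitly ALL
        break;
      }
    }
    List<String> distinctOps = new ArrayList<>(operands.subList(0, firstAllIdx));
    List<String> allOps =
        new ArrayList<>(operands.subList(firstAllIdx, operands.size()));
    System.out.println(distinctOps + " " + allOps);  // [A, B] [C, D]
  }
}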
-
-  /**
-   * Sets the smap for the given operand. It maps from the output slots of this
-   * union's tuple to the corresponding base table exprs of the operand.
-   */
-  private void setOperandSmap(UnionOperand operand, Analyzer analyzer) {
-    TupleDescriptor tupleDesc = analyzer.getDescTbl().getTupleDesc(tupleId_);
-    // operands' smaps were already set in the operands' analyze()
-    operand.getSmap().clear();
-    for (int i = 0; i < tupleDesc.getSlots().size(); ++i) {
-      SlotDescriptor outputSlot = tupleDesc.getSlots().get(i);
-      operand.getSmap().put(
-          new SlotRef(outputSlot),
-          // TODO: baseTblResultExprs?
-          operand.getQueryStmt().getResultExprs().get(i).clone());
-    }
-  }
-
-  /**
-   * Add a single operand to the target list; if the operand itself is a UnionStmt,
-   * apply unnesting to the extent possible (possibly modifying 'operand' in the process).
-   */
-  private void unnestOperand(
-      List<UnionOperand> target, Qualifier targetQualifier, UnionOperand operand) {
-    QueryStmt queryStmt = operand.getQueryStmt();
-    if (queryStmt instanceof SelectStmt) {
-      target.add(operand);
-      return;
-    }
-
-    Preconditions.checkState(queryStmt instanceof UnionStmt);
-    UnionStmt unionStmt = (UnionStmt) queryStmt;
-    if (unionStmt.hasLimit() || unionStmt.hasOffset()) {
-      // we must preserve the nested Union
-      target.add(operand);
-    } else if (targetQualifier == Qualifier.DISTINCT || !unionStmt.hasDistinctOps()) {
-      // there is no limit in the nested Union and we can absorb all of its
-      // operands as-is
-      target.addAll(unionStmt.getDistinctOperands());
-      target.addAll(unionStmt.getAllOperands());
-    } else {
-      // the nested Union contains some Distinct ops and we're accumulating
-      // into our All ops; unnest only the All ops and leave the rest in place
-      target.addAll(unionStmt.getAllOperands());
-      unionStmt.removeAllOperands();
-      target.add(operand);
-    }
-  }
-
-  /**
-   * String representation of queryStmt used in reporting errors.
-   * Allow subclasses to override this.
-   */
-  protected String queryStmtToSql(QueryStmt queryStmt) {
-    return queryStmt.toSql();
-  }
-
-  /**
-   * Propagates DISTINCT (if present) from right to left.
-   * Implied associativity:
-   * A UNION ALL B UNION DISTINCT C = (A UNION ALL B) UNION DISTINCT C
-   * = A UNION DISTINCT B UNION DISTINCT C
-   */
-  private void propagateDistinct() {
-    int lastDistinctPos = -1;
-    for (int i = operands_.size() - 1; i > 0; --i) {
-      UnionOperand operand = operands_.get(i);
-      if (lastDistinctPos != -1) {
-        // There is a DISTINCT somewhere to the right.
-        operand.setQualifier(Qualifier.DISTINCT);
-      } else if (operand.getQualifier() == Qualifier.DISTINCT) {
-        lastDistinctPos = i;
-      }
-    }
-  }
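A minimal standalone sketch of the right-to-left propagation above, with a local enum in place of the real Qualifier; the first list entry is null because the first operand has no qualifier. Illustration only.

import java.util.Arrays;
import java.util.List;

// Once a DISTINCT is seen somewhere to the right, every earlier operand
// (except the first) becomes DISTINCT as well.
public class PropagateDistinctDemo {
  enum Qualifier { ALL, DISTINCT }

  static void propagateDistinct(List<Qualifier> quals) {
    int lastDistinctPos = -1;
    for (int i = quals.size() - 1; i > 0; --i) {
      if (lastDistinctPos != -1) {
        quals.set(i, Qualifier.DISTINCT);
      } else if (quals.get(i) == Qualifier.DISTINCT) {
        lastDistinctPos = i;
      }
    }
  }

  public static void main(String[] args) {
    // A UNION ALL B UNION DISTINCT C: the ALL between A and B becomes DISTINCT.
    List<Qualifier> quals = Arrays.asList(null, Qualifier.ALL, Qualifier.DISTINCT);
    propagateDistinct(quals);
    System.out.println(quals);  // [null, DISTINCT, DISTINCT]
  }
}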
-
-  /**
-   * Create a descriptor for the tuple materialized by the union.
-   * Set resultExprs to be slot refs into that tuple.
-   * Also fills the substitution map, such that "order by" can properly resolve
-   * column references from the result of the union.
-   */
-  private void createMetadata(Analyzer analyzer) throws AnalysisException {
-    // Create tuple descriptor for materialized tuple created by the union.
-    TupleDescriptor tupleDesc = analyzer.getDescTbl().createTupleDescriptor("union");
-    tupleDesc.setIsMaterialized(true);
-    tupleId_ = tupleDesc.getId();
-    LOG.trace("UnionStmt.createMetadata: tupleId=" + tupleId_.toString());
-
-    // One slot per expr in the select blocks. Use first select block as representative.
-    List<Expr> firstSelectExprs = operands_.get(0).getQueryStmt().getBaseTblResultExprs();
-
-    // Compute column stats for the materialized slots from the source exprs.
-    List<ColumnStats> columnStats = Lists.newArrayList();
-    for (int i = 0; i < operands_.size(); ++i) {
-      List<Expr> selectExprs = operands_.get(i).getQueryStmt().getBaseTblResultExprs();
-      for (int j = 0; j < selectExprs.size(); ++j) {
-        ColumnStats statsToAdd = ColumnStats.fromExpr(selectExprs.get(j));
-        if (i == 0) {
-          columnStats.add(statsToAdd);
-        } else {
-          columnStats.get(j).add(statsToAdd);
-        }
-      }
-    }
-
-    // Create tuple descriptor and slots.
-    for (int i = 0; i < firstSelectExprs.size(); ++i) {
-      Expr expr = firstSelectExprs.get(i);
-      SlotDescriptor slotDesc = analyzer.addSlotDescriptor(tupleDesc);
-      slotDesc.setLabel(getColLabels().get(i));
-      slotDesc.setType(expr.getType());
-      slotDesc.setStats(columnStats.get(i));
-      SlotRef outputSlotRef = new SlotRef(slotDesc);
-      resultExprs_.add(outputSlotRef);
-
-      // Add to aliasSMap so that column refs in "order by" can be resolved.
-      if (orderByElements_ != null) {
-        SlotRef aliasRef = new SlotRef(getColLabels().get(i));
-        if (aliasSmap_.containsMappingFor(aliasRef)) {
-          ambiguousAliasList_.add(aliasRef);
-        } else {
-          aliasSmap_.put(aliasRef, outputSlotRef);
-        }
-      }
-
-      // register single-directional value transfers from output slot
-      // to operands' result exprs (if those happen to be slotrefs);
-      // don't do that if the operand computes analytic exprs
-      // (see Planner.createInlineViewPlan() for the reasoning)
-      for (UnionOperand op: operands_) {
-        Expr resultExpr = op.getQueryStmt().getBaseTblResultExprs().get(i);
-        slotDesc.addSourceExpr(resultExpr);
-        if (op.hasAnalyticExprs()) continue;
-        SlotRef slotRef = resultExpr.unwrapSlotRef(true);
-        if (slotRef == null) continue;
-        analyzer.registerValueTransfer(outputSlotRef.getSlotId(), slotRef.getSlotId());
-      }
-    }
-    baseTblResultExprs_ = resultExprs_;
-  }
-
-  public TupleId getTupleId() { return tupleId_; }
-
-  @Override
-  public void getMaterializedTupleIds(ArrayList<TupleId> tupleIdList) {
-    // Return the sort tuple if there is an evaluated order by.
-    if (evaluateOrderBy_) {
-      tupleIdList.add(sortInfo_.getSortTupleDescriptor().getId());
-    } else {
-      tupleIdList.add(tupleId_);
-    }
-  }
-
-  @Override
-  public void collectTableRefs(List<TableRef> tblRefs) {
-    for (UnionOperand op: operands_) {
-      op.getQueryStmt().collectTableRefs(tblRefs);
-    }
-  }
-
-  @Override
-  public String toSql() {
-    if (toSqlString_ != null) return toSqlString_;
-    StringBuilder strBuilder = new StringBuilder();
-    Preconditions.checkState(operands_.size() > 0);
-
-    if (withClause_ != null) {
-      strBuilder.append(withClause_.toSql());
-      strBuilder.append(" ");
-    }
-
-    strBuilder.append(operands_.get(0).getQueryStmt().toSql());
-    for (int i = 1; i < operands_.size() - 1; ++i) {
-      strBuilder.append(" UNION " +
-          ((operands_.get(i).getQualifier() == Qualifier.ALL) ? "ALL " : ""));
-      if (operands_.get(i).getQueryStmt() instanceof UnionStmt) {
-        strBuilder.append("(");
-      }
-      strBuilder.append(operands_.get(i).getQueryStmt().toSql());
-      if (operands_.get(i).getQueryStmt() instanceof UnionStmt) {
-        strBuilder.append(")");
-      }
-    }
-    // Determine whether we need parentheses around the last union operand.
-    UnionOperand lastOperand = operands_.get(operands_.size() - 1);
-    QueryStmt lastQueryStmt = lastOperand.getQueryStmt();
-    strBuilder.append(" UNION " +
-        ((lastOperand.getQualifier() == Qualifier.ALL) ? "ALL " : ""));
-    if (lastQueryStmt instanceof UnionStmt ||
-        ((hasOrderByClause() || hasLimit() || hasOffset()) &&
-            !lastQueryStmt.hasLimit() && !lastQueryStmt.hasOffset() &&
-            !lastQueryStmt.hasOrderByClause())) {
-      strBuilder.append("(");
-      strBuilder.append(lastQueryStmt.toSql());
-      strBuilder.append(")");
-    } else {
-      strBuilder.append(lastQueryStmt.toSql());
-    }
-    // Order By clause
-    if (hasOrderByClause()) {
-      strBuilder.append(" ORDER BY ");
-      for (int i = 0; i < orderByElements_.size(); ++i) {
-        strBuilder.append(orderByElements_.get(i).toSql());
-        strBuilder.append((i+1 != orderByElements_.size()) ? ", " : "");
-      }
-    }
-    // Limit clause.
-    strBuilder.append(limitElement_.toSql());
-    return strBuilder.toString();
-  }
-
-  @Override
-  public ArrayList<String> getColLabels() {
-    Preconditions.checkState(operands_.size() > 0);
-    return operands_.get(0).getQueryStmt().getColLabels();
-  }
-
-  @Override
-  public UnionStmt clone() { return new UnionStmt(this); }
-
-  @Override
-  public void reset() {
-    super.reset();
-    for (UnionOperand op: operands_) op.reset();
-    distinctOperands_.clear();
-    allOperands_.clear();
-    distinctAggInfo_ = null;
-    tupleId_ = null;
-    toSqlString_ = null;
-    hasAnalyticExprs_ = false;
-  }
-}



[15/61] [partial] incubator-impala git commit: IMPALA-3786: Replace "cloudera" with "apache" (part 1)

Posted by kw...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/service/CatalogOpExecutor.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/service/CatalogOpExecutor.java b/fe/src/main/java/com/cloudera/impala/service/CatalogOpExecutor.java
deleted file mode 100644
index 4814503..0000000
--- a/fe/src/main/java/com/cloudera/impala/service/CatalogOpExecutor.java
+++ /dev/null
@@ -1,2990 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.service;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.common.StatsSetupConst;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
-import org.apache.hadoop.hive.metastore.PartitionDropOptions;
-import org.apache.hadoop.hive.metastore.TableType;
-import org.apache.hadoop.hive.metastore.Warehouse;
-import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
-import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData;
-import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
-import org.apache.hadoop.hive.metastore.api.DecimalColumnStatsData;
-import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.LongColumnStatsData;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.SerDeInfo;
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
-import org.apache.hadoop.hive.metastore.api.StringColumnStatsData;
-import org.apache.log4j.Logger;
-import org.apache.thrift.TException;
-
-import com.cloudera.impala.analysis.FunctionName;
-import com.cloudera.impala.analysis.TableName;
-import com.cloudera.impala.authorization.User;
-import com.cloudera.impala.catalog.Catalog;
-import com.cloudera.impala.catalog.CatalogException;
-import com.cloudera.impala.catalog.CatalogServiceCatalog;
-import com.cloudera.impala.catalog.Column;
-import com.cloudera.impala.catalog.ColumnNotFoundException;
-import com.cloudera.impala.catalog.DataSource;
-import com.cloudera.impala.catalog.DatabaseNotFoundException;
-import com.cloudera.impala.catalog.Db;
-import com.cloudera.impala.catalog.Function;
-import com.cloudera.impala.catalog.HBaseTable;
-import com.cloudera.impala.catalog.HdfsFileFormat;
-import com.cloudera.impala.catalog.HdfsPartition;
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.catalog.HiveStorageDescriptorFactory;
-import com.cloudera.impala.catalog.IncompleteTable;
-import com.cloudera.impala.catalog.MetaStoreClientPool.MetaStoreClient;
-import com.cloudera.impala.catalog.PartitionNotFoundException;
-import com.cloudera.impala.catalog.PartitionStatsUtil;
-import com.cloudera.impala.catalog.Role;
-import com.cloudera.impala.catalog.RolePrivilege;
-import com.cloudera.impala.catalog.RowFormat;
-import com.cloudera.impala.catalog.ScalarFunction;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.catalog.TableLoadingException;
-import com.cloudera.impala.catalog.TableNotFoundException;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.catalog.View;
-import com.cloudera.impala.catalog.delegates.DdlDelegate;
-import com.cloudera.impala.catalog.delegates.KuduDdlDelegate;
-import com.cloudera.impala.catalog.delegates.UnsupportedOpDelegate;
-import com.cloudera.impala.common.FileSystemUtil;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.common.ImpalaRuntimeException;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.common.Pair;
-import com.cloudera.impala.thrift.ImpalaInternalServiceConstants;
-import com.cloudera.impala.thrift.JniCatalogConstants;
-import com.cloudera.impala.thrift.TAlterTableAddPartitionParams;
-import com.cloudera.impala.thrift.TAlterTableAddReplaceColsParams;
-import com.cloudera.impala.thrift.TAlterTableChangeColParams;
-import com.cloudera.impala.thrift.TAlterTableDropColParams;
-import com.cloudera.impala.thrift.TAlterTableDropPartitionParams;
-import com.cloudera.impala.thrift.TAlterTableParams;
-import com.cloudera.impala.thrift.TAlterTableSetCachedParams;
-import com.cloudera.impala.thrift.TAlterTableSetFileFormatParams;
-import com.cloudera.impala.thrift.TAlterTableSetLocationParams;
-import com.cloudera.impala.thrift.TAlterTableSetTblPropertiesParams;
-import com.cloudera.impala.thrift.TAlterTableType;
-import com.cloudera.impala.thrift.TAlterTableUpdateStatsParams;
-import com.cloudera.impala.thrift.TCatalogObject;
-import com.cloudera.impala.thrift.TCatalogObjectType;
-import com.cloudera.impala.thrift.TCatalogUpdateResult;
-import com.cloudera.impala.thrift.TColumn;
-import com.cloudera.impala.thrift.TColumnStats;
-import com.cloudera.impala.thrift.TColumnType;
-import com.cloudera.impala.thrift.TColumnValue;
-import com.cloudera.impala.thrift.TCreateDataSourceParams;
-import com.cloudera.impala.thrift.TCreateDbParams;
-import com.cloudera.impala.thrift.TCreateDropRoleParams;
-import com.cloudera.impala.thrift.TCreateFunctionParams;
-import com.cloudera.impala.thrift.TCreateOrAlterViewParams;
-import com.cloudera.impala.thrift.TCreateTableLikeParams;
-import com.cloudera.impala.thrift.TCreateTableParams;
-import com.cloudera.impala.thrift.TDatabase;
-import com.cloudera.impala.thrift.TDdlExecRequest;
-import com.cloudera.impala.thrift.TDdlExecResponse;
-import com.cloudera.impala.thrift.TDistributeParam;
-import com.cloudera.impala.thrift.TDropDataSourceParams;
-import com.cloudera.impala.thrift.TDropDbParams;
-import com.cloudera.impala.thrift.TDropFunctionParams;
-import com.cloudera.impala.thrift.TDropStatsParams;
-import com.cloudera.impala.thrift.TDropTableOrViewParams;
-import com.cloudera.impala.thrift.TErrorCode;
-import com.cloudera.impala.thrift.TFunctionBinaryType;
-import com.cloudera.impala.thrift.TGrantRevokePrivParams;
-import com.cloudera.impala.thrift.TGrantRevokeRoleParams;
-import com.cloudera.impala.thrift.THdfsCachingOp;
-import com.cloudera.impala.thrift.THdfsFileFormat;
-import com.cloudera.impala.thrift.TPartitionKeyValue;
-import com.cloudera.impala.thrift.TPartitionStats;
-import com.cloudera.impala.thrift.TPrivilege;
-import com.cloudera.impala.thrift.TResetMetadataRequest;
-import com.cloudera.impala.thrift.TResetMetadataResponse;
-import com.cloudera.impala.thrift.TResultRow;
-import com.cloudera.impala.thrift.TResultSet;
-import com.cloudera.impala.thrift.TResultSetMetadata;
-import com.cloudera.impala.thrift.TStatus;
-import com.cloudera.impala.thrift.TTable;
-import com.cloudera.impala.thrift.TTableName;
-import com.cloudera.impala.thrift.TTableStats;
-import com.cloudera.impala.thrift.TTruncateParams;
-import com.cloudera.impala.thrift.TUpdateCatalogRequest;
-import com.cloudera.impala.thrift.TUpdateCatalogResponse;
-import com.cloudera.impala.util.HdfsCachingUtil;
-import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import com.google.common.collect.Sets;
-
-/**
- * Class used to execute Catalog Operations, including DDL and refresh/invalidate
- * metadata requests. Acts as a bridge between the Thrift catalog operation requests
- * and the non-thrift Java Catalog objects.
- *
- * Updates are applied first to the Hive Metastore and only if they succeed, are then
- * applied to the catalog objects. To ensure consistency in the presence of failed HMS
- * updates, DDL operations should not directly modify the HMS objects of the catalog
- * objects but should operate on copies instead.
- *
- * The CatalogOpExecutor uses table-level locking to protect table metadata during
- * concurrent modifications and is responsible for assigning a new catalog version when
- * a table is modified (e.g. alterTable()).
- *
- * The following locking protocol is employed to ensure that modifying
- * the table metadata and assigning a new catalog version is performed atomically and
- * consistently in the presence of concurrent DDL operations:
- * 1. Acquire the catalog lock (see CatalogServiceCatalog.catalogLock_)
- * 2. Acquire a table lock
- * 3. Increment and get a new catalog version
- * 4. Release the catalog lock
- * 5. Modify table metadata
- * 6. Release table lock
- *
- * Operations that CREATE/DROP catalog objects such as tables and databases employ the
- * following locking protocol:
- * 1. Acquire the metastoreDdlLock_
- * 2. Update the Hive Metastore
- * 3. Increment and get a new catalog version
- * 4. Update the catalog
- * 5. Release the metastoreDdlLock_
- *
- * It is imperative that other operations that need to hold both the catalog lock and
- * table locks at the same time follow the same locking protocol and acquire these
- * locks in that particular order. Also, operations that modify table metadata
- * (e.g. alter table statements) should not acquire the metastoreDdlLock_.
- *
- * TODO: Refactor the CatalogOpExecutor and CatalogServiceCatalog classes and consolidate
- * the locking protocol into a single class.
- *
- * TODO: Improve catalog's consistency guarantees by using a hierarchical locking scheme.
- * Currently, only concurrent modifications to table metadata are guaranteed to be
- * serialized. Concurrent DDL operations that DROP/ADD catalog objects,
- * especially in the presence of INVALIDATE METADATA and REFRESH, are not guaranteed to
- * be consistent (see IMPALA-2774).
- *
- * TODO: Create a Hive Metastore utility class to move code that interacts with the
- * metastore out of this class.
- */
-public class CatalogOpExecutor {
-  // Format string for exceptions returned by Hive Metastore RPCs.
-  private final static String HMS_RPC_ERROR_FORMAT_STR =
-      "Error making '%s' RPC to Hive Metastore: ";
-
-  private final CatalogServiceCatalog catalog_;
-
-  // Lock used to ensure that CREATE[DROP] TABLE[DATABASE] operations performed in
-  // catalog_ and the corresponding RPC to apply the change in HMS are atomic.
-  private final Object metastoreDdlLock_ = new Object();
-  private static final Logger LOG = Logger.getLogger(CatalogOpExecutor.class);
-
-  // The maximum number of partitions to update in one Hive Metastore RPC.
-  // Used when persisting the results of COMPUTE STATS statements.
-  private final static short MAX_PARTITION_UPDATES_PER_RPC = 500;
-
-  public CatalogOpExecutor(CatalogServiceCatalog catalog) {
-    catalog_ = catalog;
-  }
-
-  public TDdlExecResponse execDdlRequest(TDdlExecRequest ddlRequest)
-      throws ImpalaException {
-    TDdlExecResponse response = new TDdlExecResponse();
-    response.setResult(new TCatalogUpdateResult());
-    response.getResult().setCatalog_service_id(JniCatalog.getServiceId());
-    User requestingUser = null;
-    if (ddlRequest.isSetHeader()) {
-      requestingUser = new User(ddlRequest.getHeader().getRequesting_user());
-    }
-
-    switch (ddlRequest.ddl_type) {
-      case ALTER_TABLE:
-        alterTable(ddlRequest.getAlter_table_params(), response);
-        break;
-      case ALTER_VIEW:
-        alterView(ddlRequest.getAlter_view_params(), response);
-        break;
-      case CREATE_DATABASE:
-        createDatabase(ddlRequest.getCreate_db_params(), response);
-        break;
-      case CREATE_TABLE_AS_SELECT:
-        response.setNew_table_created(
-            createTable(ddlRequest.getCreate_table_params(), response));
-        break;
-      case CREATE_TABLE:
-        createTable(ddlRequest.getCreate_table_params(), response);
-        break;
-      case CREATE_TABLE_LIKE:
-        createTableLike(ddlRequest.getCreate_table_like_params(), response);
-        break;
-      case CREATE_VIEW:
-        createView(ddlRequest.getCreate_view_params(), response);
-        break;
-      case CREATE_FUNCTION:
-        createFunction(ddlRequest.getCreate_fn_params(), response);
-        break;
-      case CREATE_DATA_SOURCE:
-        createDataSource(ddlRequest.getCreate_data_source_params(), response);
-        break;
-      case COMPUTE_STATS:
-        Preconditions.checkState(false, "Compute stats should trigger an ALTER TABLE.");
-        break;
-      case DROP_STATS:
-        dropStats(ddlRequest.getDrop_stats_params(), response);
-        break;
-      case DROP_DATABASE:
-        dropDatabase(ddlRequest.getDrop_db_params(), response);
-        break;
-      case DROP_TABLE:
-      case DROP_VIEW:
-        dropTableOrView(ddlRequest.getDrop_table_or_view_params(), response);
-        break;
-      case TRUNCATE_TABLE:
-        truncateTable(ddlRequest.getTruncate_params(), response);
-        break;
-      case DROP_FUNCTION:
-        dropFunction(ddlRequest.getDrop_fn_params(), response);
-        break;
-      case DROP_DATA_SOURCE:
-        dropDataSource(ddlRequest.getDrop_data_source_params(), response);
-        break;
-      case CREATE_ROLE:
-      case DROP_ROLE:
-        createDropRole(requestingUser, ddlRequest.getCreate_drop_role_params(),
-            response);
-        break;
-      case GRANT_ROLE:
-      case REVOKE_ROLE:
-        grantRevokeRoleGroup(requestingUser, ddlRequest.getGrant_revoke_role_params(),
-            response);
-        break;
-      case GRANT_PRIVILEGE:
-      case REVOKE_PRIVILEGE:
-        grantRevokeRolePrivilege(requestingUser,
-            ddlRequest.getGrant_revoke_priv_params(), response);
-        break;
-      default: throw new IllegalStateException("Unexpected DDL exec request type: " +
-          ddlRequest.ddl_type);
-    }
-
-    // For responses that contain updates to catalog objects, check that the response
-    // either exclusively uses the single updated/removed field or the corresponding list
-    // versions of the fields, but not a mix.
-    // The non-list version of the fields are maintained for backwards compatibility,
-    // e.g., BDR relies on a stable catalog API.
-    TCatalogUpdateResult result = response.getResult();
-    Preconditions.checkState(!
-        ((result.isSetUpdated_catalog_object_DEPRECATED()
-        || result.isSetRemoved_catalog_object_DEPRECATED())
-        &&
-        (result.isSetUpdated_catalog_objects()
-        || result.isSetRemoved_catalog_objects())));
-
-    // At this point, the operation is considered successful. If any errors occurred
-    // during execution, this function will throw an exception and the CatalogServer
-    // will handle setting a bad status code.
-    response.getResult().setStatus(new TStatus(TErrorCode.OK, new ArrayList<String>()));
-    return response;
-  }
-
-  /**
-   * Execute the ALTER TABLE command according to the TAlterTableParams and refresh the
-   * table metadata, except for RENAME, ADD PARTITION and DROP PARTITION. This call is
-   * thread-safe, i.e. concurrent operations on the same table are serialized.
-   */
-  private void alterTable(TAlterTableParams params, TDdlExecResponse response)
-      throws ImpalaException {
-    // When true, loads the file/block metadata.
-    boolean reloadFileMetadata = false;
-    // When true, loads the table schema and the column stats from the Hive Metastore.
-    boolean reloadTableSchema = false;
-
-    TableName tableName = TableName.fromThrift(params.getTable_name());
-    Table tbl = getExistingTable(tableName.getDb(), tableName.getTbl());
-    catalog_.getLock().writeLock().lock();
-    synchronized (tbl) {
-      if (params.getAlter_type() == TAlterTableType.RENAME_VIEW
-          || params.getAlter_type() == TAlterTableType.RENAME_TABLE) {
-        // RENAME is implemented as an ADD + DROP, so we need to execute it while we hold
-        // the catalog lock.
-        try {
-          alterTableOrViewRename(tbl,
-              TableName.fromThrift(params.getRename_params().getNew_table_name()),
-              response);
-          return;
-        } finally {
-          catalog_.getLock().writeLock().unlock();
-        }
-      }
-      // Get a new catalog version to assign to the table being altered.
-      long newCatalogVersion = catalog_.incrementAndGetCatalogVersion();
-      catalog_.getLock().writeLock().unlock();
-      switch (params.getAlter_type()) {
-        case ADD_REPLACE_COLUMNS:
-          TAlterTableAddReplaceColsParams addReplaceColParams =
-              params.getAdd_replace_cols_params();
-          alterTableAddReplaceCols(tbl, addReplaceColParams.getColumns(),
-              addReplaceColParams.isReplace_existing_cols());
-          reloadTableSchema = true;
-          break;
-        case ADD_PARTITION:
-          TAlterTableAddPartitionParams addPartParams = params.getAdd_partition_params();
-          // Create and add HdfsPartition object to the corresponding HdfsTable and load
-          // its block metadata. Get the new table object with an updated catalog
-          // version. If the partition already exists in Hive and "IfNotExists" is true,
-          // then return without populating the response object.
-          Table refreshedTable = alterTableAddPartition(tbl,
-              addPartParams.getPartition_spec(), addPartParams.isIf_not_exists(),
-              addPartParams.getLocation(), addPartParams.getCache_op());
-          if (refreshedTable != null) {
-            refreshedTable.setCatalogVersion(newCatalogVersion);
-            addTableToCatalogUpdate(refreshedTable, response.result);
-          }
-          return;
-        case DROP_COLUMN:
-          TAlterTableDropColParams dropColParams = params.getDrop_col_params();
-          alterTableDropCol(tbl, dropColParams.getCol_name());
-          reloadTableSchema = true;
-          break;
-        case CHANGE_COLUMN:
-          TAlterTableChangeColParams changeColParams = params.getChange_col_params();
-          alterTableChangeCol(tbl, changeColParams.getCol_name(),
-              changeColParams.getNew_col_def());
-          reloadTableSchema = true;
-          break;
-        case DROP_PARTITION:
-          TAlterTableDropPartitionParams dropPartParams =
-              params.getDrop_partition_params();
-          // Drop the partition from the corresponding table. Get the table object
-          // with an updated catalog version. If the partition does not exist and
-          // "IfExists" is true, then return without populating the response object.
-          // If "purge" option is specified partition data is purged by skipping
-          // Trash, if configured.
-          refreshedTable = alterTableDropPartition(tbl,
-              dropPartParams.getPartition_spec(),
-              dropPartParams.isIf_exists(), dropPartParams.isPurge());
-          if (refreshedTable != null) {
-            refreshedTable.setCatalogVersion(newCatalogVersion);
-            addTableToCatalogUpdate(refreshedTable, response.result);
-          }
-          return;
-        case RENAME_TABLE:
-        case RENAME_VIEW:
-          Preconditions.checkState(false,
-              "RENAME TABLE/VIEW operation has been processed");
-          return;
-        case SET_FILE_FORMAT:
-          TAlterTableSetFileFormatParams fileFormatParams =
-              params.getSet_file_format_params();
-          List<TPartitionKeyValue> fileFormatPartitionSpec = null;
-          if (fileFormatParams.isSetPartition_spec()) {
-            fileFormatPartitionSpec = fileFormatParams.getPartition_spec();
-          }
-          reloadFileMetadata = alterTableSetFileFormat(tbl, fileFormatPartitionSpec,
-              fileFormatParams.getFile_format());
-          break;
-        case SET_LOCATION:
-          TAlterTableSetLocationParams setLocationParams =
-              params.getSet_location_params();
-          List<TPartitionKeyValue> partitionSpec = null;
-          if (setLocationParams.isSetPartition_spec()) {
-            partitionSpec = setLocationParams.getPartition_spec();
-          }
-          reloadFileMetadata = alterTableSetLocation(tbl, partitionSpec,
-              setLocationParams.getLocation());
-          break;
-        case SET_TBL_PROPERTIES:
-          alterTableSetTblProperties(tbl, params.getSet_tbl_properties_params());
-          break;
-        case UPDATE_STATS:
-          Preconditions.checkState(params.isSetUpdate_stats_params());
-          alterTableUpdateStats(tbl, params.getUpdate_stats_params(), response);
-          reloadTableSchema = true;
-          break;
-        case SET_CACHED:
-          Preconditions.checkState(params.isSetSet_cached_params());
-          if (params.getSet_cached_params().getPartition_spec() == null) {
-            reloadFileMetadata = alterTableSetCached(tbl, params.getSet_cached_params());
-          } else {
-            alterPartitionSetCached(tbl, params.getSet_cached_params());
-          }
-          break;
-        case RECOVER_PARTITIONS:
-          alterTableRecoverPartitions(tbl);
-          break;
-        default:
-          throw new UnsupportedOperationException(
-              "Unknown ALTER TABLE operation type: " + params.getAlter_type());
-      }
-
-      loadTableMetadata(tbl, newCatalogVersion, reloadFileMetadata, reloadTableSchema,
-          null);
-      addTableToCatalogUpdate(tbl, response.result);
-    } // end of synchronized block
-  }
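alterTable() above follows the six-step protocol from the class comment (catalog lock, table lock, new version, release catalog lock, modify, release table lock). The sketch below shows only that ordering with stand-in catalog and table objects; the names are made up and this is not the real CatalogServiceCatalog API.

import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Minimal sketch of the table-modification locking protocol described above.
public class LockingProtocolSketch {
  static final ReentrantReadWriteLock catalogLock = new ReentrantReadWriteLock();
  static final AtomicLong catalogVersion = new AtomicLong();

  static class FakeTable { long version; }

  static void alter(FakeTable tbl) {
    catalogLock.writeLock().lock();                        // 1. catalog lock
    synchronized (tbl) {                                   // 2. table lock
      long newVersion = catalogVersion.incrementAndGet();  // 3. new version
      catalogLock.writeLock().unlock();                    // 4. release catalog lock
      tbl.version = newVersion;                            // 5. modify metadata
    }                                                      // 6. release table lock
  }

  public static void main(String[] args) {
    FakeTable t = new FakeTable();
    alter(t);
    System.out.println(t.version);  // 1
  }
}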
-
-  /**
-   * Loads the metadata of a table 'tbl' and assigns a new catalog version.
-   * 'reloadFileMetadata', 'reloadTableSchema', and 'partitionsToUpdate'
-   * are used only for HdfsTables and control which metadata to reload.
-   * Throws a CatalogException if there is an error loading table metadata.
-   */
-  private void loadTableMetadata(Table tbl, long newCatalogVersion,
-      boolean reloadFileMetadata, boolean reloadTableSchema,
-      Set<String> partitionsToUpdate) throws CatalogException {
-    Preconditions.checkState(Thread.holdsLock(tbl));
-    try (MetaStoreClient msClient = catalog_.getMetaStoreClient()) {
-      org.apache.hadoop.hive.metastore.api.Table msTbl =
-          getMetaStoreTable(msClient, tbl);
-      if (tbl instanceof HdfsTable) {
-        ((HdfsTable) tbl).load(true, msClient.getHiveClient(), msTbl,
-            reloadFileMetadata, reloadTableSchema, partitionsToUpdate);
-      } else {
-        tbl.load(true, msClient.getHiveClient(), msTbl);
-      }
-    }
-    tbl.setCatalogVersion(newCatalogVersion);
-  }
-
-  /**
-   * Serializes and adds table 'tbl' to a TCatalogUpdateResult object. Uses the
-   * version of the serialized table as the version of the catalog update result.
-   */
-  private static void addTableToCatalogUpdate(Table tbl, TCatalogUpdateResult result) {
-    TCatalogObject updatedCatalogObject = TableToTCatalogObject(tbl);
-    result.setUpdated_catalog_object_DEPRECATED(TableToTCatalogObject(tbl));
-    result.setVersion(updatedCatalogObject.getCatalog_version());
-  }
-
-  /**
-   * Creates a new HdfsPartition object and adds it to the corresponding HdfsTable.
-   * Does not create the object in the Hive metastore.
-   */
-  private Table addHdfsPartition(Table tbl, Partition partition)
-      throws CatalogException {
-    Preconditions.checkNotNull(tbl);
-    Preconditions.checkNotNull(partition);
-    if (!(tbl instanceof HdfsTable)) {
-      throw new CatalogException("Table " + tbl.getFullName() + " is not an HDFS table");
-    }
-    HdfsTable hdfsTable = (HdfsTable) tbl;
-    HdfsPartition hdfsPartition = hdfsTable.createPartition(partition.getSd(), partition);
-    return catalog_.addPartition(hdfsPartition);
-  }
-
-  /**
-   * Alters an existing view's definition in the metastore. Throws an exception
-   * if the view does not exist or if the existing metadata entry is
-   * a table instead of a view.
-   */
-  private void alterView(TCreateOrAlterViewParams params, TDdlExecResponse resp)
-      throws ImpalaException {
-    TableName tableName = TableName.fromThrift(params.getView_name());
-    Preconditions.checkState(tableName != null && tableName.isFullyQualified());
-    Preconditions.checkState(params.getColumns() != null &&
-        params.getColumns().size() > 0,
-          "Null or empty column list given as argument to DdlExecutor.alterView");
-    Table tbl = catalog_.getTable(tableName.getDb(), tableName.getTbl());
-    Preconditions.checkState(tbl instanceof View);
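-    // The global catalog write lock is held only long enough to assign a new catalog
-    // version; the rest of the alteration runs while holding just the table's monitor.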
-    catalog_.getLock().writeLock().lock();
-    synchronized(tbl) {
-      long newCatalogVersion = catalog_.incrementAndGetCatalogVersion();
-      catalog_.getLock().writeLock().unlock();
-      // Operate on a copy of the metastore table to avoid prematurely applying the
-      // alteration to our cached table in case the actual alteration fails.
-      org.apache.hadoop.hive.metastore.api.Table msTbl =
-          tbl.getMetaStoreTable().deepCopy();
-      if (!msTbl.getTableType().equalsIgnoreCase(TableType.VIRTUAL_VIEW.toString())) {
-        throw new ImpalaRuntimeException(
-            String.format("ALTER VIEW not allowed on a table: %s",
-                tableName.toString()));
-      }
-
-      // Set the altered view attributes and update the metastore.
-      setViewAttributes(params, msTbl);
-      LOG.debug(String.format("Altering view %s", tableName));
-      applyAlterTable(msTbl);
-      try (MetaStoreClient msClient = catalog_.getMetaStoreClient()) {
-        tbl.load(true, msClient.getHiveClient(), msTbl);
-      }
-      tbl.setCatalogVersion(newCatalogVersion);
-      addTableToCatalogUpdate(tbl, resp.result);
-    }
-  }
-
-  /**
-   * Alters an existing table's table and/or column statistics. Partitions are updated
-   * in batches of size 'MAX_PARTITION_UPDATES_PER_RPC'.
-   */
-  private void alterTableUpdateStats(Table table, TAlterTableUpdateStatsParams params,
-      TDdlExecResponse resp) throws ImpalaException {
-    Preconditions.checkState(Thread.holdsLock(table));
-    if (params.isSetTable_stats()) {
-      // Updating table and column stats via COMPUTE STATS.
-      Preconditions.checkState(
-          params.isSetPartition_stats() && params.isSetTable_stats());
-    } else {
-      // Only changing column stats via ALTER TABLE SET COLUMN STATS.
-      Preconditions.checkState(params.isSetColumn_stats());
-    }
-
-    TableName tableName = table.getTableName();
-    Preconditions.checkState(tableName != null && tableName.isFullyQualified());
-    LOG.info(String.format("Updating table stats for: %s", tableName));
-
-    // Deep copy the msTbl to avoid updating our cache before successfully persisting
-    // the results to the metastore.
-    org.apache.hadoop.hive.metastore.api.Table msTbl =
-        table.getMetaStoreTable().deepCopy();
-    List<HdfsPartition> partitions = Lists.newArrayList();
-    if (table instanceof HdfsTable) {
-      // Build a list of non-default partitions to update.
-      HdfsTable hdfsTable = (HdfsTable) table;
-      for (HdfsPartition p: hdfsTable.getPartitions()) {
-        if (!p.isDefaultPartition()) partitions.add(p);
-      }
-    }
-
-    int numTargetedPartitions = 0;
-    int numUpdatedColumns = 0;
-    try (MetaStoreClient msClient = catalog_.getMetaStoreClient()) {
-      // Update the table and partition row counts based on the query results.
-      List<HdfsPartition> modifiedParts = Lists.newArrayList();
-      if (params.isSetTable_stats()) {
-        numTargetedPartitions = updateTableStats(table, params, msTbl, partitions,
-            modifiedParts);
-      }
-
-      ColumnStatistics colStats = null;
-      if (params.isSetColumn_stats()) {
-        // Create Hive column stats from the query results.
-        colStats = createHiveColStats(params.getColumn_stats(), table);
-        numUpdatedColumns = colStats.getStatsObjSize();
-      }
-
-      // Update all partitions.
-      bulkAlterPartitions(table.getDb().getName(), table.getName(), modifiedParts);
-      if (numUpdatedColumns > 0) {
-        Preconditions.checkNotNull(colStats);
-        // Update column stats.
-        try {
-          msClient.getHiveClient().updateTableColumnStatistics(colStats);
-        } catch (Exception e) {
-          throw new ImpalaRuntimeException(String.format(HMS_RPC_ERROR_FORMAT_STR,
-                  "updateTableColumnStatistics"), e);
-        }
-      }
-      // Update the table stats. Apply the table alteration last to ensure the
-      // lastDdlTime is as accurate as possible.
-      applyAlterTable(msTbl);
-    }
-
-    // Set the results to be reported to the client.
-    TResultSet resultSet = new TResultSet();
-    resultSet.setSchema(new TResultSetMetadata(Lists.newArrayList(
-        new TColumn("summary", Type.STRING.toThrift()))));
-    TColumnValue resultColVal = new TColumnValue();
-    resultColVal.setString_val("Updated " + numTargetedPartitions + " partition(s) and " +
-        numUpdatedColumns + " column(s).");
-    TResultRow resultRow = new TResultRow();
-    resultRow.setColVals(Lists.newArrayList(resultColVal));
-    resultSet.setRows(Lists.newArrayList(resultRow));
-    resp.setResult_set(resultSet);
-  }
-
-  /**
-   * Updates the row counts of the given Hive partitions and the total row count of the
-   * given Hive table based on the given update stats parameters. All targeted
-   * partitions are updated unconditionally, even if their row counts have not changed
-   * (see IMPALA-2201). The modified partitions are returned in the modifiedParts
-   * parameter.
-   * Row counts for missing or new partitions as a result of concurrent table
-   * alterations are set to 0.
-   * Returns the number of partitions that were targeted for update (includes partitions
-   * whose row counts have not changed).
-   */
-  private int updateTableStats(Table table, TAlterTableUpdateStatsParams params,
-      org.apache.hadoop.hive.metastore.api.Table msTbl,
-      List<HdfsPartition> partitions, List<HdfsPartition> modifiedParts)
-      throws ImpalaException {
-    Preconditions.checkState(params.isSetPartition_stats());
-    Preconditions.checkState(params.isSetTable_stats());
-    // Update the partitions' ROW_COUNT parameter.
-    int numTargetedPartitions = 0;
-    for (HdfsPartition partition: partitions) {
-      // NULL keys are returned as 'NULL' in the partition_stats map, so don't substitute
-      // this partition's keys with Hive's replacement value.
-      List<String> partitionValues = partition.getPartitionValuesAsStrings(false);
-      TPartitionStats partitionStats = params.partition_stats.get(partitionValues);
-      if (partitionStats == null) {
-        // No stats were collected for this partition. This means it was not included
-        // in the original COMPUTE STATS statement. If the backend does not find any
-        // rows for a partition that should be included, it generates an empty
-        // TPartitionStats object.
-        if (!params.expect_all_partitions) continue;
-
-        // If all partitions are expected, fill in any missing stats with an empty entry.
-        partitionStats = new TPartitionStats();
-        if (params.is_incremental) {
-          partitionStats.intermediate_col_stats = Maps.newHashMap();
-        }
-        partitionStats.stats = new TTableStats();
-        partitionStats.stats.setNum_rows(0L);
-      }
-
-      // Unconditionally update the partition stats and row count, even if the partition
-      // already has identical ones. This behavior results in possibly redundant work,
-      // but it is predictable and easy to reason about because it does not depend on the
-      // existing state of the metadata. See IMPALA-2201.
-      long numRows = partitionStats.stats.num_rows;
-      LOG.debug(String.format("Updating stats for partition %s: numRows=%s",
-          partition.getValuesAsString(), numRows));
-      PartitionStatsUtil.partStatsToParameters(partitionStats, partition);
-      partition.putToParameters(StatsSetupConst.ROW_COUNT, String.valueOf(numRows));
-      partition.putToParameters(StatsSetupConst.STATS_GENERATED_VIA_STATS_TASK,
-          StatsSetupConst.TRUE);
-      ++numTargetedPartitions;
-      modifiedParts.add(partition);
-    }
-
-    // For unpartitioned tables and HBase tables report a single updated partition.
-    if (table.getNumClusteringCols() == 0 || table instanceof HBaseTable) {
-      numTargetedPartitions = 1;
-      if (table instanceof HdfsTable) {
-        Preconditions.checkState(modifiedParts.size() == 1);
-        // Delete stats for this partition as they are included in table stats.
-        PartitionStatsUtil.deletePartStats(modifiedParts.get(0));
-      }
-    }
-
-    // Update the table's ROW_COUNT parameter.
-    msTbl.putToParameters(StatsSetupConst.ROW_COUNT,
-        String.valueOf(params.getTable_stats().num_rows));
-    msTbl.putToParameters(StatsSetupConst.STATS_GENERATED_VIA_STATS_TASK,
-        StatsSetupConst.TRUE);
-    return numTargetedPartitions;
-  }
-
-  /**
-   * Creates Hive column statistics for the given table based on the given map from column
-   * name to column stats. Missing or new columns as a result of concurrent table
-   * alterations are ignored.
-   */
-  private static ColumnStatistics createHiveColStats(
-      Map<String, TColumnStats> columnStats, Table table) {
-    // Collection of column statistics objects to be returned.
-    ColumnStatistics colStats = new ColumnStatistics();
-    colStats.setStatsDesc(
-        new ColumnStatisticsDesc(true, table.getDb().getName(), table.getName()));
-    // Generate Hive column stats objects from the update stats params.
-    for (Map.Entry<String, TColumnStats> entry: columnStats.entrySet()) {
-      String colName = entry.getKey();
-      Column tableCol = table.getColumn(entry.getKey());
-      // Ignore columns that were dropped in the meantime.
-      if (tableCol == null) continue;
-      ColumnStatisticsData colStatsData =
-          createHiveColStatsData(entry.getValue(), tableCol.getType());
-      if (colStatsData == null) continue;
-      LOG.debug(String.format("Updating column stats for %s: numDVs=%s numNulls=%s " +
-          "maxSize=%s avgSize=%s", colName, entry.getValue().getNum_distinct_values(),
-          entry.getValue().getNum_nulls(), entry.getValue().getMax_size(),
-          entry.getValue().getAvg_size()));
-      ColumnStatisticsObj colStatsObj = new ColumnStatisticsObj(colName,
-          tableCol.getType().toString(), colStatsData);
-      colStats.addToStatsObj(colStatsObj);
-    }
-    return colStats;
-  }
-
-  private static ColumnStatisticsData createHiveColStatsData(TColumnStats colStats,
-      Type colType) {
-    ColumnStatisticsData colStatsData = new ColumnStatisticsData();
-    long ndvs = colStats.getNum_distinct_values();
-    long numNulls = colStats.getNum_nulls();
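-    // Types without a corresponding Hive column stats representation fall through to
-    // the default case and return null; the caller skips such columns.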
-    switch(colType.getPrimitiveType()) {
-      case BOOLEAN:
-        // TODO: Gather and set the numTrues and numFalse stats as well. The planner
-        // currently does not rely on them.
-        colStatsData.setBooleanStats(new BooleanColumnStatsData(1, -1, numNulls));
-        break;
-      case TINYINT:
-      case SMALLINT:
-      case INT:
-      case BIGINT:
-      case TIMESTAMP: // Hive and Impala use LongColumnStatsData for timestamps.
-        // TODO: Gather and set the min/max values stats as well. The planner
-        // currently does not rely on them.
-        colStatsData.setLongStats(new LongColumnStatsData(numNulls, ndvs));
-        break;
-      case FLOAT:
-      case DOUBLE:
-        // TODO: Gather and set the min/max values stats as well. The planner
-        // currently does not rely on them.
-        colStatsData.setDoubleStats(new DoubleColumnStatsData(numNulls, ndvs));
-        break;
-      case CHAR:
-      case VARCHAR:
-      case STRING:
-        long maxStrLen = colStats.getMax_size();
-        double avgStrLen = colStats.getAvg_size();
-        colStatsData.setStringStats(
-            new StringColumnStatsData(maxStrLen, avgStrLen, numNulls, ndvs));
-        break;
-      case DECIMAL:
-        // TODO: Gather and set the min/max values stats as well. The planner
-        // currently does not rely on them.
-        colStatsData.setDecimalStats(
-            new DecimalColumnStatsData(numNulls, ndvs));
-        break;
-      default:
-        return null;
-    }
-    return colStatsData;
-  }
-
-  /**
-   * Creates a new database in the metastore and adds the db name to the internal
-   * metadata cache, marking its metadata to be lazily loaded on the next access.
-   * Re-throws any Hive Meta Store exceptions encountered during the create; these
-   * may vary depending on the Meta Store connection type (thrift vs direct db).
-   */
-  private void createDatabase(TCreateDbParams params, TDdlExecResponse resp)
-      throws ImpalaException {
-    Preconditions.checkNotNull(params);
-    String dbName = params.getDb();
-    Preconditions.checkState(dbName != null && !dbName.isEmpty(),
-        "Null or empty database name passed as argument to Catalog.createDatabase");
-    if (params.if_not_exists && catalog_.getDb(dbName) != null) {
-      LOG.debug("Skipping database creation because " + dbName + " already exists and " +
-          "IF NOT EXISTS was specified.");
-      resp.getResult().setVersion(catalog_.getCatalogVersion());
-      return;
-    }
-    org.apache.hadoop.hive.metastore.api.Database db =
-        new org.apache.hadoop.hive.metastore.api.Database();
-    db.setName(dbName);
-    if (params.getComment() != null) {
-      db.setDescription(params.getComment());
-    }
-    if (params.getLocation() != null) {
-      db.setLocationUri(params.getLocation());
-    }
-    LOG.debug("Creating database " + dbName);
-    Db newDb = null;
-    synchronized (metastoreDdlLock_) {
-      try (MetaStoreClient msClient = catalog_.getMetaStoreClient()) {
-        try {
-          msClient.getHiveClient().createDatabase(db);
-          newDb = catalog_.addDb(dbName, db);
-        } catch (AlreadyExistsException e) {
-          if (!params.if_not_exists) {
-            throw new ImpalaRuntimeException(
-                String.format(HMS_RPC_ERROR_FORMAT_STR, "createDatabase"), e);
-          }
-          LOG.debug(String.format("Ignoring '%s' when creating database %s because " +
-              "IF NOT EXISTS was specified.", e, dbName));
-          newDb = catalog_.getDb(dbName);
-          if (newDb == null) {
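-            // The database exists in the HMS but is not yet in the catalog cache,
-            // so fetch its metadata from the HMS and add it to the cache.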
-            try {
-              org.apache.hadoop.hive.metastore.api.Database msDb =
-                  msClient.getHiveClient().getDatabase(dbName);
-              newDb = catalog_.addDb(dbName, msDb);
-            } catch (TException e1) {
-              throw new ImpalaRuntimeException(
-                  String.format(HMS_RPC_ERROR_FORMAT_STR, "createDatabase"), e1);
-            }
-          }
-        } catch (TException e) {
-          throw new ImpalaRuntimeException(
-              String.format(HMS_RPC_ERROR_FORMAT_STR, "createDatabase"), e);
-        }
-      }
-
-      Preconditions.checkNotNull(newDb);
-      TCatalogObject thriftDb = new TCatalogObject(
-          TCatalogObjectType.DATABASE, Catalog.INITIAL_CATALOG_VERSION);
-      thriftDb.setDb(newDb.toThrift());
-      thriftDb.setCatalog_version(newDb.getCatalogVersion());
-      resp.result.setUpdated_catalog_object_DEPRECATED(thriftDb);
-    }
-    resp.result.setVersion(
-        resp.result.getUpdated_catalog_object_DEPRECATED().getCatalog_version());
-  }
-
-  private TCatalogObject buildTCatalogFnObject(Function fn) {
-    TCatalogObject result = new TCatalogObject();
-    result.setType(TCatalogObjectType.FUNCTION);
-    result.setFn(fn.toThrift());
-    result.setCatalog_version(fn.getCatalogVersion());
-    return result;
-  }
-
-  private void createFunction(TCreateFunctionParams params, TDdlExecResponse resp)
-      throws ImpalaException {
-    Function fn = Function.fromThrift(params.getFn());
-    LOG.debug(String.format("Adding %s: %s",
-        fn.getClass().getSimpleName(), fn.signatureString()));
-    boolean isPersistentJavaFn =
-        (fn.getBinaryType() == TFunctionBinaryType.JAVA) && fn.isPersistent();
-    synchronized (metastoreDdlLock_) {
-      Db db = catalog_.getDb(fn.dbName());
-      if (db == null) {
-        throw new CatalogException("Database: " + fn.dbName() + " does not exist.");
-      }
-      // Search for existing functions with the same name or signature that would
-      // conflict with the function being added.
-      for (Function function: db.getFunctions(fn.functionName())) {
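-        // A persistent Java function is registered under all of its signatures, so it
-        // conflicts with any existing function of the same name (and vice versa);
-        // other functions conflict only on an indistinguishable signature.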
-        if (isPersistentJavaFn || (function.isPersistent() &&
-            (function.getBinaryType() == TFunctionBinaryType.JAVA)) ||
-                function.compare(fn, Function.CompareMode.IS_INDISTINGUISHABLE)) {
-          if (!params.if_not_exists) {
-            throw new CatalogException("Function " + fn.functionName() +
-                " already exists.");
-          }
-          return;
-        }
-      }
-
-      List<TCatalogObject> addedFunctions = Lists.newArrayList();
-      if (isPersistentJavaFn) {
-        // For persistent Java functions we extract all supported function signatures from
-        // the corresponding Jar and add each signature to the catalog.
-        Preconditions.checkState(fn instanceof ScalarFunction);
-        org.apache.hadoop.hive.metastore.api.Function hiveFn =
-            ((ScalarFunction)fn).toHiveFunction();
-        List<Function> funcs = CatalogServiceCatalog.extractFunctions(fn.dbName(), hiveFn);
-        if (funcs.isEmpty()) {
-          throw new CatalogException(
-            "No compatible function signatures found in class: " + hiveFn.getClassName());
-        }
-        if (addJavaFunctionToHms(fn.dbName(), hiveFn, params.if_not_exists)) {
-          LOG.info("Funcs size:" + funcs.size());
-          for (Function addedFn: funcs) {
-            LOG.info(String.format("Adding function: %s.%s", addedFn.dbName(),
-                addedFn.signatureString()));
-            Preconditions.checkState(catalog_.addFunction(addedFn));
-            addedFunctions.add(buildTCatalogFnObject(addedFn));
-          }
-        }
-      } else {
-        if (catalog_.addFunction(fn)) {
-          // Flush DB changes to metastore
-          applyAlterDatabase(catalog_.getDb(fn.dbName()));
-          addedFunctions.add(buildTCatalogFnObject(fn));
-        }
-      }
-
-      if (!addedFunctions.isEmpty()) {
-        // Distinguish which result field to set based on the type of function being
-        // added for backwards compatibility. For example, BDR relies on a stable
-        // catalog Thrift API.
-        if (isPersistentJavaFn) {
-          // Only persistent Java UDFs can update multiple catalog objects.
-          resp.result.setUpdated_catalog_objects(addedFunctions);
-        } else {
-          Preconditions.checkState(addedFunctions.size() == 1);
-          resp.result.setUpdated_catalog_object_DEPRECATED(addedFunctions.get(0));
-        }
-        resp.result.setVersion(catalog_.getCatalogVersion());
-      }
-    }
-  }
-
-  private void createDataSource(TCreateDataSourceParams params, TDdlExecResponse resp)
-      throws ImpalaException {
-    if (LOG.isDebugEnabled()) { LOG.debug("Adding DATA SOURCE: " + params.toString()); }
-    DataSource dataSource = DataSource.fromThrift(params.getData_source());
-    if (catalog_.getDataSource(dataSource.getName()) != null) {
-      if (!params.if_not_exists) {
-        throw new ImpalaRuntimeException("Data source " + dataSource.getName() +
-            " already exists.");
-      }
-      // The user specified IF NOT EXISTS and the data source exists, just
-      // return the current catalog version.
-      resp.result.setVersion(catalog_.getCatalogVersion());
-      return;
-    }
-    catalog_.addDataSource(dataSource);
-    TCatalogObject addedObject = new TCatalogObject();
-    addedObject.setType(TCatalogObjectType.DATA_SOURCE);
-    addedObject.setData_source(dataSource.toThrift());
-    addedObject.setCatalog_version(dataSource.getCatalogVersion());
-    resp.result.setUpdated_catalog_object_DEPRECATED(addedObject);
-    resp.result.setVersion(dataSource.getCatalogVersion());
-  }
-
-  private void dropDataSource(TDropDataSourceParams params, TDdlExecResponse resp)
-      throws ImpalaException {
-    if (LOG.isDebugEnabled()) { LOG.debug("Drop DATA SOURCE: " + params.toString()); }
-    DataSource dataSource = catalog_.getDataSource(params.getData_source());
-    if (dataSource == null) {
-      if (!params.if_exists) {
-        throw new ImpalaRuntimeException("Data source " + params.getData_source() +
-            " does not exists.");
-      }
-      // The user specified IF EXISTS and the data source didn't exist, just
-      // return the current catalog version.
-      resp.result.setVersion(catalog_.getCatalogVersion());
-      return;
-    }
-    catalog_.removeDataSource(params.getData_source());
-    TCatalogObject removedObject = new TCatalogObject();
-    removedObject.setType(TCatalogObjectType.DATA_SOURCE);
-    removedObject.setData_source(dataSource.toThrift());
-    removedObject.setCatalog_version(dataSource.getCatalogVersion());
-    resp.result.setRemoved_catalog_object_DEPRECATED(removedObject);
-    resp.result.setVersion(dataSource.getCatalogVersion());
-  }
-
-  /**
-   * Drops all table and column stats from the target table in the HMS and
-   * updates the Impala catalog. Throws an ImpalaException if any errors are
-   * encountered as part of this operation. Acquires a lock on the modified table
-   * to protect against concurrent modifications.
-   */
-  private void dropStats(TDropStatsParams params, TDdlExecResponse resp)
-      throws ImpalaException {
-    Table table = getExistingTable(params.getTable_name().getDb_name(),
-        params.getTable_name().getTable_name());
-    Preconditions.checkNotNull(table);
-    catalog_.getLock().writeLock().lock();
-    synchronized(table) {
-      long newCatalogVersion = catalog_.incrementAndGetCatalogVersion();
-      catalog_.getLock().writeLock().unlock();
-      if (params.getPartition_spec() == null) {
-        // TODO: Report the number of updated partitions/columns to the user?
-        dropColumnStats(table);
-        dropTableStats(table);
-      } else {
-        HdfsPartition partition =
-            ((HdfsTable)table).getPartitionFromThriftPartitionSpec(
-                params.getPartition_spec());
-        if (partition == null) {
-          List<String> partitionDescription = Lists.newArrayList();
-          for (TPartitionKeyValue v: params.getPartition_spec()) {
-            partitionDescription.add(v.name + " = " + v.value);
-          }
-          throw new ImpalaRuntimeException("Could not find partition: " +
-              Joiner.on("/").join(partitionDescription));
-        }
-
-        if (partition.getPartitionStats() != null) {
-          PartitionStatsUtil.deletePartStats(partition);
-          try {
-            applyAlterPartition(table, partition);
-          } finally {
-            partition.markDirty();
-          }
-        }
-      }
-
-      loadTableMetadata(table, newCatalogVersion, false, true, null);
-      addTableToCatalogUpdate(table, resp.result);
-    } // end of synchronization
-  }
-
-  /**
-   * Drops all column stats from the table in the HMS. Returns the number of columns
-   * that were updated as part of this operation.
-   */
-  private int dropColumnStats(Table table) throws ImpalaRuntimeException {
-    Preconditions.checkState(Thread.holdsLock(table));
-    int numColsUpdated = 0;
-    try (MetaStoreClient msClient = catalog_.getMetaStoreClient()) {
-      for (Column col: table.getColumns()) {
-        // Skip columns that don't have stats.
-        if (!col.getStats().hasStats()) continue;
-
-        try {
-          msClient.getHiveClient().deleteTableColumnStatistics(
-              table.getDb().getName(), table.getName(), col.getName());
-          ++numColsUpdated;
-        } catch (NoSuchObjectException e) {
-          // We don't care if the column stats do not exist, just ignore the exception.
-          // We would only expect to make it here if the Impala and HMS metadata
-          // diverged.
-        } catch (TException e) {
-          throw new ImpalaRuntimeException(
-              String.format(HMS_RPC_ERROR_FORMAT_STR,
-                  "delete_table_column_statistics"), e);
-        }
-      }
-    }
-    return numColsUpdated;
-  }
-
-  /**
-   * Drops all table and partition stats from this table in the HMS.
-   * Partitions are updated in batches of MAX_PARTITION_UPDATES_PER_RPC. Returns
-   * the number of partitions updated as part of this operation, or 1 if the table
-   * is unpartitioned.
-   */
-  private int dropTableStats(Table table) throws ImpalaException {
-    Preconditions.checkState(Thread.holdsLock(table));
-    // Delete the ROW_COUNT from the table (if it was set).
-    org.apache.hadoop.hive.metastore.api.Table msTbl = table.getMetaStoreTable();
-    int numTargetedPartitions = 0;
-    if (msTbl.getParameters().remove(StatsSetupConst.ROW_COUNT) != null) {
-      applyAlterTable(msTbl);
-      ++numTargetedPartitions;
-    }
-
-    if (!(table instanceof HdfsTable) || table.getNumClusteringCols() == 0) {
-      // If this is not an HdfsTable or if the table is not partitioned, there
-      // is no more work to be done so just return.
-      return numTargetedPartitions;
-    }
-
-    // Now clear the stats for all partitions in the table.
-    HdfsTable hdfsTable = (HdfsTable) table;
-    Preconditions.checkNotNull(hdfsTable);
-
-    // List of partitions that were modified as part of this operation.
-    List<HdfsPartition> modifiedParts = Lists.newArrayList();
-    for (HdfsPartition part: hdfsTable.getPartitions()) {
-      boolean isModified = false;
-      // The default partition is an Impala-internal abstraction and is not
-      // represented in the Hive Metastore.
-      if (part.getId() == ImpalaInternalServiceConstants.DEFAULT_PARTITION_ID) {
-        continue;
-      }
-      if (part.getPartitionStats() != null) {
-        PartitionStatsUtil.deletePartStats(part);
-        isModified = true;
-      }
-
-      // Remove the ROW_COUNT parameter if it has been set.
-      if (part.getParameters().remove(StatsSetupConst.ROW_COUNT) != null) {
-        isModified = true;
-      }
-
-      if (isModified) modifiedParts.add(part);
-    }
-
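-    // Persist the modified partitions to the HMS in batches.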
-    bulkAlterPartitions(table.getDb().getName(), table.getName(), modifiedParts);
-    return modifiedParts.size();
-  }
-
-  /**
-   * Drops a database from the metastore and removes the database's metadata from the
-   * internal cache. Re-throws any Hive Meta Store exceptions encountered during
-   * the drop.
-   */
-  private void dropDatabase(TDropDbParams params, TDdlExecResponse resp)
-      throws ImpalaException {
-    Preconditions.checkNotNull(params);
-    Preconditions.checkState(params.getDb() != null && !params.getDb().isEmpty(),
-        "Null or empty database name passed as argument to Catalog.dropDatabase");
-
-    LOG.debug("Dropping database " + params.getDb());
-    Db db = catalog_.getDb(params.db);
-    if (db != null && db.numFunctions() > 0 && !params.cascade) {
-      throw new CatalogException("Database " + db.getName() + " is not empty");
-    }
-
-    TCatalogObject removedObject = new TCatalogObject();
-    synchronized (metastoreDdlLock_) {
-      try (MetaStoreClient msClient = catalog_.getMetaStoreClient()) {
-        msClient.getHiveClient().dropDatabase(
-            params.getDb(), true, params.if_exists, params.cascade);
-      } catch (TException e) {
-        throw new ImpalaRuntimeException(
-            String.format(HMS_RPC_ERROR_FORMAT_STR, "dropDatabase"), e);
-      }
-      Db removedDb = catalog_.removeDb(params.getDb());
-      // If no db was removed as part of this operation just return the current catalog
-      // version.
-      if (removedDb == null) {
-        removedObject.setCatalog_version(catalog_.getCatalogVersion());
-      } else {
-        removedObject.setCatalog_version(removedDb.getCatalogVersion());
-      }
-    }
-    removedObject.setType(TCatalogObjectType.DATABASE);
-    removedObject.setDb(new TDatabase());
-    removedObject.getDb().setDb_name(params.getDb());
-    resp.result.setVersion(removedObject.getCatalog_version());
-    resp.result.setRemoved_catalog_object_DEPRECATED(removedObject);
-  }
-
-  /**
-   * Drops a table or view from the metastore and removes it from the catalog.
-   * Also drops all associated caching requests on the table and/or table's partitions,
-   * uncaching all table data. If params.purge is true, table data is permanently
-   * deleted.
-   */
-  private void dropTableOrView(TDropTableOrViewParams params, TDdlExecResponse resp)
-      throws ImpalaException {
-    TableName tableName = TableName.fromThrift(params.getTable_name());
-    Preconditions.checkState(tableName != null && tableName.isFullyQualified());
-    LOG.debug(String.format("Dropping table/view %s", tableName));
-
-    TCatalogObject removedObject = new TCatalogObject();
-    synchronized (metastoreDdlLock_) {
-
-      // Forward the DDL operation to the specified storage backend.
-      try {
-        org.apache.hadoop.hive.metastore.api.Table msTbl = getExistingTable(
-            tableName.getDb(), tableName.getTbl()).getMetaStoreTable();
-        DdlDelegate handler = createDdlDelegate(msTbl);
-        handler.dropTable();
-      } catch (TableNotFoundException | DatabaseNotFoundException e) {
-        // Do nothing
-      }
-
-      Db db = catalog_.getDb(params.getTable_name().db_name);
-      if (db == null) {
-        if (params.if_exists) return;
-        throw new CatalogException("Database does not exist: " +
-            params.getTable_name().db_name);
-      }
-      Table existingTbl = db.getTable(params.getTable_name().table_name);
-      if (existingTbl == null) {
-        if (params.if_exists) return;
-        throw new CatalogException("Table/View does not exist: " + tableName);
-      }
-      // Check to make sure we don't drop a view with a "drop table" statement and
-      // vice versa. is_table field is marked optional in TDropTableOrViewParams to
-      // maintain catalog api compatibility.
-      // TODO: Remove params.isSetIs_table() check once catalog api compatibility is
-      // fixed.
-      if (params.isSetIs_table() && ((params.is_table && existingTbl instanceof View)
-          || (!params.is_table && !(existingTbl instanceof View)))) {
-        if (params.if_exists) return;
-        String errorMsg = "DROP " + (params.is_table ? "TABLE " : "VIEW ") +
-            "not allowed on a " + (params.is_table ? "view: " : "table: ") + tableName;
-        throw new CatalogException(errorMsg);
-      }
-      try (MetaStoreClient msClient = catalog_.getMetaStoreClient()) {
-        msClient.getHiveClient().dropTable(
-            tableName.getDb(), tableName.getTbl(), true, params.if_exists, params.purge);
-      } catch (TException e) {
-        throw new ImpalaRuntimeException(
-            String.format(HMS_RPC_ERROR_FORMAT_STR, "dropTable"), e);
-      }
-
-      Table table = catalog_.removeTable(params.getTable_name().db_name,
-          params.getTable_name().table_name);
-      if (table != null) {
-        resp.result.setVersion(table.getCatalogVersion());
-        if (table instanceof HdfsTable) {
-          HdfsTable hdfsTable = (HdfsTable) table;
-          if (hdfsTable.isMarkedCached()) {
-            try {
-              HdfsCachingUtil.uncacheTbl(table.getMetaStoreTable());
-            } catch (Exception e) {
-              LOG.error("Unable to uncache table: " + table.getFullName(), e);
-            }
-          }
-          if (table.getNumClusteringCols() > 0) {
-            for (HdfsPartition partition: hdfsTable.getPartitions()) {
-              if (partition.isMarkedCached()) {
-                try {
-                  HdfsCachingUtil.uncachePartition(partition);
-                } catch (Exception e) {
-                  LOG.error("Unable to uncache partition: " +
-                      partition.getPartitionName(), e);
-                }
-              }
-            }
-          }
-        }
-      } else {
-        resp.result.setVersion(catalog_.getCatalogVersion());
-      }
-    }
-    removedObject.setType(TCatalogObjectType.TABLE);
-    removedObject.setTable(new TTable());
-    removedObject.getTable().setTbl_name(tableName.getTbl());
-    removedObject.getTable().setDb_name(tableName.getDb());
-    removedObject.setCatalog_version(resp.result.getVersion());
-    resp.result.setRemoved_catalog_object_DEPRECATED(removedObject);
-  }
-
-  /**
-   * Truncates a table by deleting all files in its partition directories and dropping
-   * all column and table statistics. Acquires a table lock to protect against
-   * concurrent table modifications.
-   * TODO truncate specified partitions.
-   */
-  private void truncateTable(TTruncateParams params, TDdlExecResponse resp)
-      throws ImpalaException {
-    TTableName tblName = params.getTable_name();
-    Table table = null;
-    try {
-      table = getExistingTable(tblName.getDb_name(), tblName.getTable_name());
-    } catch (TableNotFoundException e) {
-      if (params.if_exists) return;
-      throw e;
-    }
-    Preconditions.checkNotNull(table);
-    if (!(table instanceof HdfsTable)) {
-      throw new CatalogException(
-          String.format("TRUNCATE TABLE not supported on non-HDFS table: %s",
-          table.getFullName()));
-    }
-    catalog_.getLock().writeLock().lock();
-    synchronized(table) {
-      long newCatalogVersion = catalog_.incrementAndGetCatalogVersion();
-      catalog_.getLock().writeLock().unlock();
-      try {
-        HdfsTable hdfsTable = (HdfsTable)table;
-        for (HdfsPartition part: hdfsTable.getPartitions()) {
-          if (part.isDefaultPartition()) continue;
-          FileSystemUtil.deleteAllVisibleFiles(new Path(part.getLocation()));
-        }
-
-        dropColumnStats(table);
-        dropTableStats(table);
-      } catch (Exception e) {
-        String fqName = tblName.db_name + "." + tblName.table_name;
-        throw new CatalogException(String.format("Failed to truncate table: %s.\n" +
-            "Table may be in a partially truncated state.", fqName), e);
-      }
-
-      loadTableMetadata(table, newCatalogVersion, true, true, null);
-      addTableToCatalogUpdate(table, resp.result);
-    } // end synchronization
-  }
-
-  private void dropFunction(TDropFunctionParams params, TDdlExecResponse resp)
-      throws ImpalaException {
-    FunctionName fName = FunctionName.fromThrift(params.fn_name);
-    synchronized (metastoreDdlLock_) {
-      Db db = catalog_.getDb(fName.getDb());
-      if (db == null) {
-        if (!params.if_exists) {
-            throw new CatalogException("Database: " + fName.getDb()
-                + " does not exist.");
-        }
-        return;
-      }
-      List<TCatalogObject> removedFunctions = Lists.newArrayList();
-      if (!params.isSetSignature()) {
-        dropJavaFunctionFromHms(fName.getDb(), fName.getFunction(), params.if_exists);
-        for (Function fn: db.getFunctions(fName.getFunction())) {
-          if (fn.getBinaryType() != TFunctionBinaryType.JAVA
-              || !fn.isPersistent()) {
-            continue;
-          }
-          Preconditions.checkNotNull(catalog_.removeFunction(fn));
-          removedFunctions.add(buildTCatalogFnObject(fn));
-        }
-      } else {
-        ArrayList<Type> argTypes = Lists.newArrayList();
-        for (TColumnType t: params.arg_types) {
-          argTypes.add(Type.fromThrift(t));
-        }
-        Function desc = new Function(fName, argTypes, Type.INVALID, false);
-        Function fn = catalog_.removeFunction(desc);
-        if (fn == null) {
-          if (!params.if_exists) {
-            throw new CatalogException(
-                "Function: " + desc.signatureString() + " does not exist.");
-          }
-        } else {
-          // Flush DB changes to metastore
-          applyAlterDatabase(catalog_.getDb(fn.dbName()));
-          removedFunctions.add(buildTCatalogFnObject(fn));
-        }
-      }
-
-      if (!removedFunctions.isEmpty()) {
-        // Distinguish which result field to set based on the type of functions removed
-        // for backwards compatibility. For example, BDR relies on a stable catalog
-        // Thrift API.
-        if (!params.isSetSignature()) {
-          // Removing all signatures of a persistent Java UDF.
-          resp.result.setRemoved_catalog_objects(removedFunctions);
-        } else {
-          Preconditions.checkState(removedFunctions.size() == 1);
-          resp.result.setRemoved_catalog_object_DEPRECATED(removedFunctions.get(0));
-        }
-      }
-      resp.result.setVersion(catalog_.getCatalogVersion());
-    }
-  }
-
-  /**
-   * Creates a new table in the metastore and adds an entry to the metadata cache to
-   * lazily load the new metadata on the next access. Re-throws any Hive Meta Store
-   * exceptions encountered during the create.
-   */
-  private boolean createTable(TCreateTableParams params, TDdlExecResponse response)
-      throws ImpalaException {
-    Preconditions.checkNotNull(params);
-    TableName tableName = TableName.fromThrift(params.getTable_name());
-    Preconditions.checkState(tableName != null && tableName.isFullyQualified());
-    Preconditions.checkState(params.getColumns() != null &&
-        params.getColumns().size() > 0,
-        "Null or empty column list given as argument to Catalog.createTable");
-
-    if (params.if_not_exists &&
-        catalog_.containsTable(tableName.getDb(), tableName.getTbl())) {
-      LOG.debug(String.format("Skipping table creation because %s already exists and " +
-          "IF NOT EXISTS was specified.", tableName));
-      response.getResult().setVersion(catalog_.getCatalogVersion());
-      return false;
-    }
-    org.apache.hadoop.hive.metastore.api.Table tbl =
-        createMetaStoreTable(params);
-    LOG.debug(String.format("Creating table %s", tableName));
-    return createTable(tbl, params.if_not_exists, params.getCache_op(),
-        params.getDistribute_by(), response);
-  }
-
-  /**
-   * Creates a new view in the metastore and adds an entry to the metadata cache to
-   * lazily load the new metadata on the next access. Re-throws any Metastore
-   * exceptions encountered during the create.
-   */
-  private void createView(TCreateOrAlterViewParams params, TDdlExecResponse response)
-      throws ImpalaException {
-    TableName tableName = TableName.fromThrift(params.getView_name());
-    Preconditions.checkState(tableName != null && tableName.isFullyQualified());
-    Preconditions.checkState(params.getColumns() != null &&
-        params.getColumns().size() > 0,
-          "Null or empty column list given as argument to DdlExecutor.createView");
-    if (params.if_not_exists &&
-        catalog_.containsTable(tableName.getDb(), tableName.getTbl())) {
-      LOG.debug(String.format("Skipping view creation because %s already exists and " +
-          "ifNotExists is true.", tableName));
-    }
-
-    // Create new view.
-    org.apache.hadoop.hive.metastore.api.Table view =
-        new org.apache.hadoop.hive.metastore.api.Table();
-    setViewAttributes(params, view);
-    LOG.debug(String.format("Creating view %s", tableName));
-    createTable(view, params.if_not_exists, null, null, response);
-  }
-
-  /**
-   * Creates a new table in the metastore based on the definition of an existing table.
-   * No data is copied as part of this process; it is a metadata-only operation. If the
-   * creation succeeds, an entry is added to the metadata cache to lazily load the new
-   * table's metadata on the next access.
-   */
-  private void createTableLike(TCreateTableLikeParams params, TDdlExecResponse response)
-      throws ImpalaException {
-    Preconditions.checkNotNull(params);
-
-    THdfsFileFormat fileFormat =
-        params.isSetFile_format() ? params.getFile_format() : null;
-    String comment = params.isSetComment() ? params.getComment() : null;
-    TableName tblName = TableName.fromThrift(params.getTable_name());
-    TableName srcTblName = TableName.fromThrift(params.getSrc_table_name());
-    Preconditions.checkState(tblName != null && tblName.isFullyQualified());
-    Preconditions.checkState(srcTblName != null && srcTblName.isFullyQualified());
-
-    if (params.if_not_exists &&
-        catalog_.containsTable(tblName.getDb(), tblName.getTbl())) {
-      LOG.debug(String.format("Skipping table creation because %s already exists and " +
-          "IF NOT EXISTS was specified.", tblName));
-      response.getResult().setVersion(catalog_.getCatalogVersion());
-      return;
-    }
-    Table srcTable = getExistingTable(srcTblName.getDb(), srcTblName.getTbl());
-    org.apache.hadoop.hive.metastore.api.Table tbl =
-        srcTable.getMetaStoreTable().deepCopy();
-    tbl.setDbName(tblName.getDb());
-    tbl.setTableName(tblName.getTbl());
-    tbl.setOwner(params.getOwner());
-    if (tbl.getParameters() == null) {
-      tbl.setParameters(new HashMap<String, String>());
-    }
-    if (comment != null) {
-      tbl.getParameters().put("comment", comment);
-    }
-    // The EXTERNAL table property should not be copied from the old table.
-    if (params.is_external) {
-      tbl.setTableType(TableType.EXTERNAL_TABLE.toString());
-      tbl.putToParameters("EXTERNAL", "TRUE");
-    } else {
-      tbl.setTableType(TableType.MANAGED_TABLE.toString());
-      if (tbl.getParameters().containsKey("EXTERNAL")) {
-        tbl.getParameters().remove("EXTERNAL");
-      }
-    }
-
-    // We should not propagate hdfs caching parameters to the new table.
-    if (tbl.getParameters().containsKey(
-        HdfsCachingUtil.CACHE_DIR_ID_PROP_NAME)) {
-      tbl.getParameters().remove(HdfsCachingUtil.CACHE_DIR_ID_PROP_NAME);
-    }
-    if (tbl.getParameters().containsKey(
-        HdfsCachingUtil.CACHE_DIR_REPLICATION_PROP_NAME)) {
-      tbl.getParameters().remove(
-        HdfsCachingUtil.CACHE_DIR_REPLICATION_PROP_NAME);
-    }
-
-    // The LOCATION property should not be copied from the old table. If the location
-    // is null (the caller didn't specify a custom location) this will clear the value
-    // and the table will use the default table location from the parent database.
-    tbl.getSd().setLocation(params.getLocation());
-    if (fileFormat != null) {
-      setStorageDescriptorFileFormat(tbl.getSd(), fileFormat);
-    } else if (srcTable instanceof View) {
-      // The source table is a view, which has no input format. To be consistent with
-      // CREATE TABLE, the default input format is assumed to be TEXT unless otherwise
-      // specified.
-      setStorageDescriptorFileFormat(tbl.getSd(), THdfsFileFormat.TEXT);
-    }
-    // Set the row count of this table to unknown.
-    tbl.putToParameters(StatsSetupConst.ROW_COUNT, "-1");
-    LOG.debug(String.format("Creating table %s LIKE %s", tblName, srcTblName));
-    createTable(tbl, params.if_not_exists, null, null, response);
-  }
-
-  /**
-   * Creates a new table in the HMS. If 'ifNotExists' is true, no error is thrown if
-   * the table already exists; otherwise an exception is thrown.
-   * Accepts an optional 'cacheOp' param which, if specified, caches the table's
-   * HDFS location according to the 'cacheOp' spec after creation.
-   * Stores details of the operation (such as the resulting catalog version) in the
-   * 'response' output parameter.
-   * Returns true if a new table was created as part of this call, false otherwise.
-   */
-  private boolean createTable(org.apache.hadoop.hive.metastore.api.Table newTable,
-      boolean ifNotExists, THdfsCachingOp cacheOp, List<TDistributeParam> distribute_by,
-      TDdlExecResponse response)
-      throws ImpalaException {
-    synchronized (metastoreDdlLock_) {
-
-      try (MetaStoreClient msClient = catalog_.getMetaStoreClient()) {
-        msClient.getHiveClient().createTable(newTable);
-        // If this table should be cached, and the table location was not specified by
-        // the user, an extra step is needed to read the table to find the location.
-        if (cacheOp != null && cacheOp.isSet_cached() &&
-            newTable.getSd().getLocation() == null) {
-          newTable = msClient.getHiveClient().getTable(newTable.getDbName(),
-              newTable.getTableName());
-        }
-      } catch (AlreadyExistsException e) {
-        if (!ifNotExists) {
-          throw new ImpalaRuntimeException(
-              String.format(HMS_RPC_ERROR_FORMAT_STR, "createTable"), e);
-        }
-        LOG.debug(String.format("Ignoring '%s' when creating table %s.%s because " +
-            "IF NOT EXISTS was specified.", e,
-            newTable.getDbName(), newTable.getTableName()));
-        return false;
-      } catch (TException e) {
-        throw new ImpalaRuntimeException(
-            String.format(HMS_RPC_ERROR_FORMAT_STR, "createTable"), e);
-      }
-
-      // Forward the operation to a specific storage backend. If the operation fails,
-      // delete the just created hive table to avoid inconsistencies.
-      try {
-        createDdlDelegate(newTable).setDistributeParams(distribute_by).createTable();
-      } catch (ImpalaRuntimeException e) {
-        try (MetaStoreClient c = catalog_.getMetaStoreClient()) {
-          c.getHiveClient().dropTable(newTable.getDbName(), newTable.getTableName(),
-              false, ifNotExists);
-        } catch (Exception hE) {
-          throw new ImpalaRuntimeException(String.format(HMS_RPC_ERROR_FORMAT_STR,
-              "dropTable"), hE);
-        }
-        throw e;
-      }
-
-      // Submit the cache request and update the table metadata.
-      if (cacheOp != null && cacheOp.isSet_cached()) {
-        short replication = cacheOp.isSetReplication() ? cacheOp.getReplication() :
-            JniCatalogConstants.HDFS_DEFAULT_CACHE_REPLICATION_FACTOR;
-        long id = HdfsCachingUtil.submitCacheTblDirective(newTable,
-            cacheOp.getCache_pool_name(), replication);
-        catalog_.watchCacheDirs(Lists.<Long>newArrayList(id),
-            new TTableName(newTable.getDbName(), newTable.getTableName()));
-        applyAlterTable(newTable);
-      }
-      Table newTbl = catalog_.addTable(newTable.getDbName(), newTable.getTableName());
-      addTableToCatalogUpdate(newTbl, response.result);
-    }
-    return true;
-  }
-
-  /**
-   * Instantiate the appropriate DDL delegate for the table. If no known delegate is
-   * available for the table, returns an UnsupportedOpDelegate instance.
-   */
-  private DdlDelegate createDdlDelegate(org.apache.hadoop.hive.metastore.api.Table tab) {
-    if (KuduDdlDelegate.canHandle(tab)) return new KuduDdlDelegate(tab);
-    return new UnsupportedOpDelegate();
-  }
-
-  /**
-   * Sets the given params in the metastore table as appropriate for a view.
-   */
-  private void setViewAttributes(TCreateOrAlterViewParams params,
-      org.apache.hadoop.hive.metastore.api.Table view) {
-    view.setTableType(TableType.VIRTUAL_VIEW.toString());
-    view.setViewOriginalText(params.getOriginal_view_def());
-    view.setViewExpandedText(params.getExpanded_view_def());
-    view.setDbName(params.getView_name().getDb_name());
-    view.setTableName(params.getView_name().getTable_name());
-    view.setOwner(params.getOwner());
-    if (view.getParameters() == null) view.setParameters(new HashMap<String, String>());
-    if (params.isSetComment() && params.getComment() != null) {
-      view.getParameters().put("comment", params.getComment());
-    }
-
-    // Add all the columns to a new storage descriptor.
-    StorageDescriptor sd = new StorageDescriptor();
-    sd.setCols(buildFieldSchemaList(params.getColumns()));
-    // Set a dummy SerdeInfo for Hive.
-    sd.setSerdeInfo(new SerDeInfo());
-    view.setSd(sd);
-  }
-
-  /**
-   * Appends one or more columns to the given table, optionally replacing all existing
-   * columns.
-   */
-  private void alterTableAddReplaceCols(Table tbl, List<TColumn> columns,
-      boolean replaceExistingCols) throws ImpalaException {
-    Preconditions.checkState(Thread.holdsLock(tbl));
-    org.apache.hadoop.hive.metastore.api.Table msTbl = tbl.getMetaStoreTable().deepCopy();
-    List<FieldSchema> newColumns = buildFieldSchemaList(columns);
-    if (replaceExistingCols) {
-      msTbl.getSd().setCols(newColumns);
-    } else {
-      // Append the new columns to the existing list of columns.
-      for (FieldSchema fs: newColumns) {
-        msTbl.getSd().addToCols(fs);
-      }
-    }
-    applyAlterTable(msTbl);
-  }
-
-  /**
-   * Changes the column definition of an existing column. This can be used to rename a
-   * column, add a comment to a column, or change the datatype of a column.
-   */
-  private void alterTableChangeCol(Table tbl, String colName,
-      TColumn newCol) throws ImpalaException {
-    Preconditions.checkState(Thread.holdsLock(tbl));
-    org.apache.hadoop.hive.metastore.api.Table msTbl = tbl.getMetaStoreTable().deepCopy();
-    // Find the matching column name and change it.
-    Iterator<FieldSchema> iterator = msTbl.getSd().getColsIterator();
-    while (iterator.hasNext()) {
-      FieldSchema fs = iterator.next();
-      if (fs.getName().toLowerCase().equals(colName.toLowerCase())) {
-        fs.setName(newCol.getColumnName());
-        Type type = Type.fromThrift(newCol.getColumnType());
-        fs.setType(type.toSql().toLowerCase());
-        // Don't overwrite the existing comment unless a new comment is given
-        if (newCol.getComment() != null) {
-          fs.setComment(newCol.getComment());
-        }
-        break;
-      }
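-      // Reaching the end of the iterator without a match means the column does not
-      // exist in the table.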
-      if (!iterator.hasNext()) {
-        throw new ColumnNotFoundException(String.format(
-            "Column name %s not found in table %s.", colName, tbl.getFullName()));
-      }
-    }
-    applyAlterTable(msTbl);
-  }
-
-  /**
-   * Adds a new partition to the given table in Hive. Also creates and adds
-   * a new HdfsPartition to the corresponding HdfsTable.
-   * If cacheOp is not null, the partition's location will be cached according
-   * to the cacheOp. If cacheOp is null, the new partition will inherit the
-   * caching properties of the parent table.
-   * Returns null if the partition already exists in Hive and "IfNotExists"
-   * is true. Otherwise, returns the table object with an updated catalog version.
-   */
-  private Table alterTableAddPartition(Table tbl, List<TPartitionKeyValue> partitionSpec,
-      boolean ifNotExists, String location, THdfsCachingOp cacheOp)
-      throws ImpalaException {
-    Preconditions.checkState(Thread.holdsLock(tbl));
-    TableName tableName = tbl.getTableName();
-    if (ifNotExists && catalog_.containsHdfsPartition(tableName.getDb(),
-        tableName.getTbl(), partitionSpec)) {
-      LOG.debug(String.format("Skipping partition creation because (%s) already exists" +
-          " and ifNotExists is true.", Joiner.on(", ").join(partitionSpec)));
-      return null;
-    }
-
-    org.apache.hadoop.hive.metastore.api.Partition partition = null;
-    Table result = null;
-    List<Long> cacheIds = null;
-    org.apache.hadoop.hive.metastore.api.Table msTbl = tbl.getMetaStoreTable().deepCopy();
-    Long parentTblCacheDirId =
-        HdfsCachingUtil.getCacheDirectiveId(msTbl.getParameters());
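-    // A non-null directive id means the parent table is HDFS-cached; new partitions
-    // inherit its caching unless an explicit cache op says otherwise.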
-
-    partition = createHmsPartition(partitionSpec, msTbl, tableName, location);
-
-    try (MetaStoreClient msClient = catalog_.getMetaStoreClient()) {
-      // Add the new partition.
-      partition = msClient.getHiveClient().add_partition(partition);
-      String cachePoolName = null;
-      Short replication = null;
-      if (cacheOp == null && parentTblCacheDirId != null) {
-        // The user didn't specify an explicit caching operation, so inherit the value
-        // from the parent table.
-        cachePoolName = HdfsCachingUtil.getCachePool(parentTblCacheDirId);
-        Preconditions.checkNotNull(cachePoolName);
-        replication = HdfsCachingUtil.getCacheReplication(parentTblCacheDirId);
-        Preconditions.checkNotNull(replication);
-      } else if (cacheOp != null && cacheOp.isSet_cached()) {
-        // The user explicitly stated that this partition should be cached.
-        cachePoolName = cacheOp.getCache_pool_name();
-
-        // When the new partition should be cached and no replication factor
-        // was specified, inherit the replication factor from the parent table if
-        // it is cached. If the parent is not cached and no replication factor is
-        // explicitly set, use the default value.
-        if (!cacheOp.isSetReplication() && parentTblCacheDirId != null) {
-          replication = HdfsCachingUtil.getCacheReplication(parentTblCacheDirId);
-        } else {
-          replication = HdfsCachingUtil.getReplicationOrDefault(cacheOp);
-        }
-      }
-      // If cache pool name is not null, it indicates this partition should be cached.
-      if (cachePoolName != null) {
-        long id = HdfsCachingUtil.submitCachePartitionDirective(partition,
-            cachePoolName, replication);
-        cacheIds = Lists.<Long>newArrayList(id);
-        // Update the partition metadata to include the cache directive id.
-        msClient.getHiveClient().alter_partition(partition.getDbName(),
-            partition.getTableName(), partition);
-      }
-      updateLastDdlTime(msTbl, msClient);
-    } catch (AlreadyExistsException e) {
-      if (!ifNotExists) {
-        throw new ImpalaRuntimeException(
-            String.format(HMS_RPC_ERROR_FORMAT_STR, "add_partition"), e);
-      }
-      LOG.debug(String.format("Ignoring '%s' when adding partition to %s because" +
-          " ifNotExists is true.", e, tableName));
-    } catch (TException e) {
-      throw new ImpalaRuntimeException(
-          String.format(HMS_RPC_ERROR_FORMAT_STR, "add_partition"), e);
-    }
-    if (cacheIds != null) catalog_.watchCacheDirs(cacheIds, tableName.toThrift());
-    // Return the table object with an updated catalog version after creating the
-    // partition.
-    result = addHdfsPartition(tbl, partition);
-    return result;
-  }
-
-  /**
-   * Drops an existing partition from the given table in Hive. If the partition is cached,
-   * the associated cache directive will also be removed.
-   * Also drops the partition from its Hdfs table.
-   * Returns the table object with an updated catalog version. If the partition does not
-   * exist and "IfExists" is true, null is returned. If purge is true, partition data is
-   * permanently deleted.
-   */
-  private Table alterTableDropPartition(Table tbl,
-      List<TPartitionKeyValue> partitionSpec, boolean ifExists, boolean purge)
-      throws ImpalaException {
-    Preconditions.checkState(Thread.holdsLock(tbl));
-    TableName tableName = tbl.getTableName();
-    if (ifExists && !catalog_.containsHdfsPartition(tableName.getDb(), tableName.getTbl(),
-        partitionSpec)) {
-      LOG.debug(String.format("Skipping partition drop because (%s) does not exist " +
-          "and ifExists is true.", Joiner.on(", ").join(partitionSpec)));
-      return null;
-    }
-
-    HdfsPartition part = catalog_.getHdfsPartition(tableName.getDb(),
-        tableName.getTbl(), partitionSpec);
-    org.apache.hadoop.hive.metastore.api.Table msTbl =
-        tbl.getMetaStoreTable().deepCopy();
-    List<String> values = Lists.newArrayList();
-    // Need to add in the values in the same order they are defined in the table.
-    for (FieldSchema fs: msTbl.getPartitionKeys()) {
-      for (TPartitionKeyValue kv: partitionSpec) {
-        if (fs.getName().toLowerCase().equals(kv.getName().toLowerCase())) {
-          values.add(kv.getValue());
-        }
-      }
-    }
-    PartitionDropOptions dropOptions = PartitionDropOptions.instance();
-    dropOptions.purgeData(purge);
-    try (MetaStoreClient msClient = catalog_.getMetaStoreClient()) {
-      msClient.getHiveClient().dropPartition(tableName.getDb(),
-          tableName.getTbl(), values, dropOptions);
-      updateLastDdlTime(msTbl, msClient);
-      if (part.isMarkedCached()) {
-        HdfsCachingUtil.uncachePartition(part);
-      }
-    } catch (NoSuchObjectException e) {
-      if (!ifExists) {
-        throw new ImpalaRuntimeException(
-            String.format(HMS_RPC_ERROR_FORMAT_STR, "dropPartition"), e);
-      }
-      LOG.debug(String.format("Ignoring '%s' when dropping partition from %s because" +
-          " ifExists is true.", e, tableName));
-    } catch (TException e) {
-      throw new ImpalaRuntimeException(
-          String.format(HMS_RPC_ERROR_FORMAT_STR, "dropPartition"), e);
-    }
-    return catalog_.dropPartition(tbl, partitionSpec);
-  }
-
-  /**
-   * Removes a column from the given table.
-   */
-  private void alterTableDropCol(Table tbl, String colName) throws ImpalaException {
-    Preconditions.checkState(Thread.holdsLock(tbl));
-    org.apache.hadoop.hive.metastore.api.Table msTbl = tbl.getMetaStoreTable().deepCopy();
-    // Find the matching column name and remove it.
-    Iterator<FieldSchema> iterator = msTbl.getSd().getColsIterator();
-    while (iterator.hasNext()) {
-      FieldSchema fs = iterator.next();
-      if (fs.getName().toLowerCase().equals(colName.toLowerCase())) {
-        iterator.remove();
-        break;
-      }
-      if (!iterator.hasNext()) {
-        throw new ColumnNotFoundException(String.format(
-            "Column name %s not found in table %s.", colName, tbl.getFullName()));
-      }
-    }
-    applyAlterTable(msTbl);
-  }
-
-  /**
-   * Renames an existing table or view. Saves, drops and restores the column stats for
-   * tables renamed across databases to work around HIVE-9720/IMPALA-1711.
-   * After renaming the table/view, its metadata is marked as invalid and will be
-   * reloaded on the next access.
-   */
-  private void alterTableOrViewRename(Table oldTbl, TableName newTableName,
-      TDdlExecResponse response) throws ImpalaException {
-    Preconditions.checkState(Thread.holdsLock(oldTbl)
-        && catalog_.getLock().isWriteLockedByCurrentThread());
-    TableName tableName = oldTbl.getTableName();
-    org.apache.hadoop.hive.metastore.api.Table msTbl =
-        oldTbl.getMetaStoreTable().deepCopy();
-    msTbl.setDbName(newTableName.getDb());
-    msTbl.setTableName(newTableName.getTbl());
-    try (MetaStoreClient msClient = catalog_.getMetaStoreClient()) {
-      // Workaround for HIVE-9720/IMPALA-1711: When renaming a table with column
-      // stats across databases, we save, drop and restore the column stats because
-      // the HMS does not properly move them to the new table via alteration.
-      ColumnStatistics hmsColStats = null;
-      if (!msTbl.getTableType().equalsIgnoreCase(TableType.VIRTUAL_VIEW.toString())
-          && !tableName.getDb().equalsIgnoreCase(newTableName.getDb())) {
-        Map<String, TColumnStats> colStats = Maps.newHashMap();
-        for (Column c: oldTbl.getColumns()) {
-          colStats.put(c.getName(), c.getStats().toThrift());
-        }
-        hmsColStats = createHiveColStats(colStats, oldTbl);
-        // Set the new db/table.
-        hmsColStats.setStatsDesc(new ColumnStatisticsDesc(true, newTableName.getDb(),
-            newTableName.getTbl()));
-
-        LOG.trace(String.format("Dropping column stats for table %s being " +
-            "renamed to %s to workaround HIVE-9720.",
-            tableName.toString(), newTableName.toString()));
-        // Delete all column stats of the original table from the HMS.
-        msClient.getHiveClient().deleteTableColumnStatistics(
-            tableName.getDb(), tableName.getTbl(), null);
-      }
-
-      // Perform the table rename in any case.
-      msClient.getHiveClient().alter_table(tableName.getDb(), tableName.getTbl(), msTbl);
-
-      if (hmsColStats != null) {
-        LOG.trace(String.format("Restoring column stats for table %s being " +
-            "renamed to %s to workaround HIVE-9720.",
-            tableName.toString(), newTableName.toString()));
-        msClient.getHiveClient().updateTableColumnStatistics(hmsColStats);
-      }
-    } catch (TException e) {
-      throw new ImpalaRuntimeException(
-          String.format(HMS_RPC_ERROR_FORMAT_STR, "alter_table"), e);
-    }
-    // Rename the table in the Catalog and get the resulting catalog object.
-    // ALTER TABLE/VIEW RENAME is implemented as an ADD + DROP.
-    TCatalogObject newTable = TableToTCatalogObject(
-        catalog_.renameTable(tableName.toThrift(), newTableName.toThrift()));
-    TCatalogObject removedObject = new TCatalogObject();
-    removedObject.setType(TCatalogObjectType.TABLE);
-    removedObject.setTable(new TTable());
-    removedObject.getTable().setTbl_name(tableName.getTbl());
-    removedObject.getTable().setDb_name(tableName.getDb());
-    removedObject.setCatalog_version(newTable.getCatalog_version());
-    response.result.setRemoved_catalog_object_DEPRECATED(removedObject);
-    response.result.setUpdated_catalog_object_DEPRECATED(newTable);
-    response.result.setVersion(newTable.getCatalog_version());
-  }
-
-  /**
-   * Changes the file format for the given table or partition. This is a metadata-only
-   * operation; existing table data will not be converted to the new format. After
-   * changing the file format the table metadata is marked as invalid and will be
-   * reloaded on the next access.
-   */
-  private boolean alterTableSetFileFormat(Table tbl,
-      List<TPartitionKeyValue> partitionSpec, THdfsFileFormat fileFormat)
-      throws ImpalaException {
-    Preconditions.checkState(Thread.holdsLock(tbl));
-    Preconditions.checkState(partitionSpec == null || !partitionSpec.isEmpty());
-    boolean reloadFileMetadata = false;
-    if (partitionSpec == null) {
-      org.apache.hadoop.hive.metastore.api.Table msTbl =
-          tbl.getMetaStoreTable().deepCopy();
-      setStorageDescriptorFileFormat(msTbl.getSd(), fileFormat);
-      applyAlterTable(msTbl);
-      reloadFileMetadata = true;
-    } else {
-      TableName tableName = tbl.getTableName();
-      HdfsPartition partition = catalog_.getHdfsPartition(
-          tableName.getDb(), tableName.getTbl(), partitionSpec);
-      Preconditions.checkNotNull(partition);
-      partition.setFileFormat(HdfsFileFormat.fromThrift(fileFormat));
-      try {
-        applyAlterPartition(tbl, partition);
-      } finally {
-        partition.markDirty();
-      }
-    }
-    return reloadFileMetadata;
-  }
-
-  /**
-   * Helper method for setting the file format on a given storage descriptor.
-   */
-  private static void setStorageDescriptorFileFormat(StorageDescriptor sd,
-      THdfsFileFormat fileFormat) {
-    StorageDescriptor tempSd =
-        HiveStorageDescriptorFactory.createSd(fileFormat, RowFormat.DEFAULT_ROW_FORMAT);
-    sd.setInputFormat(tempSd.getInputFormat());
-    sd.setOutputFormat(tempSd.getOutputFormat());
-    sd.getSerdeInfo().setSerializationLib(tempSd.getSerdeInfo().getSerializationLib());
-  }
-
-  /**
-   * Changes the HDFS storage location for the given table. This is a metadata-only
-   * operation; existing table data will not be moved as part of changing the location.
-   */
-  private boolean alterTableSetLocation(Table tbl,
-      List<TPartitionKeyValue> partitionSpec, String location) throws ImpalaException {
-    Preconditions.checkState(Thread.holdsLock(tbl));
-    Preconditions.checkState(partitionSpec == null || !partitionSpec.isEmpty());
-    boolean reloadFileMetadata = false;
-    if (partitionSpec == null) {
-      org.apache.hadoop.hive.metastore.api.Table msTbl =
-          tbl.getMetaStoreTable().deepCopy();
-      if (msTbl.getPartitionKeysSize() == 0) reloadFileMetadata = true;
-      msTbl.getSd().setLocation(location);
-      applyAlterTable(msTbl);
-    } else {
-      TableName tableName = tbl.getTableName();
-      HdfsPartition partition = catalog_.getHdfsPartition(
-          tableName.getDb(), tableName.getTbl(), partiti

<TRUNCATED>
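
A note on the alterTableDropPartition() hunk above: before calling the metastore's
dropPartition(), it rebuilds the partition value list in the order the partition keys are
declared on the table, not the order the caller listed them. Below is a minimal stand-alone
sketch of just that reordering step; the class name, method name and example keys are
illustrative only and are not part of Impala's code:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class PartitionSpecOrdering {
  /**
   * Reorders user-supplied partition key/value pairs so the values follow the order in
   * which the partition keys are declared on the table (case-insensitive name match),
   * mirroring the reordering loop in the drop-partition hunk above.
   */
  static List<String> orderValues(List<String> declaredKeys, Map<String, String> spec) {
    List<String> values = new ArrayList<>();
    for (String key : declaredKeys) {
      for (Map.Entry<String, String> kv : spec.entrySet()) {
        if (key.equalsIgnoreCase(kv.getKey())) values.add(kv.getValue());
      }
    }
    return values;
  }

  public static void main(String[] args) {
    Map<String, String> spec = new LinkedHashMap<>();
    spec.put("month", "7");   // the user listed month first ...
    spec.put("year", "2016"); // ... but the table declares year before month
    // Prints [2016, 7]: the values follow the table's declared key order.
    System.out.println(orderValues(Arrays.asList("year", "month"), spec));
  }
}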


[26/61] [partial] incubator-impala git commit: IMPALA-3786: Replace "cloudera" with "apache" (part 1)

Posted by kw...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/catalog/ScalarFunction.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/catalog/ScalarFunction.java b/fe/src/main/java/com/cloudera/impala/catalog/ScalarFunction.java
deleted file mode 100644
index b921015..0000000
--- a/fe/src/main/java/com/cloudera/impala/catalog/ScalarFunction.java
+++ /dev/null
@@ -1,295 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.catalog;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.hadoop.hive.metastore.api.FunctionType;
-import org.apache.hadoop.hive.metastore.api.PrincipalType;
-import org.apache.hadoop.hive.metastore.api.ResourceType;
-import org.apache.hadoop.hive.metastore.api.ResourceUri;
-
-import com.cloudera.impala.analysis.FunctionName;
-import com.cloudera.impala.analysis.HdfsUri;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.hive.executor.UdfExecutor.JavaUdfDataType;
-import com.cloudera.impala.thrift.TFunction;
-import com.cloudera.impala.thrift.TFunctionBinaryType;
-import com.cloudera.impala.thrift.TScalarFunction;
-import com.cloudera.impala.thrift.TSymbolType;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-/**
- * Internal representation of a scalar function.
- */
-public class ScalarFunction extends Function {
-  // The name inside the binary at location_ that contains this particular
-  // function. e.g. org.example.MyUdf.class.
-  private String symbolName_;
-  private String prepareFnSymbol_;
-  private String closeFnSymbol_;
-
-  public ScalarFunction(FunctionName fnName, ArrayList<Type> argTypes, Type retType,
-      boolean hasVarArgs) {
-    super(fnName, argTypes, retType, hasVarArgs);
-  }
-
-  public ScalarFunction(FunctionName fnName, List<Type> argTypes,
-      Type retType, HdfsUri location, String symbolName, String initFnSymbol,
-      String closeFnSymbol) {
-    super(fnName, argTypes, retType, false);
-    setLocation(location);
-    setSymbolName(symbolName);
-    setPrepareFnSymbol(initFnSymbol);
-    setCloseFnSymbol(closeFnSymbol);
-  }
-
-  /**
-   * Creates a builtin scalar function. This is a helper that wraps a few steps
-   * into one call.
-   */
-  public static ScalarFunction createBuiltin(String name, ArrayList<Type> argTypes,
-      boolean hasVarArgs, Type retType, String symbol,
-      String prepareFnSymbol, String closeFnSymbol, boolean userVisible) {
-    Preconditions.checkNotNull(symbol);
-    ScalarFunction fn = new ScalarFunction(
-        new FunctionName(Catalog.BUILTINS_DB, name), argTypes, retType, hasVarArgs);
-    fn.setBinaryType(TFunctionBinaryType.BUILTIN);
-    fn.setUserVisible(userVisible);
-    fn.setIsPersistent(true);
-    try {
-      fn.symbolName_ = fn.lookupSymbol(symbol, TSymbolType.UDF_EVALUATE, null,
-          fn.hasVarArgs(), fn.getArgs());
-    } catch (AnalysisException e) {
-      // This should never happen
-      throw new RuntimeException("Builtin symbol '" + symbol + "'" + argTypes
-          + " not found!", e);
-    }
-    if (prepareFnSymbol != null) {
-      try {
-        fn.prepareFnSymbol_ = fn.lookupSymbol(prepareFnSymbol, TSymbolType.UDF_PREPARE);
-      } catch (AnalysisException e) {
-        // This should never happen
-        throw new RuntimeException(
-            "Builtin symbol '" + prepareFnSymbol + "' not found!", e);
-      }
-    }
-    if (closeFnSymbol != null) {
-      try {
-        fn.closeFnSymbol_ = fn.lookupSymbol(closeFnSymbol, TSymbolType.UDF_CLOSE);
-      } catch (AnalysisException e) {
-        // This should never happen
-        throw new RuntimeException(
-            "Builtin symbol '" + closeFnSymbol + "' not found!", e);
-      }
-    }
-    return fn;
-  }
-
-  /**
-   * Creates a Function object based on the following inputs.
-   * @param dbName Name of fn's database
-   * @param fnName Name of the function
-   * @param fnClass Function symbol name
-   * @param fnArgs List of Class objects corresponding to the args of evaluate method
-   * @param fnRetType Class corresponding to the return type of the evaluate method
-   * @param hdfsUri URI of the jar holding the udf class.
-   * @return Function object corresponding to the hive udf if the parameters are
-   *         compatible, null otherwise.
-   */
-  public static Function fromHiveFunction(String dbName, String fnName, String fnClass,
-      Class<?>[] fnArgs, Class<?> fnRetType, String hdfsUri) {
-    // Check if the return type and the method arguments are supported.
-    // Currently we only support certain primitive types.
-    JavaUdfDataType javaRetType = JavaUdfDataType.getType(fnRetType);
-    if (javaRetType == JavaUdfDataType.INVALID_TYPE) return null;
-    List<Type> fnArgsList = Lists.newArrayList();
-    for (Class<?> argClass: fnArgs) {
-      JavaUdfDataType javaUdfType = JavaUdfDataType.getType(argClass);
-      if (javaUdfType == JavaUdfDataType.INVALID_TYPE) return null;
-      fnArgsList.add(new ScalarType(
-          PrimitiveType.fromThrift(javaUdfType.getPrimitiveType())));
-    }
-    ScalarType retType = new ScalarType(
-        PrimitiveType.fromThrift(javaRetType.getPrimitiveType()));
-    ScalarFunction fn = new ScalarFunction(new FunctionName(dbName, fnName), fnArgsList,
-        retType, new HdfsUri(hdfsUri), fnClass, null, null);
-    // We do not support varargs for Java UDFs, and neither does Hive.
-    fn.setHasVarArgs(false);
-    fn.setBinaryType(TFunctionBinaryType.JAVA);
-    fn.setIsPersistent(true);
-    return fn;
-  }
-
-  /**
-   * Creates a Hive function object from 'this'. Returns null if 'this' is not
-   * a Java UDF.
-   */
-  public org.apache.hadoop.hive.metastore.api.Function toHiveFunction() {
-    if (getBinaryType() != TFunctionBinaryType.JAVA) return null;
-    List<ResourceUri> resources = Lists.newArrayList(new ResourceUri(ResourceType.JAR,
-        getLocation().toString()));
-    return new org.apache.hadoop.hive.metastore.api.Function(functionName(), dbName(),
-        symbolName_, "", PrincipalType.USER, (int) (System.currentTimeMillis() / 1000),
-        FunctionType.JAVA, resources);
-  }
-
-  /**
-   * Creates a builtin scalar operator function. This is a helper that wraps a few steps
-   * into one call.
-   * TODO: this needs to be kept in sync with what generates the be operator
-   * implementations (gen_functions.py). Is there a better way to coordinate this?
-   */
-  public static ScalarFunction createBuiltinOperator(String name,
-      ArrayList<Type> argTypes, Type retType) {
-    // Operators have a well-defined symbol based on the function name and type.
-    // Convert Add(TINYINT, TINYINT) --> Add_TinyIntVal_TinyIntVal
-    String beFn = Character.toUpperCase(name.charAt(0)) + name.substring(1);
-    boolean usesDecimal = false;
-    for (int i = 0; i < argTypes.size(); ++i) {
-      switch (argTypes.get(i).getPrimitiveType()) {
-        case BOOLEAN:
-          beFn += "_BooleanVal";
-          break;
-        case TINYINT:
-          beFn += "_TinyIntVal";
-          break;
-        case SMALLINT:
-          beFn += "_SmallIntVal";
-          break;
-        case INT:
-          beFn += "_IntVal";
-          break;
-        case BIGINT:
-          beFn += "_BigIntVal";
-          break;
-        case FLOAT:
-          beFn += "_FloatVal";
-          break;
-        case DOUBLE:
-          beFn += "_DoubleVal";
-          break;
-        case STRING:
-        case VARCHAR:
-          beFn += "_StringVal";
-          break;
-        case CHAR:
-          beFn += "_Char";
-          break;
-        case TIMESTAMP:
-          beFn += "_TimestampVal";
-          break;
-        case DECIMAL:
-          beFn += "_DecimalVal";
-          usesDecimal = true;
-          break;
-        default:
-          Preconditions.checkState(false,
-              "Argument type not supported: " + argTypes.get(i).toSql());
-      }
-    }
-    String beClass = usesDecimal ? "DecimalOperators" : "Operators";
-    String symbol = "impala::" + beClass + "::" + beFn;
-    return createBuiltinOperator(name, symbol, argTypes, retType);
-  }
-
-  public static ScalarFunction createBuiltinOperator(String name, String symbol,
-      ArrayList<Type> argTypes, Type retType) {
-    return createBuiltin(name, symbol, argTypes, false, retType, false);
-  }
-
-  public static ScalarFunction createBuiltin(String name, String symbol,
-      ArrayList<Type> argTypes, boolean hasVarArgs, Type retType,
-      boolean userVisible) {
-    ScalarFunction fn = new ScalarFunction(
-        new FunctionName(Catalog.BUILTINS_DB, name), argTypes, retType, hasVarArgs);
-    fn.setBinaryType(TFunctionBinaryType.BUILTIN);
-    fn.setUserVisible(userVisible);
-    fn.setIsPersistent(true);
-    try {
-      fn.symbolName_ = fn.lookupSymbol(symbol, TSymbolType.UDF_EVALUATE, null,
-          fn.hasVarArgs(), fn.getArgs());
-    } catch (AnalysisException e) {
-      // This should never happen
-      Preconditions.checkState(false, "Builtin symbol '" + symbol + "'" + argTypes
-          + " not found!" + e.getStackTrace());
-      throw new RuntimeException("Builtin symbol not found!", e);
-    }
-    return fn;
-  }
-
-  /**
-   * Create a function that is used to search the catalog for a matching builtin. Only
-   * the fields necessary for matching function prototypes are specified.
-   */
-  public static ScalarFunction createBuiltinSearchDesc(String name, Type[] argTypes,
-      boolean hasVarArgs) {
-    ArrayList<Type> fnArgs =
-        (argTypes == null) ? new ArrayList<Type>() : Lists.newArrayList(argTypes);
-    ScalarFunction fn = new ScalarFunction(
-        new FunctionName(Catalog.BUILTINS_DB, name), fnArgs, Type.INVALID, hasVarArgs);
-    fn.setBinaryType(TFunctionBinaryType.BUILTIN);
-    return fn;
-  }
-
-  /**
-   * Static helper method to create a scalar function of given
-   * TFunctionBinaryType.
-   */
-  public static ScalarFunction createForTesting(String db,
-      String fnName, ArrayList<Type> args, Type retType, String uriPath,
-      String symbolName, String initFnSymbol, String closeFnSymbol,
-      TFunctionBinaryType type) {
-    ScalarFunction fn = new ScalarFunction(new FunctionName(db, fnName), args,
-        retType, new HdfsUri(uriPath), symbolName, initFnSymbol, closeFnSymbol);
-    fn.setBinaryType(type);
-    fn.setIsPersistent(true);
-    return fn;
-  }
-
-  public void setSymbolName(String s) { symbolName_ = s; }
-  public void setPrepareFnSymbol(String s) { prepareFnSymbol_ = s; }
-  public void setCloseFnSymbol(String s) { closeFnSymbol_ = s; }
-
-  public String getSymbolName() { return symbolName_; }
-  public String getPrepareFnSymbol() { return prepareFnSymbol_; }
-  public String getCloseFnSymbol() { return closeFnSymbol_; }
-
-  @Override
-  public String toSql(boolean ifNotExists) {
-    StringBuilder sb = new StringBuilder("CREATE FUNCTION ");
-    if (ifNotExists) sb.append("IF NOT EXISTS ");
-    sb.append(dbName() + "." + signatureString() + "\n")
-      .append(" RETURNS " + getReturnType() + "\n")
-      .append(" LOCATION '" + getLocation() + "'\n")
-      .append(" SYMBOL='" + getSymbolName() + "'\n");
-    return sb.toString();
-  }
-
-  @Override
-  public TFunction toThrift() {
-    TFunction fn = super.toThrift();
-    fn.setScalar_fn(new TScalarFunction());
-    fn.getScalar_fn().setSymbol(symbolName_);
-    if (prepareFnSymbol_ != null) fn.getScalar_fn().setPrepare_fn_symbol(prepareFnSymbol_);
-    if (closeFnSymbol_ != null) fn.getScalar_fn().setClose_fn_symbol(closeFnSymbol_);
-    return fn;
-  }
-}
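
As a side note on the hunk above: createBuiltinOperator() derives the backend symbol
mechanically from the operator name and argument types, e.g. add(TINYINT, TINYINT) becomes
impala::Operators::Add_TinyIntVal_TinyIntVal, and any DECIMAL argument switches the class to
DecimalOperators. A rough stand-alone sketch of that mangling follows; the enum and class
names here are simplified stand-ins, not Impala's PrimitiveType or catalog classes:

import java.util.Arrays;
import java.util.List;

public class OperatorSymbolMangling {
  // Simplified stand-in for the subset of argument types handled above.
  enum ArgType { TINYINT, INT, DOUBLE, STRING, DECIMAL }

  static String suffix(ArgType t) {
    switch (t) {
      case TINYINT: return "_TinyIntVal";
      case INT:     return "_IntVal";
      case DOUBLE:  return "_DoubleVal";
      case STRING:  return "_StringVal";
      case DECIMAL: return "_DecimalVal";
      default: throw new IllegalArgumentException("Unsupported type: " + t);
    }
  }

  /** Builds the backend symbol for an operator, e.g. impala::Operators::Add_IntVal_IntVal. */
  static String mangle(String name, List<ArgType> argTypes) {
    StringBuilder fn = new StringBuilder(
        Character.toUpperCase(name.charAt(0)) + name.substring(1));
    boolean usesDecimal = false;
    for (ArgType t : argTypes) {
      fn.append(suffix(t));
      if (t == ArgType.DECIMAL) usesDecimal = true;
    }
    String beClass = usesDecimal ? "DecimalOperators" : "Operators";
    return "impala::" + beClass + "::" + fn;
  }

  public static void main(String[] args) {
    // Prints impala::Operators::Add_TinyIntVal_TinyIntVal
    System.out.println(mangle("add", Arrays.asList(ArgType.TINYINT, ArgType.TINYINT)));
    // Prints impala::DecimalOperators::Multiply_DecimalVal_DecimalVal
    System.out.println(mangle("multiply", Arrays.asList(ArgType.DECIMAL, ArgType.DECIMAL)));
  }
}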

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/catalog/ScalarType.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/catalog/ScalarType.java b/fe/src/main/java/com/cloudera/impala/catalog/ScalarType.java
deleted file mode 100644
index ae955f8..0000000
--- a/fe/src/main/java/com/cloudera/impala/catalog/ScalarType.java
+++ /dev/null
@@ -1,478 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.catalog;
-
-import org.apache.commons.lang3.StringUtils;
-
-import com.cloudera.impala.analysis.TypesUtil;
-import com.cloudera.impala.thrift.TColumnType;
-import com.cloudera.impala.thrift.TScalarType;
-import com.cloudera.impala.thrift.TTypeNode;
-import com.cloudera.impala.thrift.TTypeNodeType;
-import com.google.common.base.Preconditions;
-
-/**
- * Describes a scalar type. For most types this class just wraps a PrimitiveType enum,
- * but for types like CHAR and DECIMAL, this class contains additional information.
- *
- * Scalar types have a few ways they can be compared to other scalar types. They can be:
- *   1. completely identical,
- *   2. implicitly castable (convertible without loss of precision), or
- *   3. a subtype. For example, in the case of decimal, a type can be decimal(*, *),
- *   indicating that any decimal type is a subtype of the decimal type.
- */
-public class ScalarType extends Type {
-  private final PrimitiveType type_;
-
-  // Only used for type CHAR.
-  private int len_;
-
-  // Only used if type is DECIMAL. -1 (for both) is used to represent a
-  // decimal with any precision and scale.
-  // It is invalid to have one be -1 and not the other.
-  // TODO: we could use that to store DECIMAL(8,*), indicating a decimal
-  // with 8 digits of precision and any valid ([0-8]) scale.
-  private int precision_;
-  private int scale_;
-
-  // SQL allows the engine to pick the default precision. We pick the largest
-  // precision that is supported by the smallest decimal type in the BE (4 bytes).
-  public static final int DEFAULT_PRECISION = 9;
-  public static final int DEFAULT_SCALE = 0; // SQL standard
-
-  // Longest supported VARCHAR and CHAR, chosen to match Hive.
-  public static final int MAX_VARCHAR_LENGTH = 65355;
-  public static final int MAX_CHAR_LENGTH = 255;
-
-  // Longest CHAR that we store inline in the tuple.
-  // Keep consistent with backend ColumnType::CHAR_INLINE_LENGTH
-  public static final int CHAR_INLINE_LENGTH = 128;
-
-  // Hive, MySQL, SQL Server standard.
-  public static final int MAX_PRECISION = 38;
-  public static final int MAX_SCALE = MAX_PRECISION;
-
-  protected ScalarType(PrimitiveType type) {
-    type_ = type;
-  }
-
-  public static ScalarType createType(PrimitiveType type) {
-    switch (type) {
-      case INVALID_TYPE: return INVALID;
-      case NULL_TYPE: return NULL;
-      case BOOLEAN: return BOOLEAN;
-      case SMALLINT: return SMALLINT;
-      case TINYINT: return TINYINT;
-      case INT: return INT;
-      case BIGINT: return BIGINT;
-      case FLOAT: return FLOAT;
-      case DOUBLE: return DOUBLE;
-      case STRING: return STRING;
-      case VARCHAR: return createVarcharType();
-      case BINARY: return BINARY;
-      case TIMESTAMP: return TIMESTAMP;
-      case DATE: return DATE;
-      case DATETIME: return DATETIME;
-      case DECIMAL: return (ScalarType) createDecimalType();
-      default:
-        Preconditions.checkState(false);
-        return NULL;
-    }
-  }
-
-  public static ScalarType createCharType(int len) {
-    ScalarType type = new ScalarType(PrimitiveType.CHAR);
-    type.len_ = len;
-    return type;
-  }
-
-  public static ScalarType createDecimalType() { return DEFAULT_DECIMAL; }
-
-  public static ScalarType createDecimalType(int precision) {
-    return createDecimalType(precision, DEFAULT_SCALE);
-  }
-
-  public static ScalarType createDecimalType(int precision, int scale) {
-    Preconditions.checkState(precision >= 0); // Enforced by parser
-    Preconditions.checkState(scale >= 0); // Enforced by parser.
-    ScalarType type = new ScalarType(PrimitiveType.DECIMAL);
-    type.precision_ = precision;
-    type.scale_ = scale;
-    return type;
-  }
-
-  // Identical to createDecimalType except that higher precisions are truncated
-  // to the max storable precision. The BE will report overflow in these cases
-  // (think of this as adding ints to BIGINT but BIGINT can still overflow).
-  public static ScalarType createDecimalTypeInternal(int precision, int scale) {
-    ScalarType type = new ScalarType(PrimitiveType.DECIMAL);
-    type.precision_ = Math.min(precision, MAX_PRECISION);
-    type.scale_ = Math.min(type.precision_, scale);
-    return type;
-  }
-
-  public static ScalarType createVarcharType(int len) {
-    // length checked in analysis
-    ScalarType type = new ScalarType(PrimitiveType.VARCHAR);
-    type.len_ = len;
-    return type;
-  }
-
-  public static ScalarType createVarcharType() {
-    return DEFAULT_VARCHAR;
-  }
-
-  @Override
-  public String toString() {
-    if (type_ == PrimitiveType.CHAR) {
-      if (isWildcardChar()) return "CHAR(*)";
-      return "CHAR(" + len_ + ")";
-    } else  if (type_ == PrimitiveType.DECIMAL) {
-      if (isWildcardDecimal()) return "DECIMAL(*,*)";
-      return "DECIMAL(" + precision_ + "," + scale_ + ")";
-    } else if (type_ == PrimitiveType.VARCHAR) {
-      if (isWildcardVarchar()) return "VARCHAR(*)";
-      return "VARCHAR(" + len_ + ")";
-    }
-    return type_.toString();
-  }
-
-  @Override
-  public String toSql(int depth) {
-    if (depth >= MAX_NESTING_DEPTH) return "...";
-    switch(type_) {
-      case BINARY: return type_.toString();
-      case VARCHAR:
-      case CHAR:
-         return type_.toString() + "(" + len_ + ")";
-      case DECIMAL:
-        return String.format("%s(%s,%s)", type_.toString(), precision_, scale_);
-      default: return type_.toString();
-    }
-  }
-
-  @Override
-  protected String prettyPrint(int lpad) {
-    return StringUtils.repeat(' ', lpad) + toSql();
-  }
-
-  @Override
-  public void toThrift(TColumnType container) {
-    TTypeNode node = new TTypeNode();
-    container.types.add(node);
-    switch(type_) {
-      case VARCHAR:
-      case CHAR: {
-        node.setType(TTypeNodeType.SCALAR);
-        TScalarType scalarType = new TScalarType();
-        scalarType.setType(type_.toThrift());
-        scalarType.setLen(len_);
-        node.setScalar_type(scalarType);
-        break;
-      }
-      case DECIMAL: {
-        node.setType(TTypeNodeType.SCALAR);
-        TScalarType scalarType = new TScalarType();
-        scalarType.setType(type_.toThrift());
-        scalarType.setScale(scale_);
-        scalarType.setPrecision(precision_);
-        node.setScalar_type(scalarType);
-        break;
-      }
-      default: {
-        node.setType(TTypeNodeType.SCALAR);
-        TScalarType scalarType = new TScalarType();
-        scalarType.setType(type_.toThrift());
-        node.setScalar_type(scalarType);
-        break;
-      }
-    }
-  }
-
-  public static Type[] toColumnType(PrimitiveType[] types) {
-    Type result[] = new Type[types.length];
-    for (int i = 0; i < types.length; ++i) {
-      result[i] = createType(types[i]);
-    }
-    return result;
-  }
-
-  public int decimalPrecision() {
-    Preconditions.checkState(type_ == PrimitiveType.DECIMAL);
-    return precision_;
-  }
-
-  public int decimalScale() {
-    Preconditions.checkState(type_ == PrimitiveType.DECIMAL);
-    return scale_;
-  }
-
-  @Override
-  public PrimitiveType getPrimitiveType() { return type_; }
-  public int ordinal() { return type_.ordinal(); }
-  public int getLength() { return len_; }
-
-  @Override
-  public boolean isWildcardDecimal() {
-    return type_ == PrimitiveType.DECIMAL && precision_ == -1 && scale_ == -1;
-  }
-
-  @Override
-  public boolean isWildcardVarchar() {
-    return type_ == PrimitiveType.VARCHAR && len_ == -1;
-  }
-
-  @Override
-  public boolean isWildcardChar() {
-    return type_ == PrimitiveType.CHAR && len_ == -1;
-  }
-
-  /**
-   *  Returns true if this type is a fully specified (not wildcard) decimal.
-   */
-  @Override
-  public boolean isFullySpecifiedDecimal() {
-    if (!isDecimal()) return false;
-    if (isWildcardDecimal()) return false;
-    if (precision_ <= 0 || precision_ > MAX_PRECISION) return false;
-    if (scale_ < 0 || scale_ > precision_) return false;
-    return true;
-  }
-
-  @Override
-  public boolean isFixedLengthType() {
-    return type_ == PrimitiveType.BOOLEAN || type_ == PrimitiveType.TINYINT
-        || type_ == PrimitiveType.SMALLINT || type_ == PrimitiveType.INT
-        || type_ == PrimitiveType.BIGINT || type_ == PrimitiveType.FLOAT
-        || type_ == PrimitiveType.DOUBLE || type_ == PrimitiveType.DATE
-        || type_ == PrimitiveType.DATETIME || type_ == PrimitiveType.TIMESTAMP
-        || type_ == PrimitiveType.CHAR || type_ == PrimitiveType.DECIMAL;
-  }
-
-  @Override
-  public boolean isSupported() {
-    return !getUnsupportedTypes().contains(this);
-  }
-
-  @Override
-  public boolean supportsTablePartitioning() {
-    if (!isSupported() || isComplexType() || type_ == PrimitiveType.TIMESTAMP) {
-      return false;
-    }
-    return true;
-  }
-
-  @Override
-  public int getSlotSize() {
-    switch (type_) {
-      case CHAR:
-        if (len_ > CHAR_INLINE_LENGTH || len_ == 0) return STRING.getSlotSize();
-        return len_;
-      case DECIMAL: return TypesUtil.getDecimalSlotSize(this);
-      default:
-        return type_.getSlotSize();
-    }
-  }
-
-  /**
-   * Returns true if this object is of type t.
-   * Handles wildcard types. That is, if t is the wildcard type variant
-   * of 'this', returns true.
-   */
-  @Override
-  public boolean matchesType(Type t) {
-    if (equals(t)) return true;
-    if (!t.isScalarType()) return false;
-    ScalarType scalarType = (ScalarType) t;
-    if (type_ == PrimitiveType.VARCHAR && scalarType.isWildcardVarchar()) {
-      Preconditions.checkState(!isWildcardVarchar());
-      return true;
-    }
-    if (type_ == PrimitiveType.CHAR && scalarType.isWildcardChar()) {
-      Preconditions.checkState(!isWildcardChar());
-      return true;
-    }
-    if (isDecimal() && scalarType.isWildcardDecimal()) {
-      Preconditions.checkState(!isWildcardDecimal());
-      return true;
-    }
-    return false;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (!(o instanceof ScalarType)) return false;
-    ScalarType other = (ScalarType)o;
-    if (type_ != other.type_) return false;
-    if (type_ == PrimitiveType.CHAR) return len_ == other.len_;
-    if (type_ == PrimitiveType.VARCHAR) return len_ == other.len_;
-    if (type_ == PrimitiveType.DECIMAL) {
-      return precision_ == other.precision_ && scale_ == other.scale_;
-    }
-    return true;
-  }
-
-  public Type getMaxResolutionType() {
-    if (isIntegerType()) {
-      return ScalarType.BIGINT;
-    // Timestamps get summed as DOUBLE for AVG.
-    } else if (isFloatingPointType() || type_ == PrimitiveType.TIMESTAMP) {
-      return ScalarType.DOUBLE;
-    } else if (isNull()) {
-      return ScalarType.NULL;
-    } else if (isDecimal()) {
-      return createDecimalTypeInternal(MAX_PRECISION, scale_);
-    } else {
-      return ScalarType.INVALID;
-    }
-  }
-
-  public ScalarType getNextResolutionType() {
-    Preconditions.checkState(isNumericType() || isNull());
-    if (type_ == PrimitiveType.DOUBLE || type_ == PrimitiveType.BIGINT || isNull()) {
-      return this;
-    } else if (type_ == PrimitiveType.DECIMAL) {
-      return createDecimalTypeInternal(MAX_PRECISION, scale_);
-    }
-    return createType(PrimitiveType.values()[type_.ordinal() + 1]);
-  }
-
-  /**
-   * Returns the smallest decimal type that can safely store this type. Returns
-   * INVALID if this type cannot be stored as a decimal.
-   */
-  public ScalarType getMinResolutionDecimal() {
-    switch (type_) {
-      case NULL_TYPE: return Type.NULL;
-      case DECIMAL: return this;
-      case TINYINT: return createDecimalType(3);
-      case SMALLINT: return createDecimalType(5);
-      case INT: return createDecimalType(10);
-      case BIGINT: return createDecimalType(19);
-      case FLOAT: return createDecimalTypeInternal(MAX_PRECISION, 9);
-      case DOUBLE: return createDecimalTypeInternal(MAX_PRECISION, 17);
-      default: return ScalarType.INVALID;
-    }
-  }
-
-  /**
-   * Returns true if this decimal type is a supertype of the other decimal type.
-   * e.g. (10,3) is a supertype of (3,3) but (5,4) is not a supertype of (3,0).
-   * To be a supertype of another decimal, the number of digits both before and after
-   * the decimal point must be greater than or equal to the other type's.
-   */
-  public boolean isSupertypeOf(ScalarType o) {
-    Preconditions.checkState(isDecimal());
-    Preconditions.checkState(o.isDecimal());
-    if (isWildcardDecimal()) return true;
-    if (o.isWildcardDecimal()) return false;
-    return scale_ >= o.scale_ && precision_ - scale_ >= o.precision_ - o.scale_;
-  }
-
-  /**
-   * Return type t such that values from both t1 and t2 can be assigned to t.
-   * If strict, only return types when there will be no loss of precision.
-   * Returns INVALID_TYPE if there is no such type or if any of t1 and t2
-   * is INVALID_TYPE.
-   */
-  public static ScalarType getAssignmentCompatibleType(ScalarType t1,
-      ScalarType t2, boolean strict) {
-    if (!t1.isValid() || !t2.isValid()) return INVALID;
-    if (t1.equals(t2)) return t1;
-    if (t1.isNull()) return t2;
-    if (t2.isNull()) return t1;
-
-    if (t1.type_ == PrimitiveType.VARCHAR || t2.type_ == PrimitiveType.VARCHAR) {
-      if (t1.type_ == PrimitiveType.STRING || t2.type_ == PrimitiveType.STRING) {
-        return STRING;
-      }
-      if (t1.isStringType() && t2.isStringType()) {
-        return createVarcharType(Math.max(t1.len_, t2.len_));
-      }
-      return INVALID;
-    }
-
-    if (t1.type_ == PrimitiveType.CHAR || t2.type_ == PrimitiveType.CHAR) {
-      Preconditions.checkState(t1.type_ != PrimitiveType.VARCHAR);
-      Preconditions.checkState(t2.type_ != PrimitiveType.VARCHAR);
-      if (t1.type_ == PrimitiveType.STRING || t2.type_ == PrimitiveType.STRING) {
-        return STRING;
-      }
-      if (t1.type_ == PrimitiveType.CHAR && t2.type_ == PrimitiveType.CHAR) {
-        return createCharType(Math.max(t1.len_, t2.len_));
-      }
-      return INVALID;
-    }
-
-    if (t1.isDecimal() || t2.isDecimal()) {
-      // The case of decimal and float/double must be handled carefully. There are two
-      // modes: strict and non-strict. In non-strict mode, we convert to the floating
-      // point type, since it can contain a larger range of values than any decimal (but
-      // has lower precision in some parts of its range), so it is generally better.
-      // In strict mode, we avoid conversion in either direction because there are also
-      // decimal values (e.g. 0.1) that cannot be exactly represented in binary
-      // floating point.
-      // TODO: it might make sense to promote to double in many cases, but this would
-      // require more work elsewhere to avoid breaking things, e.g. inserting decimal
-      // literals into float columns.
-      if (t1.isFloatingPointType()) return strict ? INVALID : t1;
-      if (t2.isFloatingPointType()) return strict ? INVALID : t2;
-
-      // Allow casts between decimal and numeric types by converting
-      // numeric types to the containing decimal type.
-      ScalarType t1Decimal = t1.getMinResolutionDecimal();
-      ScalarType t2Decimal = t2.getMinResolutionDecimal();
-      if (t1Decimal.isInvalid() || t2Decimal.isInvalid()) return Type.INVALID;
-      Preconditions.checkState(t1Decimal.isDecimal());
-      Preconditions.checkState(t2Decimal.isDecimal());
-
-      if (t1Decimal.equals(t2Decimal)) {
-        Preconditions.checkState(!(t1.isDecimal() && t2.isDecimal()));
-        // The containing decimal type for a non-decimal type is always an exclusive
-        // upper bound, therefore the decimal has higher precision.
-        return t1Decimal;
-      }
-      if (t1Decimal.isSupertypeOf(t2Decimal)) return t1;
-      if (t2Decimal.isSupertypeOf(t1Decimal)) return t2;
-      return TypesUtil.getDecimalAssignmentCompatibleType(t1Decimal, t2Decimal);
-    }
-
-    PrimitiveType smallerType =
-        (t1.type_.ordinal() < t2.type_.ordinal() ? t1.type_ : t2.type_);
-    PrimitiveType largerType =
-        (t1.type_.ordinal() > t2.type_.ordinal() ? t1.type_ : t2.type_);
-    PrimitiveType result = null;
-    if (strict) {
-      result = strictCompatibilityMatrix[smallerType.ordinal()][largerType.ordinal()];
-    }
-    if (result == null) {
-      result = compatibilityMatrix[smallerType.ordinal()][largerType.ordinal()];
-    }
-    Preconditions.checkNotNull(result);
-    return createType(result);
-  }
-
-  /**
-   * Returns true if t1 can be implicitly cast to t2, false otherwise.
-   * If strict is true, only consider casts that result in no loss of precision.
-   */
-  public static boolean isImplicitlyCastable(ScalarType t1, ScalarType t2,
-      boolean strict) {
-    return getAssignmentCompatibleType(t1, t2, strict).matchesType(t2);
-  }
-}
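
The decimal rule in isSupertypeOf() above compares the digits on each side of the decimal
point separately: DECIMAL(p1,s1) covers DECIMAL(p2,s2) only when s1 >= s2 and
p1 - s1 >= p2 - s2, which is why (10,3) covers (3,3) while (5,4) does not cover (3,0).
A small, hypothetical sketch of just that check (wildcard handling omitted, class and method
names invented for the example):

public class DecimalSupertypeCheck {
  /**
   * Returns true if DECIMAL(p1,s1) can represent every value of DECIMAL(p2,s2),
   * i.e. it has at least as many digits both before and after the decimal point.
   */
  static boolean isSupertypeOf(int p1, int s1, int p2, int s2) {
    return s1 >= s2 && (p1 - s1) >= (p2 - s2);
  }

  public static void main(String[] args) {
    // true: 7 vs 0 integer digits, 3 vs 3 fractional digits.
    System.out.println(isSupertypeOf(10, 3, 3, 3));
    // false: only 1 integer digit available, but 3 are required.
    System.out.println(isSupertypeOf(5, 4, 3, 0));
  }
}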

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/catalog/StructField.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/catalog/StructField.java b/fe/src/main/java/com/cloudera/impala/catalog/StructField.java
deleted file mode 100644
index 4b9cb80..0000000
--- a/fe/src/main/java/com/cloudera/impala/catalog/StructField.java
+++ /dev/null
@@ -1,92 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.catalog;
-
-import org.apache.commons.lang3.StringUtils;
-
-import com.cloudera.impala.thrift.TColumnType;
-import com.cloudera.impala.thrift.TStructField;
-import com.cloudera.impala.thrift.TTypeNode;
-
-/**
- * TODO: Support comments for struct fields. The Metastore does not properly store
- * comments of struct fields. We set comment_ to null to avoid compatibility issues.
- */
-public class StructField {
-  protected final String name_;
-  protected final Type type_;
-  protected final String comment_;
-  protected int position_;  // in struct
-
-  public StructField(String name, Type type, String comment) {
-    name_ = name;
-    type_ = type;
-    comment_ = comment;
-  }
-
-  public StructField(String name, Type type) {
-    this(name, type, null);
-  }
-
-  public String getComment() { return comment_; }
-  public String getName() { return name_; }
-  public Type getType() { return type_; }
-  public int getPosition() { return position_; }
-  public void setPosition(int position) { position_ = position; }
-
-  public String toSql(int depth) {
-    String typeSql = (depth < Type.MAX_NESTING_DEPTH) ? type_.toSql(depth) : "...";
-    StringBuilder sb = new StringBuilder(name_);
-    if (type_ != null) sb.append(":" + typeSql);
-    if (comment_ != null) sb.append(String.format(" COMMENT '%s'", comment_));
-    return sb.toString();
-  }
-
-  /**
-   * Pretty prints this field with lpad number of leading spaces.
-   * Calls prettyPrint(lpad) on this field's type.
-   */
-  public String prettyPrint(int lpad) {
-    String leftPadding = StringUtils.repeat(' ', lpad);
-    StringBuilder sb = new StringBuilder(leftPadding + name_);
-    if (type_ != null) {
-      // Pass in the padding to make sure nested fields are aligned properly,
-      // even if we then strip the top-level padding.
-      String typeStr = type_.prettyPrint(lpad);
-      typeStr = typeStr.substring(lpad);
-      sb.append(":" + typeStr);
-    }
-    if (comment_ != null) sb.append(String.format(" COMMENT '%s'", comment_));
-    return sb.toString();
-  }
-
-  public void toThrift(TColumnType container, TTypeNode node) {
-    TStructField field = new TStructField();
-    field.setName(name_);
-    if (comment_ != null) field.setComment(comment_);
-    node.struct_fields.add(field);
-    type_.toThrift(container);
-  }
-
-  @Override
-  public boolean equals(Object other) {
-    if (!(other instanceof StructField)) return false;
-    StructField otherStructField = (StructField) other;
-    return otherStructField.name_.equals(name_) && otherStructField.type_.equals(type_);
-  }
-}
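
For reference, toSql() above renders a field as name:TYPE, appending COMMENT '...' only when
a comment is present. A short sketch of that formatting with a plain string standing in for
the field's Type; the class name and example fields are made up for illustration:

public class StructFieldRendering {
  /** Renders a struct field the way toSql() above does, with the type given as a string. */
  static String fieldToSql(String name, String typeSql, String comment) {
    StringBuilder sb = new StringBuilder(name);
    if (typeSql != null) sb.append(":").append(typeSql);
    if (comment != null) sb.append(String.format(" COMMENT '%s'", comment));
    return sb.toString();
  }

  public static void main(String[] args) {
    // Prints id:BIGINT COMMENT 'primary key'
    System.out.println(fieldToSql("id", "BIGINT", "primary key"));
    // Prints payload:STRUCT<a:INT,b:STRING>
    System.out.println(fieldToSql("payload", "STRUCT<a:INT,b:STRING>", null));
  }
}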

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/catalog/StructType.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/catalog/StructType.java b/fe/src/main/java/com/cloudera/impala/catalog/StructType.java
deleted file mode 100644
index 3600b6b..0000000
--- a/fe/src/main/java/com/cloudera/impala/catalog/StructType.java
+++ /dev/null
@@ -1,107 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.catalog;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-
-import org.apache.commons.lang3.StringUtils;
-
-import com.cloudera.impala.thrift.TColumnType;
-import com.cloudera.impala.thrift.TStructField;
-import com.cloudera.impala.thrift.TTypeNode;
-import com.cloudera.impala.thrift.TTypeNodeType;
-import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-
-/**
- * Describes a STRUCT type. STRUCT types have a list of named struct fields.
- */
-public class StructType extends Type {
-  private final HashMap<String, StructField> fieldMap_ = Maps.newHashMap();
-  private final ArrayList<StructField> fields_;
-
-  public StructType(ArrayList<StructField> fields) {
-    Preconditions.checkNotNull(fields);
-    fields_ = fields;
-    for (int i = 0; i < fields_.size(); ++i) {
-      fields_.get(i).setPosition(i);
-      fieldMap_.put(fields_.get(i).getName().toLowerCase(), fields_.get(i));
-    }
-  }
-
-  public StructType() {
-    fields_ = Lists.newArrayList();
-  }
-
-  @Override
-  public String toSql(int depth) {
-    if (depth >= MAX_NESTING_DEPTH) return "STRUCT<...>";
-    ArrayList<String> fieldsSql = Lists.newArrayList();
-    for (StructField f: fields_) fieldsSql.add(f.toSql(depth + 1));
-    return String.format("STRUCT<%s>", Joiner.on(",").join(fieldsSql));
-  }
-
-  @Override
-  protected String prettyPrint(int lpad) {
-    String leftPadding = StringUtils.repeat(' ', lpad);
-    ArrayList<String> fieldsSql = Lists.newArrayList();
-    for (StructField f: fields_) fieldsSql.add(f.prettyPrint(lpad + 2));
-    return String.format("%sSTRUCT<\n%s\n%s>",
-        leftPadding, Joiner.on(",\n").join(fieldsSql), leftPadding);
-  }
-
-  public void addField(StructField field) {
-    field.setPosition(fields_.size());
-    fields_.add(field);
-    fieldMap_.put(field.getName().toLowerCase(), field);
-  }
-
-  public ArrayList<StructField> getFields() { return fields_; }
-
-  public StructField getField(String fieldName) {
-    return fieldMap_.get(fieldName.toLowerCase());
-  }
-
-  public void clearFields() {
-    fields_.clear();
-    fieldMap_.clear();
-  }
-
-  @Override
-  public boolean equals(Object other) {
-    if (!(other instanceof StructType)) return false;
-    StructType otherStructType = (StructType) other;
-    return otherStructType.getFields().equals(fields_);
-  }
-
-  @Override
-  public void toThrift(TColumnType container) {
-    TTypeNode node = new TTypeNode();
-    container.types.add(node);
-    Preconditions.checkNotNull(fields_);
-    Preconditions.checkState(!fields_.isEmpty());
-    node.setType(TTypeNodeType.STRUCT);
-    node.setStruct_fields(new ArrayList<TStructField>());
-    for (StructField field: fields_) {
-      field.toThrift(container, node);
-    }
-  }
-}
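
StructType.toSql() above joins its fields' SQL with commas and collapses anything past the
nesting-depth limit to STRUCT<...>. Below is a minimal recursive sketch of that behavior; the
depth limit of 2 is chosen only to make the truncation visible (it is not Impala's
MAX_NESTING_DEPTH), and the helper types are invented for the example:

import java.util.Arrays;
import java.util.stream.Collectors;

public class StructTypeSql {
  // Depth limit chosen small so the "STRUCT<...>" truncation is visible; not Impala's value.
  static final int MAX_DEPTH = 2;

  interface Type { String toSql(int depth); }

  static Type scalar(String name) { return depth -> name; }

  static class Field {
    final String name;
    final Type type;
    Field(String name, Type type) { this.name = name; this.type = type; }
  }

  static Type struct(Field... fields) {
    return depth -> {
      if (depth >= MAX_DEPTH) return "STRUCT<...>";
      return "STRUCT<" + Arrays.stream(fields)
          .map(f -> f.name + ":" + f.type.toSql(depth + 1))
          .collect(Collectors.joining(",")) + ">";
    };
  }

  public static void main(String[] args) {
    Type inner = struct(new Field("x", scalar("INT")));
    Type outer = struct(
        new Field("a", scalar("STRING")),
        new Field("b", struct(new Field("inner", inner))));
    // Prints STRUCT<a:STRING,b:STRUCT<inner:STRUCT<...>>>: the struct at depth 2 is elided.
    System.out.println(outer.toSql(0));
  }
}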

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/catalog/Table.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/catalog/Table.java b/fe/src/main/java/com/cloudera/impala/catalog/Table.java
deleted file mode 100644
index f794d7e..0000000
--- a/fe/src/main/java/com/cloudera/impala/catalog/Table.java
+++ /dev/null
@@ -1,484 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.catalog;
-
-import java.util.ArrayList;
-import java.util.EnumSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.hadoop.hive.common.StatsSetupConst;
-import org.apache.hadoop.hive.metastore.IMetaStoreClient;
-import org.apache.hadoop.hive.metastore.TableType;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.log4j.Logger;
-
-import com.cloudera.impala.analysis.TableName;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.ImpalaRuntimeException;
-import com.cloudera.impala.common.Pair;
-import com.cloudera.impala.thrift.TAccessLevel;
-import com.cloudera.impala.thrift.TCatalogObject;
-import com.cloudera.impala.thrift.TCatalogObjectType;
-import com.cloudera.impala.thrift.TColumn;
-import com.cloudera.impala.thrift.TColumnDescriptor;
-import com.cloudera.impala.thrift.TTable;
-import com.cloudera.impala.thrift.TTableDescriptor;
-import com.cloudera.impala.thrift.TTableStats;
-import com.cloudera.impala.util.HdfsCachingUtil;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-
-/**
- * Base class for table metadata.
- *
- * This includes the concept of clustering columns, which are columns by which the table
- * data is physically clustered. In other words, if two rows share the same values
- * for the clustering columns, those two rows are most likely colocated. Note that this
- * is more general than Hive's CLUSTER BY ... INTO BUCKETS clause (which partitions
- * a key range into a fixed number of buckets).
- */
-public abstract class Table implements CatalogObject {
-  private static final Logger LOG = Logger.getLogger(Table.class);
-
-  // Lock used to serialize calls to the Hive MetaStore to work around MetaStore
-  // concurrency bugs. Currently used to serialize calls to "getTable()" due to HIVE-5457.
-  private static final Object metastoreAccessLock_ = new Object();
-  private long catalogVersion_ = Catalog.INITIAL_CATALOG_VERSION;
-  protected org.apache.hadoop.hive.metastore.api.Table msTable_;
-
-  protected final TableId id_;
-  protected final Db db_;
-  protected final String name_;
-  protected final String owner_;
-  protected TTableDescriptor tableDesc_;
-  protected TAccessLevel accessLevel_ = TAccessLevel.READ_WRITE;
-
-  // Number of clustering columns.
-  protected int numClusteringCols_;
-
-  // estimated number of rows in table; -1: unknown.
-  protected long numRows_ = -1;
-
-  // colsByPos[i] refers to the ith column in the table. The first numClusteringCols are
-  // the clustering columns.
-  protected final ArrayList<Column> colsByPos_ = Lists.newArrayList();
-
-  // map from lowercase column name to Column object.
-  private final Map<String, Column> colsByName_ = Maps.newHashMap();
-
-  // Type of this table (array of struct) that mirrors the columns. Useful for analysis.
-  protected final ArrayType type_ = new ArrayType(new StructType());
-
-  // The lastDdlTime for this table; -1 if not set
-  protected long lastDdlTime_;
-
-  // Set of supported table types.
-  protected static EnumSet<TableType> SUPPORTED_TABLE_TYPES = EnumSet.of(
-      TableType.EXTERNAL_TABLE, TableType.MANAGED_TABLE, TableType.VIRTUAL_VIEW);
-
-  protected Table(TableId id, org.apache.hadoop.hive.metastore.api.Table msTable, Db db,
-      String name, String owner) {
-    id_ = id;
-    msTable_ = msTable;
-    db_ = db;
-    name_ = name.toLowerCase();
-    owner_ = owner;
-    lastDdlTime_ = (msTable_ != null) ?
-        CatalogServiceCatalog.getLastDdlTime(msTable_) : -1;
-  }
-
-  public abstract TTableDescriptor toThriftDescriptor(Set<Long> referencedPartitions);
-  public abstract TCatalogObjectType getCatalogObjectType();
-
-  /**
-   * Populate members of 'this' from metastore info. If 'reuseMetadata' is true, reuse
-   * valid existing metadata.
-   */
-  public abstract void load(boolean reuseMetadata, IMetaStoreClient client,
-      org.apache.hadoop.hive.metastore.api.Table msTbl) throws TableLoadingException;
-
-  public void addColumn(Column col) {
-    colsByPos_.add(col);
-    colsByName_.put(col.getName().toLowerCase(), col);
-    ((StructType) type_.getItemType()).addField(
-        new StructField(col.getName(), col.getType(), col.getComment()));
-  }
-
-  public void clearColumns() {
-    colsByPos_.clear();
-    colsByName_.clear();
-    ((StructType) type_.getItemType()).clearFields();
-  }
-
-  /**
-   * Updates the lastDdlTime for this Table, if the new value is greater
-   * than the existing value. Does nothing if the new value is less than
-   * or equal to the existing value.
-   */
-  public void updateLastDdlTime(long ddlTime) {
-    // Ensure the lastDdlTime never goes backwards.
-    if (ddlTime > lastDdlTime_) lastDdlTime_ = ddlTime;
-  }
-
-  // Returns a list of all column names for this table which we expect to have column
-  // stats in the HMS. This exists because, when we request the column stats from HMS,
-  // including a column name that does not have stats causes getTableColumnStatistics()
-  // to return nothing. For Hdfs tables, partition columns do
-  // not have column stats in the HMS, but HBase table clustering columns do have column
-  // stats. This method allows each table type to volunteer the set of columns we should
-  // ask the metastore for in loadAllColumnStats().
-  protected List<String> getColumnNamesWithHmsStats() {
-    List<String> ret = Lists.newArrayList();
-    for (String name: colsByName_.keySet()) ret.add(name);
-    return ret;
-  }
-
-  /**
-   * Loads column statistics for all columns in this table from the Hive metastore. Any
-   * errors are logged and ignored, since the absence of column stats is not critical to
-   * the correctness of the system.
-   */
-  protected void loadAllColumnStats(IMetaStoreClient client) {
-    LOG.debug("Loading column stats for table: " + name_);
-    List<ColumnStatisticsObj> colStats;
-
-    // We need to only query those columns which may have stats; asking HMS for other
-    // columns causes loadAllColumnStats() to return nothing.
-    List<String> colNames = getColumnNamesWithHmsStats();
-
-    try {
-      colStats = client.getTableColumnStatistics(db_.getName(), name_, colNames);
-    } catch (Exception e) {
-      LOG.warn("Could not load column statistics for: " + getFullName(), e);
-      return;
-    }
-
-    for (ColumnStatisticsObj stats: colStats) {
-      Column col = getColumn(stats.getColName());
-      Preconditions.checkNotNull(col);
-      if (!ColumnStats.isSupportedColType(col.getType())) {
-        LOG.warn(String.format("Statistics for %s, column %s are not supported as " +
-                "column has type %s", getFullName(), col.getName(), col.getType()));
-        continue;
-      }
-
-      if (!col.updateStats(stats.getStatsData())) {
-        LOG.warn(String.format("Failed to load column stats for %s, column %s. Stats " +
-            "may be incompatible with column type %s. Consider regenerating statistics " +
-            "for %s.", getFullName(), col.getName(), col.getType(), getFullName()));
-        continue;
-      }
-    }
-  }
-
-  /**
-   * Returns the value of the ROW_COUNT constant, or -1 if not found.
-   */
-  protected static long getRowCount(Map<String, String> parameters) {
-    if (parameters == null) return -1;
-    String numRowsStr = parameters.get(StatsSetupConst.ROW_COUNT);
-    if (numRowsStr == null) return -1;
-    try {
-      return Long.valueOf(numRowsStr);
-    } catch (NumberFormatException exc) {
-      // ignore
-    }
-    return -1;
-  }
-
-  /**
-   * Creates a table of the appropriate type based on the given hive.metastore.api.Table
-   * object.
-   */
-  public static Table fromMetastoreTable(TableId id, Db db,
-      org.apache.hadoop.hive.metastore.api.Table msTbl) {
-    // Create a table of appropriate type
-    Table table = null;
-    if (TableType.valueOf(msTbl.getTableType()) == TableType.VIRTUAL_VIEW) {
-      table = new View(id, msTbl, db, msTbl.getTableName(), msTbl.getOwner());
-    } else if (HBaseTable.isHBaseTable(msTbl)) {
-      table = new HBaseTable(id, msTbl, db, msTbl.getTableName(), msTbl.getOwner());
-    } else if (KuduTable.isKuduTable(msTbl)) {
-      table = new KuduTable(id, msTbl, db, msTbl.getTableName(), msTbl.getOwner());
-    } else if (DataSourceTable.isDataSourceTable(msTbl)) {
-      // It's important to check if this is a DataSourceTable before HdfsTable because
-      // DataSourceTables are still represented by HDFS tables in the metastore but
-      // have a special table property to indicate that Impala should use an external
-      // data source.
-      table = new DataSourceTable(id, msTbl, db, msTbl.getTableName(), msTbl.getOwner());
-    } else if (HdfsFileFormat.isHdfsInputFormatClass(msTbl.getSd().getInputFormat())) {
-      table = new HdfsTable(id, msTbl, db, msTbl.getTableName(), msTbl.getOwner());
-    }
-    return table;
-  }
-
-  /**
-   * Factory method that creates a new Table from its Thrift representation.
-   * Determines the type of table to create based on the Thrift table provided.
-   */
-  public static Table fromThrift(Db parentDb, TTable thriftTable)
-      throws TableLoadingException {
-    Table newTable;
-    if (!thriftTable.isSetLoad_status() && thriftTable.isSetMetastore_table())  {
-      newTable = Table.fromMetastoreTable(new TableId(thriftTable.getId()),
-          parentDb, thriftTable.getMetastore_table());
-    } else {
-      newTable = IncompleteTable.createUninitializedTable(
-          TableId.createInvalidId(), parentDb, thriftTable.getTbl_name());
-    }
-    newTable.loadFromThrift(thriftTable);
-    newTable.validate();
-    return newTable;
-  }
-
-  protected void loadFromThrift(TTable thriftTable) throws TableLoadingException {
-    List<TColumn> columns = new ArrayList<TColumn>();
-    columns.addAll(thriftTable.getClustering_columns());
-    columns.addAll(thriftTable.getColumns());
-
-    colsByPos_.clear();
-    colsByPos_.ensureCapacity(columns.size());
-    for (int i = 0; i < columns.size(); ++i) {
-      Column col = Column.fromThrift(columns.get(i));
-      colsByPos_.add(col.getPosition(), col);
-      colsByName_.put(col.getName().toLowerCase(), col);
-      ((StructType) type_.getItemType()).addField(
-          new StructField(col.getName(), col.getType(), col.getComment()));
-    }
-
-    numClusteringCols_ = thriftTable.getClustering_columns().size();
-
-    // Estimated number of rows
-    numRows_ = thriftTable.isSetTable_stats() ?
-        thriftTable.getTable_stats().getNum_rows() : -1;
-
-    // Default to READ_WRITE access if the field is not set.
-    accessLevel_ = thriftTable.isSetAccess_level() ? thriftTable.getAccess_level() :
-        TAccessLevel.READ_WRITE;
-  }
-
-  /**
-   * Checks preconditions for this table to function as expected. Currently only checks
-   * that all entries in colsByName_ use lower case keys.
-   */
-  public void validate() throws TableLoadingException {
-    for (String colName: colsByName_.keySet()) {
-      if (!colName.equals(colName.toLowerCase())) {
-        throw new TableLoadingException(
-            "Expected lower case column name but found: " + colName);
-      }
-    }
-  }
-
-  public TTable toThrift() {
-    TTable table = new TTable(db_.getName(), name_);
-    table.setId(id_.asInt());
-    table.setAccess_level(accessLevel_);
-
-    // Populate both regular columns and clustering columns (if there are any).
-    table.setColumns(new ArrayList<TColumn>());
-    table.setClustering_columns(new ArrayList<TColumn>());
-    for (int i = 0; i < colsByPos_.size(); ++i) {
-      TColumn colDesc = colsByPos_.get(i).toThrift();
-      // Clustering columns come first.
-      if (i < numClusteringCols_) {
-        table.addToClustering_columns(colDesc);
-      } else {
-        table.addToColumns(colDesc);
-      }
-    }
-
-    table.setMetastore_table(getMetaStoreTable());
-    if (numRows_ != -1) {
-      table.setTable_stats(new TTableStats());
-      table.getTable_stats().setNum_rows(numRows_);
-    }
-    return table;
-  }
-
-  public TCatalogObject toTCatalogObject() {
-    TCatalogObject catalogObject = new TCatalogObject();
-    catalogObject.setType(getCatalogObjectType());
-    catalogObject.setCatalog_version(getCatalogVersion());
-    catalogObject.setTable(toThrift());
-    return catalogObject;
-  }
-
-  /**
-   * Gets the ColumnType from the given FieldSchema by using Impala's SqlParser.
-   * Throws a TableLoadingException if the FieldSchema could not be parsed.
-   * The type can either be:
-   *   - Supported by Impala, in which case the type is returned.
-   *   - A type Impala understands but is not yet implemented (e.g. date), the type is
-   *     returned but type.IsSupported() returns false.
-   *   - A supported type that exceeds an Impala limit, e.g., on the nesting depth.
-   *   - A type Impala can't understand at all, and a TableLoadingException is thrown.
-   */
-   protected Type parseColumnType(FieldSchema fs) throws TableLoadingException {
-     Type type = Type.parseColumnType(fs.getType());
-     if (type == null) {
-       throw new TableLoadingException(String.format(
-           "Unsupported type '%s' in column '%s' of table '%s'",
-           fs.getType(), fs.getName(), getName()));
-     }
-     if (type.exceedsMaxNestingDepth()) {
-       throw new TableLoadingException(String.format(
-           "Type exceeds the maximum nesting depth of %s:\n%s",
-           Type.MAX_NESTING_DEPTH, type.toSql()));
-     }
-     return type;
-   }
-
-  public Db getDb() { return db_; }
-  public String getName() { return name_; }
-  public String getFullName() { return (db_ != null ? db_.getName() + "." : "") + name_; }
-  public TableName getTableName() {
-    return new TableName(db_ != null ? db_.getName() : null, name_);
-  }
-
-  public String getOwner() { return owner_; }
-  public ArrayList<Column> getColumns() { return colsByPos_; }
-
-  /**
-   * Returns a list of the column names ordered by position.
-   */
-  public List<String> getColumnNames() {
-    List<String> colNames = Lists.<String>newArrayList();
-    for (Column col: colsByPos_) {
-      colNames.add(col.getName());
-    }
-    return colNames;
-  }
-
-  /**
-   * Returns a list of thrift column descriptors ordered by position.
-   */
-  public List<TColumnDescriptor> getTColumnDescriptors() {
-    List<TColumnDescriptor> colDescs = Lists.<TColumnDescriptor>newArrayList();
-    for (Column col: colsByPos_) {
-      colDescs.add(new TColumnDescriptor(col.getName(), col.getType().toThrift()));
-    }
-    return colDescs;
-  }
-
-  /**
-   * Subclasses should override this if they provide a storage handler class. Currently
-   * only HBase tables need to provide a storage handler.
-   */
-  public String getStorageHandlerClassName() { return null; }
-
-  /**
-   * Returns the list of all columns, but with partition columns at the end of
-   * the list rather than the beginning. This is equivalent to the order in
-   * which Hive enumerates columns.
-   */
-  public ArrayList<Column> getColumnsInHiveOrder() {
-    ArrayList<Column> columns = Lists.newArrayList(getNonClusteringColumns());
-    columns.addAll(getClusteringColumns());
-    return columns;
-  }
-
-  /**
-   * Returns a struct type with the columns in the same order as getColumnsInHiveOrder().
-   */
-  public StructType getHiveColumnsAsStruct() {
-    ArrayList<StructField> fields = Lists.newArrayListWithCapacity(colsByPos_.size());
-    for (Column col: getColumnsInHiveOrder()) {
-      fields.add(new StructField(col.getName(), col.getType(), col.getComment()));
-    }
-    return new StructType(fields);
-  }
-
-  /**
-   * Returns the list of all partition columns.
-   */
-  public List<Column> getClusteringColumns() {
-    return colsByPos_.subList(0, numClusteringCols_);
-  }
-
-  /**
-   * Returns the list of all columns excluding any partition columns.
-   */
-  public List<Column> getNonClusteringColumns() {
-    return colsByPos_.subList(numClusteringCols_, colsByPos_.size());
-  }
-
-  /**
-   * Case-insensitive lookup.
-   */
-  public Column getColumn(String name) { return colsByName_.get(name.toLowerCase()); }
-
-  /**
-   * Returns the metastore.api.Table object this Table was created from. Returns null
-   * if the derived Table object was not created from a metastore Table (e.g. InlineViews).
-   */
-  public org.apache.hadoop.hive.metastore.api.Table getMetaStoreTable() {
-    return msTable_;
-  }
-
-  public void setMetaStoreTable(org.apache.hadoop.hive.metastore.api.Table msTbl) {
-    msTable_ = msTbl;
-  }
-
-  public int getNumClusteringCols() { return numClusteringCols_; }
-  public TableId getId() { return id_; }
-  public long getNumRows() { return numRows_; }
-  public ArrayType getType() { return type_; }
-
-  @Override
-  public long getCatalogVersion() { return catalogVersion_; }
-
-  @Override
-  public void setCatalogVersion(long catalogVersion) {
-    catalogVersion_ = catalogVersion;
-  }
-
-  @Override
-  public boolean isLoaded() { return true; }
-
-  /**
-   * If the table is cached, it returns a <cache pool name, replication factor> pair
-   * and adds the table's cache directive ID to 'cacheDirIds'. Otherwise, it
-   * returns a <null, 0> pair.
-   */
-  public Pair<String, Short> getTableCacheInfo(List<Long> cacheDirIds) {
-    String cachePoolName = null;
-    Short cacheReplication = 0;
-    Long cacheDirId = HdfsCachingUtil.getCacheDirectiveId(msTable_.getParameters());
-    if (cacheDirId != null) {
-      try {
-        cachePoolName = HdfsCachingUtil.getCachePool(cacheDirId);
-        cacheReplication = HdfsCachingUtil.getCacheReplication(cacheDirId);
-        Preconditions.checkNotNull(cacheReplication);
-        if (numClusteringCols_ == 0) cacheDirIds.add(cacheDirId);
-      } catch (ImpalaRuntimeException e) {
-        // Catch the error so that the actual update to the catalog can proceed;
-        // note that this resets caching for the table.
-        LOG.error(
-            String.format("Cache directive %d was not found, uncache the table %s " +
-                "to remove this message.", cacheDirId, getFullName()));
-        cacheDirId = null;
-      }
-    }
-    return new Pair<String, Short>(cachePoolName, cacheReplication);
-  }
-}
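
For reference, a minimal sketch of what the getRowCount() parsing above does;
the parameter map and the values in it are made up (StatsSetupConst.ROW_COUNT
is Hive's "numRows" table property):

  Map<String, String> params = new HashMap<>();
  params.put(StatsSetupConst.ROW_COUNT, "1234");
  getRowCount(params);   // -> 1234
  params.put(StatsSetupConst.ROW_COUNT, "not-a-number");
  getRowCount(params);   // -> -1 (the NumberFormatException is swallowed)
  getRowCount(null);     // -> -1 (no parameters at all)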

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/catalog/TableId.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/catalog/TableId.java b/fe/src/main/java/com/cloudera/impala/catalog/TableId.java
deleted file mode 100644
index 1918029..0000000
--- a/fe/src/main/java/com/cloudera/impala/catalog/TableId.java
+++ /dev/null
@@ -1,42 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.catalog;
-
-import com.cloudera.impala.common.Id;
-import com.cloudera.impala.common.IdGenerator;
-
-public class TableId extends Id<TableId> {
-  // Construction only allowed via an IdGenerator.
-  protected TableId(int id) {
-    super(id);
-  }
-
-  public static IdGenerator<TableId> createGenerator() {
-    return new IdGenerator<TableId>() {
-      @Override
-      public TableId getNextId() { return new TableId(nextId_++); }
-      @Override
-      public TableId getMaxId() { return new TableId(nextId_ - 1); }
-    };
-  }
-
-  /**
-   * Returns an invalid table id intended for temporary use, e.g., for CTAS.
-   */
-  public static TableId createInvalidId() { return new TableId(INVALID_ID); }
-}
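
For reference, a small hedged usage sketch of the generator factory above; the
starting value of nextId_ comes from the IdGenerator base class, which is not
part of this diff, so concrete id values are not shown:

  IdGenerator<TableId> gen = TableId.createGenerator();
  TableId t1 = gen.getNextId();
  TableId t2 = gen.getNextId();                 // t2.asInt() == t1.asInt() + 1
  TableId invalid = TableId.createInvalidId();  // temporary id, e.g. for CTAS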

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/catalog/TableLoader.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/catalog/TableLoader.java b/fe/src/main/java/com/cloudera/impala/catalog/TableLoader.java
deleted file mode 100644
index 78b58f6..0000000
--- a/fe/src/main/java/com/cloudera/impala/catalog/TableLoader.java
+++ /dev/null
@@ -1,101 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.catalog;
-
-import java.util.EnumSet;
-import java.util.Set;
-
-import org.apache.hadoop.hive.metastore.TableType;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.log4j.Logger;
-
-import com.cloudera.impala.catalog.MetaStoreClientPool.MetaStoreClient;
-
-/**
- * Class that implements the logic for how a table's metadata should be loaded from
- * the Hive Metastore / HDFS / etc.
- */
-public class TableLoader {
-  private static final Logger LOG = Logger.getLogger(TableLoader.class);
-
-  // Set of supported table types.
-  private static EnumSet<TableType> SUPPORTED_TABLE_TYPES = EnumSet.of(
-      TableType.EXTERNAL_TABLE, TableType.MANAGED_TABLE, TableType.VIRTUAL_VIEW);
-
-  private final CatalogServiceCatalog catalog_;
-
-  // Lock used to serialize calls to the Hive MetaStore to work around MetaStore
-  // concurrency bugs. Currently used to serialize calls to "getTable()" due to
-  // HIVE-5457.
-  private static final Object metastoreAccessLock_ = new Object();
-
-  public TableLoader(CatalogServiceCatalog catalog) {
-    catalog_ = catalog;
-  }
-
-  /**
-   * Creates the Impala representation of Hive/HBase metadata for one table.
-   * Calls load() on the appropriate instance of Table subclass.
-   * Returns a new instance of Table. If there were any errors loading the table
-   * metadata, an IncompleteTable is returned that contains details on the error.
-   */
-  public Table load(Db db, String tblName) {
-    String fullTblName = db.getName() + "." + tblName;
-    LOG.info("Loading metadata for: " + fullTblName);
-    Table table;
-    // turn all exceptions into TableLoadingException
-    try (MetaStoreClient msClient = catalog_.getMetaStoreClient()) {
-      org.apache.hadoop.hive.metastore.api.Table msTbl = null;
-      // All calls to getTable() need to be serialized due to HIVE-5457.
-      synchronized (metastoreAccessLock_) {
-        msTbl = msClient.getHiveClient().getTable(db.getName(), tblName);
-      }
-      // Check that the Hive TableType is supported
-      TableType tableType = TableType.valueOf(msTbl.getTableType());
-      if (!SUPPORTED_TABLE_TYPES.contains(tableType)) {
-        throw new TableLoadingException(String.format(
-            "Unsupported table type '%s' for: %s", tableType, fullTblName));
-      }
-
-      // Create a table of appropriate type and have it load itself
-      table = Table.fromMetastoreTable(catalog_.getNextTableId(), db, msTbl);
-      if (table == null) {
-        throw new TableLoadingException(
-            "Unrecognized table type for table: " + fullTblName);
-      }
-      table.load(false, msClient.getHiveClient(), msTbl);
-      table.validate();
-    } catch (TableLoadingException e) {
-      table = IncompleteTable.createFailedMetadataLoadTable(
-          TableId.createInvalidId(), db, tblName, e);
-    } catch (NoSuchObjectException e) {
-      TableLoadingException tableDoesNotExist = new TableLoadingException(
-          "Table " + fullTblName + " no longer exists in the Hive MetaStore. " +
-          "Run 'invalidate metadata " + fullTblName + "' to update the Impala " +
-          "catalog.");
-      table = IncompleteTable.createFailedMetadataLoadTable(
-          TableId.createInvalidId(), db, tblName, tableDoesNotExist);
-    } catch (Exception e) {
-      table = IncompleteTable.createFailedMetadataLoadTable(
-          catalog_.getNextTableId(), db, tblName, new TableLoadingException(
-          "Failed to load metadata for table: " + fullTblName + ". Running " +
-          "'invalidate metadata " + fullTblName + "' may resolve this problem.", e));
-    }
-    return table;
-  }
-}
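
For context, a minimal sketch of how the loader above might be invoked;
'catalog' (a CatalogServiceCatalog) and 'db' (a Db) are placeholders that are
assumed to exist:

  TableLoader loader = new TableLoader(catalog);
  Table tbl = loader.load(db, "my_table");
  // load() does not throw on failure: errors are returned as an IncompleteTable
  // that wraps the underlying TableLoadingException.
  if (tbl instanceof IncompleteTable) {
    // surface the load error to the caller
  }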

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/catalog/TableLoadingException.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/catalog/TableLoadingException.java b/fe/src/main/java/com/cloudera/impala/catalog/TableLoadingException.java
deleted file mode 100644
index ab55675..0000000
--- a/fe/src/main/java/com/cloudera/impala/catalog/TableLoadingException.java
+++ /dev/null
@@ -1,35 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.catalog;
-
-
-/**
- * Thrown when table metadata cannot be loaded due to an error.
- */
-public class TableLoadingException extends CatalogException {
-  // Dummy serial UID to avoid Eclipse warnings
-  private static final long serialVersionUID = 7760580025530083536L;
-
-  public TableLoadingException(String s, Throwable cause) {
-    super(s, cause);
-  }
-
-  public TableLoadingException(String s) {
-    super(s);
-  }
-};
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/catalog/TableLoadingMgr.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/catalog/TableLoadingMgr.java b/fe/src/main/java/com/cloudera/impala/catalog/TableLoadingMgr.java
deleted file mode 100644
index 57cc513..0000000
--- a/fe/src/main/java/com/cloudera/impala/catalog/TableLoadingMgr.java
+++ /dev/null
@@ -1,333 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.catalog;
-
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.FutureTask;
-import java.util.concurrent.LinkedBlockingDeque;
-import java.util.concurrent.LinkedBlockingQueue;
-
-import org.apache.log4j.Logger;
-
-import com.cloudera.impala.thrift.TTableName;
-import com.cloudera.impala.util.HdfsCachingUtil;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Maps;
-
-/**
-* Class that manages scheduling the loading of table metadata from the Hive Metastore and
-* the Hadoop NameNode. Loads tables using a pool of table loading threads. New load
-* requests can be submitted using loadAsync(), which will schedule the load when the
-* next thread becomes available.  Also manages prioritized background table loading by
-* reading from a deque of table names to determine which table to load next. Tables added
-* to the head of the deque will be loaded before tables added to the tail, so the loading
-* order can be prioritized (see prioritizeLoad()/backgroundLoad()).
-*/
-public class TableLoadingMgr {
-  /**
-   * Represents the result of an asynchronous Table loading request. Calling
-   * get() will block until the Table has completed loading. When finished
-   * processing the request, call close() to clean up.
-   */
-  public class LoadRequest {
-    private final Future<Table> tblTask_;
-    private final TTableName tblName_;
-
-    private LoadRequest(TTableName tblName, Future<Table> tblTask) {
-      tblTask_ = tblTask;
-      tblName_ = tblName;
-    }
-
-    /**
-     * Blocks until the table has finished loading and returns the result. If any errors
-     * were encountered while loading the table, an IncompleteTable will be returned.
-     */
-    public Table get() {
-      Table tbl;
-      try {
-        tbl = tblTask_.get();
-      } catch (Exception e) {
-        tbl = IncompleteTable.createFailedMetadataLoadTable(
-            TableId.createInvalidId(), catalog_.getDb(tblName_.getDb_name()),
-            tblName_.getTable_name(), new TableLoadingException(e.getMessage(), e));
-      }
-      Preconditions.checkState(tbl.isLoaded());
-      return tbl;
-    }
-
-    /**
-     * Cleans up the in-flight load request matching the given table name. Will not
-     * cancel the load if it is still in progress, but frees the slot so that another
-     * load for the same table can come in. Can be called multiple times.
-     */
-    public void close() {
-      synchronized (loadingTables_) {
-        if (loadingTables_.get(tblName_) == tblTask_) loadingTables_.remove(tblName_);
-      }
-    }
-  }
-
-  private static final Logger LOG = Logger.getLogger(TableLoadingMgr.class);
-
-  // A thread safe blocking deque that is used to prioritize the loading of table
-  // metadata. The CatalogServer has a background thread that will always add unloaded
-  // tables to the tail of the deque. However, a call to prioritizeLoad() will add
-  // tables to the head of the deque. The next table to load is always taken from the
-  // head of the deque. May contain the same table multiple times, but a second
-  // attempt to load the table metadata will be a no-op.
-  private final LinkedBlockingDeque<TTableName> tableLoadingDeque_ =
-      new LinkedBlockingDeque<TTableName>();
-
-  // A thread safe HashSet of table names that are in the tableLoadingDeque_. Used to
-  // efficiently check for existence of items in the deque.
-  // Updates may lead/lag updates to the tableLoadingDeque_ - they are added to this set
-  // immediately before being added to the deque and removed immediately after being
-  // removed from the deque. The fact that the updates are not synchronized shouldn't
-  // impact functionality since this set is only used for efficient lookups.
-  private final Set<TTableName> tableLoadingSet_ =
-      Collections.synchronizedSet(new HashSet<TTableName>());
-
-  // Map of table name to a FutureTask associated with the table load. Used to
-  // prevent duplicate loads of the same table.
-  private final ConcurrentHashMap<TTableName, FutureTask<Table>> loadingTables_ =
-      new ConcurrentHashMap<TTableName, FutureTask<Table>>();
-
-  // Map of table name to the cache directives that are being waited on for that table.
-  // Once all directives have completed, the table's metadata will be refreshed and
-  // the table will be removed from this map.
-  // A caching operation may take a long time to complete, so to maximize query
-  // throughput it is preferable to allow the user to continue to run queries against
-  // the table while a cache request completes in the background.
-  private final Map<TTableName, List<Long>> pendingTableCacheDirs_ = Maps.newHashMap();
-
-  // The number of parallel threads to use to load table metadata. Should be set to a
-  // value that provides good throughput while not putting too much stress on the
-  // metastore.
-  private final int numLoadingThreads_;
-
-  // Pool of numLoadingThreads_ threads that loads table metadata. If additional tasks
-  // are submitted to the pool after it is full, they will be queued and executed when
-  // the next thread becomes available. There is no hard upper limit on the number of
-  // pending tasks (no work will be rejected, but memory consumption is unbounded).
-  private final ExecutorService tblLoadingPool_;
-
-  // Thread that incrementally refreshes tables in the background. Used to update a
-  // table's metadata after a long running operation completes, such as marking a
-  // table as cached. There is no hard upper limit on the number of pending tasks
-  // (no work will be rejected, but memory consumption is unbounded). If this thread
-  // dies it will be automatically restarted.
-  // The tables to process are read from the refreshThreadWork_ queue.
-  ExecutorService asyncRefreshThread_ = Executors.newSingleThreadExecutor();
-
-  // Tables for the async refresh thread to process. Synchronization must be handled
-  // externally.
-  private final LinkedBlockingQueue<TTableName> refreshThreadWork_ =
-      new LinkedBlockingQueue<TTableName>();
-
-  private final CatalogServiceCatalog catalog_;
-  private final TableLoader tblLoader_;
-
-  public TableLoadingMgr(CatalogServiceCatalog catalog, int numLoadingThreads) {
-    catalog_ = catalog;
-    tblLoader_ = new TableLoader(catalog_);
-    numLoadingThreads_ = numLoadingThreads;
-    tblLoadingPool_ = Executors.newFixedThreadPool(numLoadingThreads_);
-
-    // Start the background table loading threads.
-    startTableLoadingThreads();
-
-    // Start the asyncRefreshThread_. Currently used to wait for cache directives to
-    // complete in the background.
-    asyncRefreshThread_.submit(new Callable<Void>() {
-      @Override
-      public Void call() throws Exception {
-        while(true) {
-          execAsyncRefreshWork(refreshThreadWork_.take());
-        }
-      }});
-  }
-
-  /**
-   * Prioritizes the loading of the given table.
-   */
-  public void prioritizeLoad(TTableName tblName) {
-    tableLoadingSet_.add(tblName);
-    tableLoadingDeque_.offerFirst(tblName);
-  }
-
-  /**
-   * Submits a single table for background (low priority) loading.
-   */
-  public void backgroundLoad(TTableName tblName) {
-    // Only queue for background loading if the table doesn't already exist
-    // in the table loading set.
-    if (tableLoadingSet_.add(tblName)) {
-      tableLoadingDeque_.offerLast(tblName);
-    }
-  }
-
-  /**
-   * Adds a list of cache directive IDs to watch for the given table name.
-   * The asyncRefreshThread_ will process the cache directives and once all directives
-   * complete (data has been cached or no progress is being made), the
-   * asyncRefreshThread_ will refresh the table metadata. After processing the
-   * request the watch will be deleted.
-   */
-  public void watchCacheDirs(List<Long> cacheDirIds, final TTableName tblName) {
-    synchronized (pendingTableCacheDirs_) {
-      // A single table may have multiple pending cache requests since one request
-      // gets submitted per-partition.
-      List<Long> existingCacheReqIds = pendingTableCacheDirs_.get(tblName);
-      if (existingCacheReqIds == null) {
-        existingCacheReqIds = cacheDirIds;
-        pendingTableCacheDirs_.put(tblName, cacheDirIds);
-        refreshThreadWork_.add(tblName);
-      } else {
-        existingCacheReqIds.addAll(cacheDirIds);
-      }
-    }
-  }
-
-  /**
-   * Loads a table asynchronously, returning a LoadRequest that can be used to get
-   * the result (a Table). If there is already a load in flight for this table name,
-   * the same underlying loading task (Future) will be used, helping to prevent duplicate
-   * loads of the same table.
-   */
-  public LoadRequest loadAsync(final TTableName tblName)
-      throws DatabaseNotFoundException {
-    final Db parentDb = catalog_.getDb(tblName.getDb_name());
-    if (parentDb == null) {
-      throw new DatabaseNotFoundException(
-          "Database '" + tblName.getDb_name() + "' was not found.");
-    }
-
-    FutureTask<Table> tableLoadTask = new FutureTask<Table>(new Callable<Table>() {
-        @Override
-        public Table call() throws Exception {
-          return tblLoader_.load(parentDb, tblName.table_name);
-        }});
-
-    FutureTask<Table> existingValue = loadingTables_.putIfAbsent(tblName, tableLoadTask);
-    if (existingValue == null) {
-      // There was no existing value, submit a new load request.
-      tblLoadingPool_.execute(tableLoadTask);
-    } else {
-      tableLoadTask = existingValue;
-    }
-    return new LoadRequest(tblName, tableLoadTask);
-  }
-
-  /**
-   * Starts table loading threads in a fixed-size thread pool with a size
-   * defined by numLoadingThreads_. Each thread polls the tableLoadingDeque_
-   * for new tables to load.
-   */
-  private void startTableLoadingThreads() {
-    ExecutorService loadingPool = Executors.newFixedThreadPool(numLoadingThreads_);
-    try {
-      for (int i = 0; i < numLoadingThreads_; ++i) {
-        loadingPool.execute(new Runnable() {
-          @Override
-          public void run() {
-            while (true) {
-              try {
-                loadNextTable();
-              } catch (Exception e) {
-                LOG.error("Error loading table: ", e);
-                // Ignore exception.
-              }
-            }
-          }
-        });
-      }
-    } finally {
-      loadingPool.shutdown();
-    }
-  }
-
-  /**
-   * Gets the next table name to load off the head of the table loading queue. If
-   * the queue is empty, this will block until a new table is added.
-   */
-  private void loadNextTable() throws InterruptedException {
-    // Always get the next table from the head of the deque.
-    final TTableName tblName = tableLoadingDeque_.takeFirst();
-    tableLoadingSet_.remove(tblName);
-    LOG.debug("Loading next table. Remaining items in queue: "
-        + tableLoadingDeque_.size());
-    try {
-      // TODO: Instead of calling "getOrLoad" here we could call "loadAsync". We would
-      // just need to add a mechanism for moving loaded tables into the Catalog.
-      catalog_.getOrLoadTable(tblName.getDb_name(), tblName.getTable_name());
-    } catch (CatalogException e) {
-      // Ignore.
-    }
-  }
-
-  /**
-   * Executes all async refresh work for the specified table name.
-   */
-  private void execAsyncRefreshWork(TTableName tblName) {
-    if (!waitForCacheDirs(tblName)) return;
-    try {
-      // Reload the table metadata to pick up the new cached block location information.
-      catalog_.reloadTable(tblName);
-    } catch (CatalogException e) {
-      LOG.error("Error reloading cached table: ", e);
-    }
-  }
-
-  /**
-   * Waits for all pending cache directives on a table to complete.
-   * Returns true if a refresh is needed and false if a refresh is not needed.
-   */
-  private boolean waitForCacheDirs(TTableName tblName) {
-    boolean isRefreshNeeded = false;
-    // Keep processing cache directives for this table until there are none left.
-    while (true) {
-      // Get all pending requests for this table.
-      List<Long> cacheDirIds = null;
-      synchronized (pendingTableCacheDirs_) {
-        cacheDirIds = pendingTableCacheDirs_.remove(tblName);
-      }
-      if (cacheDirIds == null || cacheDirIds.size() == 0) return isRefreshNeeded;
-      isRefreshNeeded = true;
-
-      // Wait for each cache request to complete.
-      for (Long dirId: cacheDirIds) {
-        if (dirId == null) continue;
-        try {
-          HdfsCachingUtil.waitForDirective(dirId);
-        } catch (Exception e) {
-          LOG.error(String.format(
-              "Error waiting for cache request %d to complete: ", dirId), e);
-        }
-      }
-    }
-  }
-}
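
For context, a hedged sketch of the loadAsync()/LoadRequest pattern above;
'mgr' is an existing TableLoadingMgr and the database/table names are
placeholders. loadAsync() throws DatabaseNotFoundException for an unknown db.

  TTableName tblName = new TTableName();
  tblName.setDb_name("my_db");
  tblName.setTable_name("my_table");

  TableLoadingMgr.LoadRequest req = mgr.loadAsync(tblName);
  try {
    Table tbl = req.get();   // blocks; an IncompleteTable is returned on failure
    // ... publish tbl to the catalog ...
  } finally {
    req.close();             // frees the in-flight slot for this table name
  }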

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/catalog/TableNotFoundException.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/catalog/TableNotFoundException.java b/fe/src/main/java/com/cloudera/impala/catalog/TableNotFoundException.java
deleted file mode 100644
index 26686d1..0000000
--- a/fe/src/main/java/com/cloudera/impala/catalog/TableNotFoundException.java
+++ /dev/null
@@ -1,31 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.catalog;
-
-
-/**
- * Thrown when a table cannot be found in the catalog.
- */
-public class TableNotFoundException extends CatalogException {
-  // Dummy serial UID to avoid Eclipse warnings
-  private static final long serialVersionUID = -2203080667446640542L;
-
-  public TableNotFoundException(String s) { super(s); }
-
-  public TableNotFoundException(String s, Exception cause) { super(s, cause); }
-}
\ No newline at end of file


[16/61] [partial] incubator-impala git commit: IMPALA-3786: Replace "cloudera" with "apache" (part 1)

Posted by kw...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/planner/SortNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/planner/SortNode.java b/fe/src/main/java/com/cloudera/impala/planner/SortNode.java
deleted file mode 100644
index 02f0df6..0000000
--- a/fe/src/main/java/com/cloudera/impala/planner/SortNode.java
+++ /dev/null
@@ -1,242 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.planner;
-
-import java.util.List;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.analysis.ExprSubstitutionMap;
-import com.cloudera.impala.analysis.SlotDescriptor;
-import com.cloudera.impala.analysis.SlotRef;
-import com.cloudera.impala.analysis.SortInfo;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.service.BackendConfig;
-import com.cloudera.impala.thrift.TExplainLevel;
-import com.cloudera.impala.thrift.TPlanNode;
-import com.cloudera.impala.thrift.TPlanNodeType;
-import com.cloudera.impala.thrift.TQueryOptions;
-import com.cloudera.impala.thrift.TSortInfo;
-import com.cloudera.impala.thrift.TSortNode;
-import com.google.common.base.Joiner;
-import com.google.common.base.Objects;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-/**
- * Node that implements a sort with or without a limit. useTopN_ is true for sorts
- * with limits that are implemented by a TopNNode in the backend. SortNode is used
- * otherwise.
- * Will always materialize the new tuple info_.sortTupleDesc_.
- */
-public class SortNode extends PlanNode {
-  private final static Logger LOG = LoggerFactory.getLogger(SortNode.class);
-
-  private final SortInfo info_;
-
-  // if set, this SortNode requires its input to have this data partition
-  private DataPartition inputPartition_;
-
-  // if true, the output of this node feeds an AnalyticNode
-  private boolean isAnalyticSort_;
-
-  // info_.sortTupleSlotExprs_ substituted with the outputSmap_ for materialized slots
-  // in init().
-  private List<Expr> resolvedTupleExprs_;
-  private final boolean useTopN_;
-  // The offset of the first row to return.
-  protected long offset_;
-
-  public SortNode(PlanNodeId id, PlanNode input, SortInfo info, boolean useTopN,
-      long offset) {
-    super(id, info.getSortTupleDescriptor().getId().asList(),
-        getDisplayName(useTopN, false));
-    info_ = info;
-    useTopN_ = useTopN;
-    children_.add(input);
-    offset_ = offset;
-  }
-
-  public long getOffset() { return offset_; }
-  public void setOffset(long offset) { offset_ = offset; }
-  public boolean hasOffset() { return offset_ > 0; }
-  public boolean useTopN() { return useTopN_; }
-  public SortInfo getSortInfo() { return info_; }
-  public void setInputPartition(DataPartition inputPartition) {
-    inputPartition_ = inputPartition;
-  }
-  public DataPartition getInputPartition() { return inputPartition_; }
-  public boolean isAnalyticSort() { return isAnalyticSort_; }
-  public void setIsAnalyticSort(boolean v) { isAnalyticSort_ = v; }
-
-  @Override
-  public boolean isBlockingNode() { return true; }
-
-  @Override
-  public void init(Analyzer analyzer) throws InternalException {
-    // Do not assignConjuncts() here, so that conjuncts bound by this SortNode's tuple id
-    // can be placed in a downstream SelectNode. A SortNode cannot evaluate conjuncts.
-    Preconditions.checkState(conjuncts_.isEmpty());
-    // Compute the memory layout for the generated tuple.
-    computeMemLayout(analyzer);
-    computeStats(analyzer);
-
-    // populate resolvedTupleExprs_ and outputSmap_
-    List<SlotDescriptor> sortTupleSlots = info_.getSortTupleDescriptor().getSlots();
-    List<Expr> slotExprs = info_.getSortTupleSlotExprs();
-    Preconditions.checkState(sortTupleSlots.size() == slotExprs.size());
-    resolvedTupleExprs_ = Lists.newArrayList();
-    outputSmap_ = new ExprSubstitutionMap();
-    for (int i = 0; i < slotExprs.size(); ++i) {
-      if (!sortTupleSlots.get(i).isMaterialized()) continue;
-      resolvedTupleExprs_.add(slotExprs.get(i));
-      outputSmap_.put(slotExprs.get(i), new SlotRef(sortTupleSlots.get(i)));
-    }
-    ExprSubstitutionMap childSmap = getCombinedChildSmap();
-    resolvedTupleExprs_ =
-        Expr.substituteList(resolvedTupleExprs_, childSmap, analyzer, false);
-
-    // Remap the ordering exprs to the tuple materialized by this sort node. The mapping
-    // is a composition of the childSmap and the outputSmap_ because the child node may
-    // have also remapped its input (e.g., as in a series of (sort->analytic)* nodes).
-    // Parent nodes have to do the same, so set the composition as the outputSmap_.
-    outputSmap_ = ExprSubstitutionMap.compose(childSmap, outputSmap_, analyzer);
-
-    info_.substituteOrderingExprs(outputSmap_, analyzer);
-    info_.checkConsistency();
-
-    LOG.trace("sort id " + tupleIds_.get(0).toString() + " smap: "
-        + outputSmap_.debugString());
-    LOG.trace("sort input exprs: " + Expr.debugString(resolvedTupleExprs_));
-  }
-
-  @Override
-  protected void computeStats(Analyzer analyzer) {
-    super.computeStats(analyzer);
-    cardinality_ = capAtLimit(getChild(0).cardinality_);
-    LOG.debug("stats Sort: cardinality=" + Long.toString(cardinality_));
-  }
-
-  @Override
-  protected String debugString() {
-    List<String> strings = Lists.newArrayList();
-    for (Boolean isAsc : info_.getIsAscOrder()) {
-      strings.add(isAsc ? "a" : "d");
-    }
-    return Objects.toStringHelper(this)
-        .add("ordering_exprs", Expr.debugString(info_.getOrderingExprs()))
-        .add("is_asc", "[" + Joiner.on(" ").join(strings) + "]")
-        .add("nulls_first", "[" + Joiner.on(" ").join(info_.getNullsFirst()) + "]")
-        .add("offset_", offset_)
-        .addValue(super.debugString())
-        .toString();
-  }
-
-  @Override
-  protected void toThrift(TPlanNode msg) {
-    msg.node_type = TPlanNodeType.SORT_NODE;
-    TSortInfo sort_info = new TSortInfo(Expr.treesToThrift(info_.getOrderingExprs()),
-        info_.getIsAscOrder(), info_.getNullsFirst());
-    Preconditions.checkState(tupleIds_.size() == 1,
-        "Incorrect size for tupleIds_ in SortNode");
-    sort_info.sort_tuple_slot_exprs = Expr.treesToThrift(resolvedTupleExprs_);
-    TSortNode sort_node = new TSortNode(sort_info, useTopN_);
-    sort_node.setOffset(offset_);
-    msg.sort_node = sort_node;
-  }
-
-  @Override
-  protected String getNodeExplainString(String prefix, String detailPrefix,
-      TExplainLevel detailLevel) {
-    StringBuilder output = new StringBuilder();
-    output.append(String.format("%s%s:%s%s\n", prefix, id_.toString(),
-        displayName_, getNodeExplainDetail(detailLevel)));
-    if (detailLevel.ordinal() >= TExplainLevel.STANDARD.ordinal()) {
-      output.append(detailPrefix + "order by: ");
-      for (int i = 0; i < info_.getOrderingExprs().size(); ++i) {
-        if (i > 0) output.append(", ");
-        output.append(info_.getOrderingExprs().get(i).toSql() + " ");
-        output.append(info_.getIsAscOrder().get(i) ? "ASC" : "DESC");
-
-        Boolean nullsFirstParam = info_.getNullsFirstParams().get(i);
-        if (nullsFirstParam != null) {
-          output.append(nullsFirstParam ? " NULLS FIRST" : " NULLS LAST");
-        }
-      }
-      output.append("\n");
-    }
-    return output.toString();
-  }
-
-  private String getNodeExplainDetail(TExplainLevel detailLevel) {
-    if (!hasLimit()) return "";
-    if (hasOffset()) {
-      return String.format(" [LIMIT=%s OFFSET=%s]", limit_, offset_);
-    } else {
-      return String.format(" [LIMIT=%s]", limit_);
-    }
-  }
-
-  @Override
-  protected String getOffsetExplainString(String prefix) {
-    return offset_ != 0 ? prefix + "offset: " + Long.toString(offset_) + "\n" : "";
-  }
-
-  @Override
-  public void computeCosts(TQueryOptions queryOptions) {
-    Preconditions.checkState(hasValidStats());
-    if (useTopN_) {
-      perHostMemCost_ = (long) Math.ceil((cardinality_ + offset_) * avgRowSize_);
-      return;
-    }
-
-    // For an external sort, set the memory cost to be what is required for a 2-phase
-    // sort. If the input to be sorted would take up N blocks in memory, then the
-    // memory required for a 2-phase sort is sqrt(N) blocks. A single run would be of
-    // size sqrt(N) blocks, and we could merge sqrt(N) such runs with sqrt(N) blocks
-    // of memory.
-    double fullInputSize = getChild(0).cardinality_ * avgRowSize_;
-    boolean hasVarLenSlots = false;
-    for (SlotDescriptor slotDesc: info_.getSortTupleDescriptor().getSlots()) {
-      if (slotDesc.isMaterialized() && !slotDesc.getType().isFixedLengthType()) {
-        hasVarLenSlots = true;
-        break;
-      }
-    }
-
-    // The block size used by the sorter is the same as the configured I/O read size.
-    long blockSize = BackendConfig.INSTANCE.getReadSize();
-    // The external sorter writes fixed-len and var-len data in separate sequences of
-    // blocks on disk and reads from both sequences when merging. This effectively
-    // doubles the block size when there are var-len columns present.
-    if (hasVarLenSlots) blockSize *= 2;
-    double numInputBlocks = Math.ceil(fullInputSize / blockSize);
-    perHostMemCost_ = blockSize * (long) Math.ceil(Math.sqrt(numInputBlocks));
-  }
-
-  private static String getDisplayName(boolean isTopN, boolean isMergeOnly) {
-    if (isTopN) {
-      return "TOP-N";
-    } else {
-      return "SORT";
-    }
-  }
-}
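
To make the 2-phase external sort estimate in computeCosts() above concrete,
here is a small worked example with illustrative numbers (the row count, row
size and read size are made up; no var-len slots, so the block size is not
doubled):

  double fullInputSize = 10_000_000L * 100.0;   // 10M rows * 100 bytes ~ 1 GB
  long blockSize = 8L * 1024 * 1024;            // 8 MB configured read size
  double numInputBlocks = Math.ceil(fullInputSize / blockSize);          // 120
  long perHostMemCost = blockSize * (long) Math.ceil(Math.sqrt(numInputBlocks));
  // sqrt(120) rounds up to 11 blocks -> 11 * 8 MB = 88 MB estimated per host.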

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/planner/SubplanNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/planner/SubplanNode.java b/fe/src/main/java/com/cloudera/impala/planner/SubplanNode.java
deleted file mode 100644
index 4512268..0000000
--- a/fe/src/main/java/com/cloudera/impala/planner/SubplanNode.java
+++ /dev/null
@@ -1,109 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.planner;
-
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.thrift.TExplainLevel;
-import com.cloudera.impala.thrift.TPlanNode;
-import com.cloudera.impala.thrift.TPlanNodeType;
-import com.google.common.base.Preconditions;
-
-/**
- * A SubplanNode evaluates its right child plan tree for every row from its left child,
- * and returns those rows produced by the right child. The right child is called the
- * 'subplan tree' and the left child the 'input'. A SubplanNode is similar to a join,
- * but different in the following respects. First, a SubplanNode does not do any real
- * work itself. It only returns rows produced by the right child plan tree, which
- * typically has a dependency on the current input row (see SingularRowSrcNode and
- * UnnestNode). Second, no join predicates are required. A SubplanNode does not
- * evaluate any conjuncts.
- */
-public class SubplanNode extends PlanNode {
-  private PlanNode subplan_;
-
-  public SubplanNode(PlanNode input) {
-    super("SUBPLAN");
-    children_.add(input);
-  }
-
-  /**
-   * Sets the subplan of this SubplanNode. Dependent plan nodes such as UnnestNodes
-   * and SingularRowSrcNodes need to know their SubplanNode parent, therefore, setting
-   * the subplan in this SubplanNode is deferred until the subplan tree has been
-   * constructed (which requires the parent SubplanNode to have been constructed).
-   */
-  public void setSubplan(PlanNode subplan) {
-    Preconditions.checkState(children_.size() == 1);
-    subplan_ = subplan;
-    children_.add(subplan);
-    computeTupleIds();
-  }
-
-  @Override
-  public void computeTupleIds() {
-    Preconditions.checkNotNull(subplan_);
-    clearTupleIds();
-    tblRefIds_.addAll(subplan_.getTblRefIds());
-    tupleIds_.addAll(subplan_.getTupleIds());
-    nullableTupleIds_.addAll(subplan_.getNullableTupleIds());
-  }
-
-  @Override
-  public void init(Analyzer analyzer) throws InternalException {
-    // Subplan root must have been set.
-    Preconditions.checkState(children_.size() == 2);
-    // Check that there are no unassigned conjuncts that can be evaluated by this node.
-    // All such conjuncts should have already been assigned in the right child.
-    assignConjuncts(analyzer);
-    Preconditions.checkState(conjuncts_.isEmpty());
-    computeStats(analyzer);
-    outputSmap_ = getChild(1).getOutputSmap();
-    // Save state of assigned conjuncts for join-ordering attempts (see member comment).
-    assignedConjuncts_ = analyzer.getAssignedConjuncts();
-  }
-
-  @Override
-  protected void computeStats(Analyzer analyzer) {
-    super.computeStats(analyzer);
-    if (getChild(0).cardinality_ != -1 && getChild(1).cardinality_ != -1) {
-      cardinality_ =
-          multiplyCardinalities(getChild(0).cardinality_, getChild(1).cardinality_);
-    } else {
-      cardinality_ = -1;
-    }
-    cardinality_ = capAtLimit(cardinality_);
-  }
-
-  @Override
-  protected String getNodeExplainString(String prefix, String detailPrefix,
-      TExplainLevel detailLevel) {
-    StringBuilder output = new StringBuilder();
-    output.append(String.format("%s%s\n", prefix, getDisplayLabel()));
-    if (detailLevel.ordinal() >= TExplainLevel.STANDARD.ordinal()) {
-      if (!conjuncts_.isEmpty()) {
-        output.append(detailPrefix + "predicates: " +
-            getExplainString(conjuncts_) + "\n");
-      }
-    }
-    return output.toString();
-  }
-
-  @Override
-  protected void toThrift(TPlanNode msg) { msg.node_type = TPlanNodeType.SUBPLAN_NODE; }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/planner/TableSink.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/planner/TableSink.java b/fe/src/main/java/com/cloudera/impala/planner/TableSink.java
deleted file mode 100644
index ae73b54..0000000
--- a/fe/src/main/java/com/cloudera/impala/planner/TableSink.java
+++ /dev/null
@@ -1,114 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.planner;
-
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.catalog.HBaseTable;
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.catalog.KuduTable;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.thrift.TSinkAction;
-import com.google.common.base.Preconditions;
-
-import java.util.List;
-
-/**
- * A DataSink that writes into a table.
- *
- */
-public abstract class TableSink extends DataSink {
-
-  /**
-   * Enum to specify the sink operation type.
-   */
-  public enum Op {
-    INSERT {
-      @Override
-      public String toExplainString() { return "INSERT INTO"; }
-
-      @Override
-      public TSinkAction toThrift() { return TSinkAction.INSERT; }
-    },
-    UPDATE {
-      @Override
-      public String toExplainString() { return "UPDATE"; }
-
-      @Override
-      public TSinkAction toThrift() { return TSinkAction.UPDATE; }
-    },
-    DELETE {
-      @Override
-      public String toExplainString() { return "DELETE FROM"; }
-
-      @Override
-      public TSinkAction toThrift() { return TSinkAction.DELETE; }
-    };
-
-    public abstract String toExplainString();
-
-    public abstract TSinkAction toThrift();
-  }
-
-  // Table which is to be populated by this sink.
-  protected final Table targetTable_;
-  // The type of operation to be performed by this sink.
-  protected final Op sinkOp_;
-
-  public TableSink(Table targetTable, Op sinkAction) {
-    targetTable_ = targetTable;
-    sinkOp_ = sinkAction;
-  }
-
-  /**
-   * Returns an output sink appropriate for writing to the given table.
-   * Not all Ops are supported for all tables.
-   * All parameters must be non-null, the lists in particular need to be empty if they
-   * don't make sense for a certain table type.
-   */
-  public static TableSink create(Table table, Op sinkAction,
-      List<Expr> partitionKeyExprs,  List<Integer> referencedColumns,
-      boolean overwrite, boolean ignoreDuplicates) {
-    if (table instanceof HdfsTable) {
-      // Hdfs only supports inserts.
-      Preconditions.checkState(sinkAction == Op.INSERT);
-      // Referenced columns don't make sense for an Hdfs table.
-      Preconditions.checkState(referencedColumns.isEmpty());
-      return new HdfsTableSink(table, partitionKeyExprs, overwrite);
-    } else if (table instanceof HBaseTable) {
-      // HBase only supports inserts.
-      Preconditions.checkState(sinkAction == Op.INSERT);
-      // Partition clause doesn't make sense for an HBase table.
-      Preconditions.checkState(partitionKeyExprs.isEmpty());
-      // HBase doesn't have a way to perform INSERT OVERWRITE
-      Preconditions.checkState(overwrite == false);
-      // Referenced columns don't make sense for an HBase table.
-      Preconditions.checkState(referencedColumns.isEmpty());
-      // Create the HBaseTableSink and return it.
-      return new HBaseTableSink(table);
-    } else if (table instanceof KuduTable) {
-      // Kudu doesn't have a way to perform INSERT OVERWRITE.
-      Preconditions.checkState(overwrite == false);
-      // Partition clauses don't make sense for Kudu inserts.
-      Preconditions.checkState(partitionKeyExprs.isEmpty());
-      return new KuduTableSink(table, sinkAction, referencedColumns, ignoreDuplicates);
-    } else {
-      throw new UnsupportedOperationException(
-          "Cannot create data sink into table of type: " + table.getClass().getName());
-    }
-  }
-}
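
For context, a hedged example of the factory above building an INSERT sink for
an HDFS table; 'hdfsTable' and 'partitionKeyExprs' are placeholders. Per the
preconditions in create(), referencedColumns must be empty for HDFS tables.

  TableSink sink = TableSink.create(
      hdfsTable,                        // an HdfsTable instance
      TableSink.Op.INSERT,              // HDFS tables only support INSERT
      partitionKeyExprs,                // List<Expr>, may be empty
      Collections.<Integer>emptyList(), // referencedColumns: empty for HDFS
      false,                            // overwrite
      false);                           // ignoreDuplicates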

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/planner/UnionNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/planner/UnionNode.java b/fe/src/main/java/com/cloudera/impala/planner/UnionNode.java
deleted file mode 100644
index ef67277..0000000
--- a/fe/src/main/java/com/cloudera/impala/planner/UnionNode.java
+++ /dev/null
@@ -1,231 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.planner;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.List;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.analysis.SlotDescriptor;
-import com.cloudera.impala.analysis.TupleId;
-import com.cloudera.impala.common.Pair;
-import com.cloudera.impala.thrift.TExplainLevel;
-import com.cloudera.impala.thrift.TExpr;
-import com.cloudera.impala.thrift.TPlanNode;
-import com.cloudera.impala.thrift.TPlanNodeType;
-import com.cloudera.impala.thrift.TUnionNode;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-/**
- * Node that merges the results of its child plans by materializing
- * the corresponding result exprs into a new tuple.
- */
-public class UnionNode extends PlanNode {
-  private final static Logger LOG = LoggerFactory.getLogger(UnionNode.class);
-
-  // Expr lists corresponding to the input query stmts.
-  // The ith resultExprList belongs to the ith child.
-  // All exprs are resolved to base tables.
-  protected List<List<Expr>> resultExprLists_ = Lists.newArrayList();
-
-  // Expr lists that originate from constant select stmts.
-  // We keep them separate from the regular expr lists to avoid null children.
-  protected List<List<Expr>> constExprLists_ = Lists.newArrayList();
-
-  // Materialized result/const exprs corresponding to materialized slots.
-  // Set in init() and substituted against the corresponding child's output smap.
-  protected List<List<Expr>> materializedResultExprLists_ = Lists.newArrayList();
-  protected List<List<Expr>> materializedConstExprLists_ = Lists.newArrayList();
-
-  protected final TupleId tupleId_;
-
-  protected UnionNode(PlanNodeId id, TupleId tupleId) {
-    super(id, tupleId.asList(), "UNION");
-    tupleId_ = tupleId;
-  }
-
-  public void addConstExprList(List<Expr> exprs) { constExprLists_.add(exprs); }
-
-  /**
-   * Returns true if this UnionNode has only constant exprs.
-   */
-  public boolean isConstantUnion() { return resultExprLists_.isEmpty(); }
-
-  /**
-   * Add a child tree plus its corresponding resolved resultExprs.
-   */
-  public void addChild(PlanNode node, List<Expr> baseTblResultExprs) {
-    super.addChild(node);
-    resultExprLists_.add(baseTblResultExprs);
-    if (baseTblResultExprs != null) {
-      // if we're materializing output, we can only do that into a single
-      // output tuple
-      Preconditions.checkState(tupleIds_.size() == 1, tupleIds_.size());
-    }
-  }
-
-  @Override
-  public void computeStats(Analyzer analyzer) {
-    super.computeStats(analyzer);
-    cardinality_ = constExprLists_.size();
-    for (PlanNode child: children_) {
-      // ignore missing child cardinality info in the hope it won't matter enough
-      // to change the planning outcome
-      if (child.cardinality_ > 0) {
-        cardinality_ = addCardinalities(cardinality_, child.cardinality_);
-      }
-    }
-    // The number of nodes of a union node is -1 (invalid) if all the referenced tables
-    // are inline views (e.g. select 1 FROM (VALUES(1 x, 1 y)) a FULL OUTER JOIN
-    // (VALUES(1 x, 1 y)) b ON (a.x = b.y)). We need to set the correct value.
-    if (numNodes_ == -1) numNodes_ = 1;
-    cardinality_ = capAtLimit(cardinality_);
-    LOG.debug("stats Union: cardinality=" + Long.toString(cardinality_));
-  }
-
-  /**
-   * Re-order the union's operands descending by their estimated per-host memory,
-   * such that parent nodes can gauge the peak memory consumption of this MergeNode after
-   * opening it during execution (a MergeNode opens its first operand in Open()).
-   * Scan nodes are always ordered last because they can dynamically scale down their
-   * memory usage, whereas many other nodes cannot (e.g., joins, aggregations).
-   * One goal is to decrease the likelihood of a SortNode parent claiming too much
-   * memory in its Open(), possibly causing the mem limit to be hit when subsequent
-   * union operands are executed.
-   * Can only be called on a fragmented plan because this function calls computeCosts()
-   * on this node's children.
-   * TODO: Come up with a good way of handing memory out to individual operators so that
-   * they don't trip each other up. Then remove this function.
-   */
-  public void reorderOperands(Analyzer analyzer) {
-    Preconditions.checkNotNull(fragment_,
-        "Operands can only be reordered on the fragmented plan.");
-
-    // List of estimated per-host memory consumption (first) by child index (second).
-    List<Pair<Long, Integer>> memByChildIdx = Lists.newArrayList();
-    for (int i = 0; i < children_.size(); ++i) {
-      PlanNode child = children_.get(i);
-      child.computeCosts(analyzer.getQueryCtx().request.getQuery_options());
-      memByChildIdx.add(new Pair<Long, Integer>(child.getPerHostMemCost(), i));
-    }
-
-    Collections.sort(memByChildIdx,
-        new Comparator<Pair<Long, Integer>>() {
-      public int compare(Pair<Long, Integer> a, Pair<Long, Integer> b) {
-        PlanNode aNode = children_.get(a.second);
-        PlanNode bNode = children_.get(b.second);
-        // Order scan nodes last because they can dynamically scale down their mem.
-        if (bNode instanceof ScanNode && !(aNode instanceof ScanNode)) return -1;
-        if (aNode instanceof ScanNode && !(bNode instanceof ScanNode)) return 1;
-        long diff = b.first - a.first;
-        return (diff < 0 ? -1 : (diff > 0 ? 1 : 0));
-      }
-    });
-
-    List<List<Expr>> newResultExprLists = Lists.newArrayList();
-    ArrayList<PlanNode> newChildren = Lists.newArrayList();
-    for (Pair<Long, Integer> p: memByChildIdx) {
-      newResultExprLists.add(resultExprLists_.get(p.second));
-      newChildren.add(children_.get(p.second));
-    }
-    resultExprLists_ = newResultExprLists;
-    children_ = newChildren;
-  }
-
-  /**
-   * Must be called after addChild()/addConstExprList(). Computes the materialized
-   * result/const expr lists based on the materialized slots of this UnionNode's
-   * produced tuple. The UnionNode doesn't need an smap: like a ScanNode, it
-   * materializes an original tuple.
-   * There is no need to call assignConjuncts() because all non-constant conjuncts
-   * have already been assigned to the union operands, and all constant conjuncts have
-   * been evaluated during registration to set analyzer.hasEmptyResultSet_.
-   */
-  @Override
-  public void init(Analyzer analyzer) {
-    Preconditions.checkState(conjuncts_.isEmpty());
-    computeMemLayout(analyzer);
-    computeStats(analyzer);
-
-    // drop resultExprs/constExprs that aren't getting materialized (= where the
-    // corresponding output slot isn't being materialized)
-    materializedResultExprLists_.clear();
-    Preconditions.checkState(resultExprLists_.size() == children_.size());
-    List<SlotDescriptor> slots = analyzer.getDescTbl().getTupleDesc(tupleId_).getSlots();
-    for (int i = 0; i < resultExprLists_.size(); ++i) {
-      List<Expr> exprList = resultExprLists_.get(i);
-      List<Expr> newExprList = Lists.newArrayList();
-      Preconditions.checkState(exprList.size() == slots.size());
-      for (int j = 0; j < exprList.size(); ++j) {
-        if (slots.get(j).isMaterialized()) newExprList.add(exprList.get(j));
-      }
-      materializedResultExprLists_.add(
-          Expr.substituteList(newExprList, getChild(i).getOutputSmap(), analyzer, true));
-    }
-    Preconditions.checkState(
-        materializedResultExprLists_.size() == getChildren().size());
-
-    materializedConstExprLists_.clear();
-    for (List<Expr> exprList: constExprLists_) {
-      Preconditions.checkState(exprList.size() == slots.size());
-      List<Expr> newExprList = Lists.newArrayList();
-      for (int i = 0; i < exprList.size(); ++i) {
-        if (slots.get(i).isMaterialized()) newExprList.add(exprList.get(i));
-      }
-      materializedConstExprLists_.add(newExprList);
-    }
-  }
-
-  @Override
-  protected void toThrift(TPlanNode msg) {
-    Preconditions.checkState(materializedResultExprLists_.size() == children_.size());
-    List<List<TExpr>> texprLists = Lists.newArrayList();
-    for (List<Expr> exprList: materializedResultExprLists_) {
-      texprLists.add(Expr.treesToThrift(exprList));
-    }
-    List<List<TExpr>> constTexprLists = Lists.newArrayList();
-    for (List<Expr> constTexprList: materializedConstExprLists_) {
-      constTexprLists.add(Expr.treesToThrift(constTexprList));
-    }
-    msg.union_node = new TUnionNode(tupleId_.asInt(), texprLists, constTexprLists);
-    msg.node_type = TPlanNodeType.UNION_NODE;
-  }
-
-  @Override
-  protected String getNodeExplainString(String prefix, String detailPrefix,
-      TExplainLevel detailLevel) {
-    StringBuilder output = new StringBuilder();
-    output.append(String.format("%s%s:%s\n", prefix, id_.toString(), displayName_));
-    // A UnionNode may have predicates if a union is used inside an inline view,
-    // and the enclosing select stmt has predicates referring to the inline view.
-    if (!conjuncts_.isEmpty()) {
-      output.append(detailPrefix + "predicates: " + getExplainString(conjuncts_) + "\n");
-    }
-    if (!constExprLists_.isEmpty()) {
-      output.append(detailPrefix + "constant-operands=" + constExprLists_.size() + "\n");
-    }
-    return output.toString();
-  }
-}
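
The ordering rule applied by reorderOperands() above (scan nodes last, all other children by descending per-host memory estimate) can be shown in isolation. In the sketch below, Operand is a hypothetical stand-in for a child PlanNode plus its memory estimate; this is illustrative only and not part of the commit.

import java.util.ArrayList;
import java.util.List;

// Sketch of the operand ordering used by UnionNode.reorderOperands() (illustrative only).
public class UnionOperandOrderSketch {
  static class Operand {
    final String name;
    final long perHostMem;
    final boolean isScan;
    Operand(String name, long perHostMem, boolean isScan) {
      this.name = name; this.perHostMem = perHostMem; this.isScan = isScan;
    }
  }

  static void reorder(List<Operand> operands) {
    operands.sort((a, b) -> {
      // Scan nodes go last: they can scale their memory usage down at runtime.
      if (b.isScan && !a.isScan) return -1;
      if (a.isScan && !b.isScan) return 1;
      // Otherwise order by estimated per-host memory, descending.
      return Long.compare(b.perHostMem, a.perHostMem);
    });
  }

  public static void main(String[] args) {
    List<Operand> ops = new ArrayList<>();
    ops.add(new Operand("scan", 512L << 20, true));
    ops.add(new Operand("agg", 128L << 20, false));
    ops.add(new Operand("join", 256L << 20, false));
    reorder(ops);
    for (Operand o : ops) System.out.println(o.name);  // join, agg, scan
  }
}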

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/planner/UnnestNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/planner/UnnestNode.java b/fe/src/main/java/com/cloudera/impala/planner/UnnestNode.java
deleted file mode 100644
index 3a45882..0000000
--- a/fe/src/main/java/com/cloudera/impala/planner/UnnestNode.java
+++ /dev/null
@@ -1,107 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.planner;
-
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.CollectionTableRef;
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.thrift.TExplainLevel;
-import com.cloudera.impala.thrift.TPlanNode;
-import com.cloudera.impala.thrift.TPlanNodeType;
-import com.cloudera.impala.thrift.TUnnestNode;
-import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
-
-/**
- * An UnnestNode scans over a collection materialized in memory, and returns
- * one row per item in the collection.
- * An UnnestNode can only appear in the plan tree of a SubplanNode.
- */
-public class UnnestNode extends PlanNode {
-  private final SubplanNode containingSubplanNode_;
-  private final CollectionTableRef tblRef_;
-  private final Expr collectionExpr_;
-
-  public UnnestNode(PlanNodeId id, SubplanNode containingSubplanNode,
-      CollectionTableRef tblRef) {
-    super(id, tblRef.getDesc().getId().asList(), "UNNEST");
-    containingSubplanNode_ = containingSubplanNode;
-    tblRef_ = tblRef;
-    collectionExpr_ = tblRef_.getCollectionExpr();
-    // Assume the collection expr has been fully resolved in analysis.
-    Preconditions.checkState(
-        collectionExpr_.isBoundByTupleIds(containingSubplanNode.getChild(0).tupleIds_));
-  }
-
-  @Override
-  public void init(Analyzer analyzer) throws ImpalaException {
-    // Do not assign binding predicates or predicates for enforcing slot equivalences
-    // because they must have been assigned in the scan node materializing the
-    // collection-typed slot.
-    super.init(analyzer);
-    conjuncts_ = orderConjunctsByCost(conjuncts_);
-
-    // Unnest is like a scan and must materialize the slots of its conjuncts.
-    analyzer.materializeSlots(conjuncts_);
-    computeMemLayout(analyzer);
-  }
-
-  @Override
-  public void computeStats(Analyzer analyzer) {
-    super.computeStats(analyzer);
-    cardinality_ = PlannerContext.AVG_COLLECTION_SIZE;
-    // The containing SubplanNode has not yet been initialized, so get the number
-    // of nodes from the SubplanNode's input.
-    numNodes_ = containingSubplanNode_.getChild(0).getNumNodes();
-    cardinality_ = capAtLimit(cardinality_);
-  }
-
-  @Override
-  protected String getNodeExplainString(String prefix, String detailPrefix,
-      TExplainLevel detailLevel) {
-    StringBuilder output = new StringBuilder();
-    output.append(String.format("%s%s [%s]\n", prefix, getDisplayLabel(),
-        getDisplayLabelDetail()));
-    if (detailLevel.ordinal() >= TExplainLevel.EXTENDED.ordinal()) {
-      output.append(String.format(
-          "%sparent-subplan=%s\n", detailPrefix, containingSubplanNode_.getId()));
-    }
-    if (detailLevel.ordinal() >= TExplainLevel.STANDARD.ordinal()) {
-      if (!conjuncts_.isEmpty()) {
-        output.append(
-            detailPrefix + "predicates: " + getExplainString(conjuncts_) + "\n");
-      }
-    }
-    return output.toString();
-  }
-
-  @Override
-  protected String getDisplayLabelDetail() {
-    StringBuilder strBuilder = new StringBuilder();
-    strBuilder.append(Joiner.on(".").join(tblRef_.getPath()));
-    if (tblRef_.hasExplicitAlias()) strBuilder.append(" " + tblRef_.getExplicitAlias());
-    return strBuilder.toString();
-  }
-
-  @Override
-  protected void toThrift(TPlanNode msg) {
-    msg.node_type = TPlanNodeType.UNNEST_NODE;
-    msg.setUnnest_node(new TUnnestNode(collectionExpr_.treeToThrift()));
-  }
-}
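
Conceptually, the node above emits one output row per item of a collection that has already been materialized by the parent SubplanNode's input. A toy illustration of that row-per-element semantics, unrelated to the actual execution engine:

import java.util.Arrays;
import java.util.List;

// Toy illustration of UNNEST semantics: one output row per collection element.
public class UnnestSemanticsSketch {
  public static void main(String[] args) {
    // One input row whose "addresses" column is a collection-typed value.
    String customer = "alice";
    List<String> addresses = Arrays.asList("home", "office", "pobox");

    // Unnesting the collection produces one (customer, address) row per element.
    for (String address : addresses) {
      System.out.println(customer + "\t" + address);
    }
  }
}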

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/planner/ValueRange.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/planner/ValueRange.java b/fe/src/main/java/com/cloudera/impala/planner/ValueRange.java
deleted file mode 100644
index e76da9d..0000000
--- a/fe/src/main/java/com/cloudera/impala/planner/ValueRange.java
+++ /dev/null
@@ -1,121 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.planner;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.BinaryPredicate;
-import com.cloudera.impala.analysis.CompoundPredicate;
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.analysis.Predicate;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.service.FeSupport;
-import com.google.common.base.Preconditions;
-
-/**
- * Representation of a two-sided interval of values. Either bound is optional,
- * and can be exclusive or inclusive. For a range representing a single
- * value, both bounds are set.
- */
-public class ValueRange {
-  private final static Logger LOG = LoggerFactory.getLogger(ValueRange.class);
-
-  private Expr lowerBound_;
-  private boolean lowerBoundInclusive_;
-  private Expr upperBound_;
-  private boolean upperBoundInclusive_;
-
-  Expr getLowerBound() { return lowerBound_; }
-  void setLowerBound(Expr e) { lowerBound_ = e; }
-  boolean getLowerBoundInclusive() { return lowerBoundInclusive_; }
-  void setLowerBoundInclusive(boolean b) { lowerBoundInclusive_ = b; }
-  Expr getUpperBound() { return upperBound_; }
-  void setUpperBound(Expr e) { upperBound_ = e; }
-  boolean getUpperBoundInclusive() { return upperBoundInclusive_; }
-  void setUpperBoundInclusive(boolean b) { upperBoundInclusive_ = b; }
-
-  static public ValueRange createEqRange(Expr valueExpr) {
-    ValueRange result = new ValueRange();
-    result.lowerBound_ = valueExpr;
-    result.lowerBoundInclusive_ = true;
-    result.upperBound_ = valueExpr;
-    result.upperBoundInclusive_ = true;
-    return result;
-  }
-
-  public boolean isEqRange() {
-    return lowerBound_ == upperBound_ && lowerBoundInclusive_ && upperBoundInclusive_;
-  }
-
-  /**
-   * Determines whether a given constant expr is within the range.
-   * Does this by constructing a predicate that represents the range,
-   * with the valueExpr inserted appropriately, and then calls the
-   * backend for evaluation.
-   */
-  public boolean isInRange(Analyzer analyzer, Expr valueExpr) throws
-      InternalException {
-    Preconditions.checkState(valueExpr.isConstant());
-    Preconditions.checkState(lowerBound_ != null || upperBound_ != null);
-
-    // construct predicate
-    Predicate p = null;
-    if (lowerBound_ != null && upperBound_ != null
-        && lowerBoundInclusive_ && upperBoundInclusive_
-        && lowerBound_ == upperBound_) {
-      // construct "=" predicate
-      p = new BinaryPredicate(BinaryPredicate.Operator.EQ, valueExpr, lowerBound_);
-    } else {
-      // construct range predicate
-      if (lowerBound_ != null) {
-        p = new BinaryPredicate(
-            lowerBoundInclusive_
-              ? BinaryPredicate.Operator.GE : BinaryPredicate.Operator.GT,
-            valueExpr, lowerBound_);
-      }
-      if (upperBound_ != null) {
-        Predicate p2 = new BinaryPredicate(
-            upperBoundInclusive_
-              ? BinaryPredicate.Operator.GE : BinaryPredicate.Operator.GT,
-            upperBound_, valueExpr);
-        if (p != null) {
-          p = new CompoundPredicate(CompoundPredicate.Operator.AND, p, p2);
-        } else {
-          p = p2;
-        }
-      }
-    }
-
-    Preconditions.checkState(p.isConstant());
-    // analyze to insert casts, etc.
-    try {
-      p.analyze(analyzer);
-    } catch (AnalysisException e) {
-      // this should never happen
-      throw new InternalException(
-          "couldn't analyze predicate " + p.toSql() + "\n" + e.toString());
-    }
-
-    // call backend
-    return FeSupport.EvalPredicate(p, analyzer.getQueryCtx());
-  }
-
-}
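
The bound checks encoded by ValueRange above come down to comparing a value against optional inclusive or exclusive endpoints; the real class expresses the same thing as BinaryPredicate/CompoundPredicate exprs and evaluates them in the backend via FeSupport. A self-contained sketch specialized to long values (illustrative only, not part of the commit):

// Sketch of the bound checks ValueRange encodes, specialized to long values.
public class ValueRangeSketch {
  private final Long lower;            // null means unbounded below
  private final boolean lowerInclusive;
  private final Long upper;            // null means unbounded above
  private final boolean upperInclusive;

  ValueRangeSketch(Long lower, boolean lowerInclusive, Long upper, boolean upperInclusive) {
    this.lower = lower; this.lowerInclusive = lowerInclusive;
    this.upper = upper; this.upperInclusive = upperInclusive;
  }

  static ValueRangeSketch eqRange(long value) {
    // Equality is modeled as an inclusive range with identical bounds.
    return new ValueRangeSketch(value, true, value, true);
  }

  boolean isInRange(long value) {
    if (lower != null && (lowerInclusive ? value < lower : value <= lower)) return false;
    if (upper != null && (upperInclusive ? value > upper : value >= upper)) return false;
    return true;
  }

  public static void main(String[] args) {
    ValueRangeSketch range = new ValueRangeSketch(10L, true, 20L, false);  // [10, 20)
    System.out.println(range.isInRange(10));       // true
    System.out.println(range.isInRange(20));       // false
    System.out.println(eqRange(7).isInRange(7));   // true
  }
}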

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/service/BackendConfig.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/service/BackendConfig.java b/fe/src/main/java/com/cloudera/impala/service/BackendConfig.java
deleted file mode 100644
index e98ab571..0000000
--- a/fe/src/main/java/com/cloudera/impala/service/BackendConfig.java
+++ /dev/null
@@ -1,49 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.service;
-
-/**
- * This class is meant to provide the FE with impalad backend configuration parameters,
- * including command line arguments.
- * TODO: Remove this class and either
- * a) Figure out if there's a standard way to access flags from java
- * b) Create a util/gflags.java that lets us access the be flags
- */
-public class BackendConfig {
-  public static BackendConfig INSTANCE = new BackendConfig();
-
-  // Default read block size (in bytes). This is the same as
-  // the default FLAGS_read_size used by the IO manager in the backend.
-  private final long READ_SIZE;
-
-  // This is overridden by JniFrontend/JniCatalog classes with user-set configuration.
-  // TODO: Read this from backend instead of using static variables.
-  private static boolean allowAuthToLocalRules_ = false;
-
-  private BackendConfig() {
-    // TODO: Populate these by making calls to the backend instead of default constants.
-    READ_SIZE = 8 * 1024 * 1024L;
-  }
-
-  public long getReadSize() { return READ_SIZE; }
-
-  public static boolean isAuthToLocalEnabled() { return allowAuthToLocalRules_; }
-  public static void setAuthToLocal(boolean authToLocal) {
-    allowAuthToLocalRules_ = authToLocal;
-  }
-}
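
A minimal usage sketch of the class above, assuming it is on the classpath; in production the auth-to-local flag is flipped by JniFrontend/JniCatalog at startup rather than by hand. Illustrative only, not part of the commit.

import com.cloudera.impala.service.BackendConfig;

// Illustrative usage of BackendConfig as defined above.
public class BackendConfigUsageSketch {
  public static void main(String[] args) {
    long readSize = BackendConfig.INSTANCE.getReadSize();  // 8MB default read block size
    BackendConfig.setAuthToLocal(true);                    // normally done by JniFrontend/JniCatalog
    if (BackendConfig.isAuthToLocalEnabled()) {
      System.out.println("auth_to_local enabled, read size = " + readSize + " bytes");
    }
  }
}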



http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/catalog/HiveStorageDescriptorFactory.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/catalog/HiveStorageDescriptorFactory.java b/fe/src/main/java/com/cloudera/impala/catalog/HiveStorageDescriptorFactory.java
deleted file mode 100644
index 9644bbb..0000000
--- a/fe/src/main/java/com/cloudera/impala/catalog/HiveStorageDescriptorFactory.java
+++ /dev/null
@@ -1,63 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.catalog;
-
-import java.util.HashMap;
-
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
-
-import com.cloudera.impala.thrift.THdfsFileFormat;
-import com.google.common.base.Preconditions;
-
-public class HiveStorageDescriptorFactory {
-  /**
-   * Creates and returns a Hive StorageDescriptor for the given FileFormat and RowFormat.
-   * Currently supports creating StorageDescriptors for Parquet, Text, Sequence, Avro and
-   * RC file.
-   * TODO: Add support for HBase
-   */
-  public static StorageDescriptor createSd(THdfsFileFormat fileFormat,
-      RowFormat rowFormat) {
-    Preconditions.checkNotNull(fileFormat);
-    Preconditions.checkNotNull(rowFormat);
-
-    StorageDescriptor sd = new StorageDescriptor();
-    sd.setSerdeInfo(new org.apache.hadoop.hive.metastore.api.SerDeInfo());
-    sd.getSerdeInfo().setParameters(new HashMap<String, String>());
-    // The compressed flag is not used to determine whether the table is compressed or
-    // not. Instead, we use the input format or the filename.
-    sd.setCompressed(false);
-    HdfsFileFormat hdfsFileFormat = HdfsFileFormat.fromThrift(fileFormat);
-    sd.setInputFormat(hdfsFileFormat.inputFormat());
-    sd.setOutputFormat(hdfsFileFormat.outputFormat());
-    sd.getSerdeInfo().setSerializationLib(hdfsFileFormat.serializationLib());
-
-    if (rowFormat.getFieldDelimiter() != null) {
-      sd.getSerdeInfo().putToParameters(
-          "serialization.format", rowFormat.getFieldDelimiter());
-      sd.getSerdeInfo().putToParameters("field.delim", rowFormat.getFieldDelimiter());
-    }
-    if (rowFormat.getEscapeChar() != null) {
-      sd.getSerdeInfo().putToParameters("escape.delim", rowFormat.getEscapeChar());
-    }
-    if (rowFormat.getLineDelimiter() != null) {
-      sd.getSerdeInfo().putToParameters("line.delim", rowFormat.getLineDelimiter());
-    }
-    return sd;
-  }
-}
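
The delimiter handling in createSd() above boils down to mapping the optional row-format fields onto serde parameters. The following standalone sketch restates that mapping with a plain map in place of the Hive SerDeInfo; it is illustrative only and not part of the commit.

import java.util.HashMap;
import java.util.Map;

// Sketch of the row-format-to-serde-parameter mapping applied by createSd() (illustrative only).
public class SerdeParamsSketch {
  static Map<String, String> serdeParams(String fieldDelim, String escapeChar, String lineDelim) {
    Map<String, String> params = new HashMap<>();
    if (fieldDelim != null) {
      params.put("serialization.format", fieldDelim);
      params.put("field.delim", fieldDelim);
    }
    if (escapeChar != null) params.put("escape.delim", escapeChar);
    if (lineDelim != null) params.put("line.delim", lineDelim);
    return params;
  }

  public static void main(String[] args) {
    System.out.println(serdeParams(",", "\\", "\n"));
  }
}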

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/catalog/ImpaladCatalog.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/catalog/ImpaladCatalog.java b/fe/src/main/java/com/cloudera/impala/catalog/ImpaladCatalog.java
deleted file mode 100644
index 02c9747..0000000
--- a/fe/src/main/java/com/cloudera/impala/catalog/ImpaladCatalog.java
+++ /dev/null
@@ -1,448 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.catalog;
-
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.log4j.Logger;
-import org.apache.thrift.TException;
-
-import com.cloudera.impala.catalog.MetaStoreClientPool.MetaStoreClient;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.thrift.TCatalogObject;
-import com.cloudera.impala.thrift.TCatalogObjectType;
-import com.cloudera.impala.thrift.TDataSource;
-import com.cloudera.impala.thrift.TDatabase;
-import com.cloudera.impala.thrift.TFunction;
-import com.cloudera.impala.thrift.TPrivilege;
-import com.cloudera.impala.thrift.TRole;
-import com.cloudera.impala.thrift.TTable;
-import com.cloudera.impala.thrift.TUniqueId;
-import com.cloudera.impala.thrift.TUpdateCatalogCacheRequest;
-import com.cloudera.impala.thrift.TUpdateCatalogCacheResponse;
-
-/**
- * Thread safe Catalog for an Impalad.  The Impalad catalog can be updated either via
- * a StateStore heartbeat or by directly applying the result of a catalog operation to
- * the CatalogCache. All updates are applied using the updateCatalog() function.
- * Table metadata is loaded lazily. The CatalogServer initially broadcasts (via the
- * statestore) the known table names (as IncompleteTables). These table names are added
- * to the Impalad catalog cache and when one of the tables is accessed, the impalad will
- * make an RPC to the CatalogServer to request loading the complete table metadata.
- * In both cases, we need to ensure that work from one update is not "undone" by another
- * update. To handle this the ImpaladCatalog does the following:
- * - Tracks the overall catalog version last received in a state store heartbeat, this
- *   version is maintained by the catalog server and it is always guaranteed that
- *   this impalad's catalog will never contain any objects older than this version
- *   (any updates with a lower version number are ignored).
- * - For updated/new objects, check if the object already exists in the
- *   catalog cache. If it does, only apply the update if the catalog version is > the
- *   existing object's catalog version. Also keep a log of all dropped catalog objects
- *   (and the version they were dropped in). Before updating any object, check if it was
- *   dropped in a later version. If so, ignore the update.
- * - Before dropping any catalog object, see if the object already exists in the catalog
- *   cache. If it does, only drop the object if the version of the drop is > that
- *   object's catalog version.
- * The CatalogServiceId is also tracked to detect if a different instance of the catalog
- * service has been started, in which case a full topic update is required.
- */
-public class ImpaladCatalog extends Catalog {
-  private static final Logger LOG = Logger.getLogger(ImpaladCatalog.class);
-  private static final TUniqueId INITIAL_CATALOG_SERVICE_ID = new TUniqueId(0L, 0L);
-
-  // The last known Catalog Service ID. If the ID changes, it indicates the CatalogServer
-  // has restarted.
-  private TUniqueId catalogServiceId_ = INITIAL_CATALOG_SERVICE_ID;
-
-  // The catalog version received in the last StateStore heartbeat. It is guaranteed
-  // all objects in the catalog have at least this version. Because updates may
-  // be applied out of band of a StateStore heartbeat, it is possible the catalog
-  // contains some objects newer than this version.
-  private long lastSyncedCatalogVersion_ = Catalog.INITIAL_CATALOG_VERSION;
-
-  // Flag to determine if the Catalog is ready to accept user requests. See isReady().
-  private final AtomicBoolean isReady_ = new AtomicBoolean(false);
-
-  // Tracks modifications to this Impalad's catalog from direct updates to the cache.
-  private final CatalogDeltaLog catalogDeltaLog_ = new CatalogDeltaLog();
-
-  // Object that is used to synchronize on and signal when a catalog update is received.
-  private final Object catalogUpdateEventNotifier_ = new Object();
-
-  /**
-   * C'tor used by tests that need to validate the ImpaladCatalog outside of the
-   * CatalogServer.
-   */
-  public ImpaladCatalog() {
-    super(false);
-  }
-
-  /**
-   * Updates the internal Catalog based on the given TCatalogUpdateReq.
-   * This method:
-   * 1) Updates all databases in the Catalog
-   * 2) Updates all tables, views, and functions in the Catalog
-   * 3) Removes all dropped tables, views, and functions
-   * 4) Removes all dropped databases
-   *
-   * This method is called once per statestore heartbeat and is guaranteed the same
-   * object will not be in both the "updated" list and the "removed" list (it is
-   * a detail handled by the statestore).
-   * Catalog updates are ordered by the object type with the dependent objects coming
-   * first. That is, database "foo" will always come before table "foo.bar".
- * Synchronized because updateCatalog() can be called during a statestore update or
-   * during a direct-DDL operation and catalogServiceId_ and lastSyncedCatalogVersion_
-   * must be protected.
-   */
-  public synchronized TUpdateCatalogCacheResponse updateCatalog(
-    TUpdateCatalogCacheRequest req) throws CatalogException {
-    // Check for changes in the catalog service ID.
-    if (!catalogServiceId_.equals(req.getCatalog_service_id())) {
-      boolean firstRun = catalogServiceId_.equals(INITIAL_CATALOG_SERVICE_ID);
-      catalogServiceId_ = req.getCatalog_service_id();
-      if (!firstRun) {
-        // Throw an exception which will trigger a full topic update request.
-        throw new CatalogException("Detected catalog service ID change. Aborting " +
-            "updateCatalog()");
-      }
-    }
-
-    // First process all updates
-    long newCatalogVersion = lastSyncedCatalogVersion_;
-    for (TCatalogObject catalogObject: req.getUpdated_objects()) {
-      if (catalogObject.getType() == TCatalogObjectType.CATALOG) {
-        newCatalogVersion = catalogObject.getCatalog_version();
-      } else {
-        try {
-          addCatalogObject(catalogObject);
-        } catch (Exception e) {
-          LOG.error("Error adding catalog object: " + e.getMessage(), e);
-        }
-      }
-    }
-
-    // Now remove all objects from the catalog. Removing a database before removing
-    // its child tables/functions is fine. If that happens, the removal of the child
-    // object will be a no-op.
-    for (TCatalogObject catalogObject: req.getRemoved_objects()) {
-      removeCatalogObject(catalogObject, newCatalogVersion);
-    }
-    lastSyncedCatalogVersion_ = newCatalogVersion;
-    // Cleanup old entries in the log.
-    catalogDeltaLog_.garbageCollect(lastSyncedCatalogVersion_);
-    isReady_.set(true);
-
-    // Notify all the threads waiting on a catalog update.
-    synchronized (catalogUpdateEventNotifier_) {
-      catalogUpdateEventNotifier_.notifyAll();
-    }
-
-    return new TUpdateCatalogCacheResponse(catalogServiceId_);
-  }
-
-  /**
-   * Causes the calling thread to wait until a catalog update notification has been sent
-   * or the given timeout has been reached. A timeout value of 0 indicates an indefinite
-   * wait. Does not protect against spurious wakeups, so this should be called in a loop.
-   *
-   */
-  public void waitForCatalogUpdate(long timeoutMs) {
-    synchronized (catalogUpdateEventNotifier_) {
-      try {
-        catalogUpdateEventNotifier_.wait(timeoutMs);
-      } catch (InterruptedException e) {
-        // Ignore
-      }
-    }
-  }
-
-  /**
-   * Returns the Table object for the given dbName/tableName. Returns null
-   * if the table does not exist. Will throw a TableLoadingException if the table's
-   * metadata was not able to be loaded successfully and DatabaseNotFoundException
-   * if the parent database does not exist.
-   */
-  @Override
-  public Table getTable(String dbName, String tableName)
-      throws CatalogException {
-    Table table = super.getTable(dbName, tableName);
-    if (table == null) return null;
-
-    if (table.isLoaded() && table instanceof IncompleteTable) {
-      // If there were problems loading this table's metadata, throw an exception
-      // when it is accessed.
-      ImpalaException cause = ((IncompleteTable) table).getCause();
-      if (cause instanceof TableLoadingException) throw (TableLoadingException) cause;
-      throw new TableLoadingException("Missing metadata for table: " + tableName, cause);
-    }
-    return table;
-  }
-
-  /**
-   * Returns the HDFS path where the metastore would create the given table. If the table
-   * has a "location" set, that will be returned. Otherwise the path will be resolved
-   * based on the location of the parent database. The metastore folder hierarchy is:
-   * <warehouse directory>/<db name>.db/<table name>
-   * Except for items in the default database which will be:
-   * <warehouse directory>/<table name>
-   * This method handles both of these cases.
-   */
-  public Path getTablePath(org.apache.hadoop.hive.metastore.api.Table msTbl)
-      throws NoSuchObjectException, MetaException, TException {
-    try (MetaStoreClient msClient = getMetaStoreClient()) {
-      // If the table did not have its path set, build the path based on the
-      // location property of the parent database.
-      if (msTbl.getSd().getLocation() == null || msTbl.getSd().getLocation().isEmpty()) {
-        String dbLocation =
-            msClient.getHiveClient().getDatabase(msTbl.getDbName()).getLocationUri();
-        return new Path(dbLocation, msTbl.getTableName().toLowerCase());
-      } else {
-        return new Path(msTbl.getSd().getLocation());
-      }
-    }
-  }
-
-  /**
-   *  Adds the given TCatalogObject to the catalog cache. The update may be ignored
-   *  (considered out of date) if:
-   *  1) An item exists in the catalog cache with a version > than the given
-   *     TCatalogObject's version.
-   *  2) The catalogDeltaLog_ contains an entry for this object with a version
-   *     > than the given TCatalogObject's version.
-   */
-  private void addCatalogObject(TCatalogObject catalogObject)
-      throws TableLoadingException, DatabaseNotFoundException {
-    // This item is out of date and should not be applied to the catalog.
-    if (catalogDeltaLog_.wasObjectRemovedAfter(catalogObject)) {
-      LOG.debug(String.format("Skipping update because a matching object was removed " +
-          "in a later catalog version: %s", catalogObject));
-      return;
-    }
-
-    switch(catalogObject.getType()) {
-      case DATABASE:
-        addDb(catalogObject.getDb(), catalogObject.getCatalog_version());
-        break;
-      case TABLE:
-      case VIEW:
-        addTable(catalogObject.getTable(), catalogObject.getCatalog_version());
-        break;
-      case FUNCTION:
-        addFunction(catalogObject.getFn(), catalogObject.getCatalog_version());
-        break;
-      case DATA_SOURCE:
-        addDataSource(catalogObject.getData_source(), catalogObject.getCatalog_version());
-        break;
-      case ROLE:
-        Role role = Role.fromThrift(catalogObject.getRole());
-        role.setCatalogVersion(catalogObject.getCatalog_version());
-        authPolicy_.addRole(role);
-        break;
-      case PRIVILEGE:
-        RolePrivilege privilege =
-            RolePrivilege.fromThrift(catalogObject.getPrivilege());
-        privilege.setCatalogVersion(catalogObject.getCatalog_version());
-        try {
-          authPolicy_.addPrivilege(privilege);
-        } catch (CatalogException e) {
-          LOG.error("Error adding privilege: ", e);
-        }
-        break;
-      case HDFS_CACHE_POOL:
-        HdfsCachePool cachePool = new HdfsCachePool(catalogObject.getCache_pool());
-        cachePool.setCatalogVersion(catalogObject.getCatalog_version());
-        hdfsCachePools_.add(cachePool);
-        break;
-      default:
-        throw new IllegalStateException(
-            "Unexpected TCatalogObjectType: " + catalogObject.getType());
-    }
-  }
-
-  /**
-   *  Removes the matching TCatalogObject from the catalog, if one exists and its
-   *  catalog version is < the catalog version of this drop operation.
-   *  Note that drop operations that come from statestore heartbeats always have a
-   *  version of 0. To determine the drop version for statestore updates,
-   *  the catalog version from the current update is used. This is okay because there
-   *  can never be a catalog update from the statestore that contains a drop
-   *  and an addition of the same object. For more details on how drop
-   *  versioning works, see CatalogServerCatalog.java
-   */
-  private void removeCatalogObject(TCatalogObject catalogObject,
-      long currentCatalogUpdateVersion) {
-    // The TCatalogObject associated with a drop operation from a state store
-    // heartbeat will always have a version of zero. Because no update from
-    // the state store can contain both a drop and an addition of the same object,
-    // we can assume the drop version is the current catalog version of this update.
-    // If the TCatalogObject contains a version that != 0, it indicates the drop
-    // came from a direct update.
-    long dropCatalogVersion = catalogObject.getCatalog_version() == 0 ?
-        currentCatalogUpdateVersion : catalogObject.getCatalog_version();
-
-    switch(catalogObject.getType()) {
-      case DATABASE:
-        removeDb(catalogObject.getDb(), dropCatalogVersion);
-        break;
-      case TABLE:
-      case VIEW:
-        removeTable(catalogObject.getTable(), dropCatalogVersion);
-        break;
-      case FUNCTION:
-        removeFunction(catalogObject.getFn(), dropCatalogVersion);
-        break;
-      case DATA_SOURCE:
-        removeDataSource(catalogObject.getData_source(), dropCatalogVersion);
-        break;
-      case ROLE:
-        removeRole(catalogObject.getRole(), dropCatalogVersion);
-        break;
-      case PRIVILEGE:
-        removePrivilege(catalogObject.getPrivilege(), dropCatalogVersion);
-        break;
-      case HDFS_CACHE_POOL:
-        HdfsCachePool existingItem =
-            hdfsCachePools_.get(catalogObject.getCache_pool().getPool_name());
-        if (existingItem.getCatalogVersion() > catalogObject.getCatalog_version()) {
-          hdfsCachePools_.remove(catalogObject.getCache_pool().getPool_name());
-        }
-        break;
-      default:
-        throw new IllegalStateException(
-            "Unexpected TCatalogObjectType: " + catalogObject.getType());
-    }
-
-    if (catalogObject.getCatalog_version() > lastSyncedCatalogVersion_) {
-      catalogDeltaLog_.addRemovedObject(catalogObject);
-    }
-  }
-
-  private void addDb(TDatabase thriftDb, long catalogVersion) {
-    Db existingDb = getDb(thriftDb.getDb_name());
-    if (existingDb == null ||
-        existingDb.getCatalogVersion() < catalogVersion) {
-      Db newDb = Db.fromTDatabase(thriftDb, this);
-      newDb.setCatalogVersion(catalogVersion);
-      addDb(newDb);
-    }
-  }
-
-  private void addTable(TTable thriftTable, long catalogVersion)
-      throws TableLoadingException {
-    Db db = getDb(thriftTable.db_name);
-    if (db == null) {
-      LOG.debug("Parent database of table does not exist: " +
-          thriftTable.db_name + "." + thriftTable.tbl_name);
-      return;
-    }
-
-    Table newTable = Table.fromThrift(db, thriftTable);
-    newTable.setCatalogVersion(catalogVersion);
-    db.addTable(newTable);
-  }
-
-  private void addFunction(TFunction fn, long catalogVersion) {
-    Function function = Function.fromThrift(fn);
-    function.setCatalogVersion(catalogVersion);
-    Db db = getDb(function.getFunctionName().getDb());
-    if (db == null) {
-      LOG.debug("Parent database of function does not exist: " + function.getName());
-      return;
-    }
-    Function existingFn = db.getFunction(fn.getSignature());
-    if (existingFn == null ||
-        existingFn.getCatalogVersion() < catalogVersion) {
-      db.addFunction(function);
-    }
-  }
-
-  private void addDataSource(TDataSource thrift, long catalogVersion) {
-    DataSource dataSource = DataSource.fromThrift(thrift);
-    dataSource.setCatalogVersion(catalogVersion);
-    addDataSource(dataSource);
-  }
-
-  private void removeDataSource(TDataSource thrift, long dropCatalogVersion) {
-    removeDataSource(thrift.getName());
-  }
-
-  private void removeDb(TDatabase thriftDb, long dropCatalogVersion) {
-    Db db = getDb(thriftDb.getDb_name());
-    if (db != null && db.getCatalogVersion() < dropCatalogVersion) {
-      removeDb(db.getName());
-    }
-  }
-
-  private void removeTable(TTable thriftTable, long dropCatalogVersion) {
-    Db db = getDb(thriftTable.db_name);
-    // The parent database doesn't exist, nothing to do.
-    if (db == null) return;
-
-    Table table = db.getTable(thriftTable.getTbl_name());
-    if (table != null && table.getCatalogVersion() < dropCatalogVersion) {
-      db.removeTable(thriftTable.tbl_name);
-    }
-  }
-
-  private void removeFunction(TFunction thriftFn, long dropCatalogVersion) {
-    Db db = getDb(thriftFn.name.getDb_name());
-    // The parent database doesn't exist, nothing to do.
-    if (db == null) return;
-
-    // If the function exists and it has a catalog version less than the
-    // version of the drop, remove the function.
-    Function fn = db.getFunction(thriftFn.getSignature());
-    if (fn != null && fn.getCatalogVersion() < dropCatalogVersion) {
-      db.removeFunction(thriftFn.getSignature());
-    }
-  }
-
-  private void removeRole(TRole thriftRole, long dropCatalogVersion) {
-    Role existingRole = authPolicy_.getRole(thriftRole.getRole_name());
-    // If the role exists and it has a catalog version less than the
-    // version of the drop, remove the role.
-    if (existingRole != null && existingRole.getCatalogVersion() < dropCatalogVersion) {
-      authPolicy_.removeRole(thriftRole.getRole_name());
-    }
-  }
-
-  private void removePrivilege(TPrivilege thriftPrivilege, long dropCatalogVersion) {
-    Role role = authPolicy_.getRole(thriftPrivilege.getRole_id());
-    if (role == null) return;
-    RolePrivilege existingPrivilege =
-        role.getPrivilege(thriftPrivilege.getPrivilege_name());
-    // If the privilege exists and it has a catalog version less than the
-    // version of the drop, remove the privilege.
-    if (existingPrivilege != null &&
-        existingPrivilege.getCatalogVersion() < dropCatalogVersion) {
-      role.removePrivilege(thriftPrivilege.getPrivilege_name());
-    }
-  }
-
-  /**
-   * Returns true if the ImpaladCatalog is ready to accept requests (has
-   * received and processed a valid catalog topic update from the StateStore),
-   * false otherwise.
-   */
-  public boolean isReady() { return isReady_.get(); }
-
-  // Only used for testing.
-  public void setIsReady(boolean isReady) { isReady_.set(isReady); }
-  public AuthorizationPolicy getAuthPolicy() { return authPolicy_; }
-}
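
The update protocol described in the class comment reduces to one rule per incoming object: apply it only if it is strictly newer than any cached copy and was not dropped at a later version. A self-contained sketch of that rule follows; it is illustrative only, whereas the real code operates on TCatalogObject instances and the CatalogDeltaLog.

// Sketch of the "apply only if strictly newer and not dropped later" rule (illustrative only).
public class CatalogVersionRuleSketch {
  /**
   * incomingVersion: version carried by the update.
   * existingVersion: version of the cached object, or -1 if the object is not cached.
   * droppedAtVersion: version at which a matching object was dropped, or -1 if never dropped.
   */
  static boolean shouldApplyUpdate(long incomingVersion, long existingVersion,
      long droppedAtVersion) {
    // Skip updates for objects that were dropped in a later catalog version.
    if (droppedAtVersion > incomingVersion) return false;
    // Only strictly newer versions replace what is already cached.
    return incomingVersion > existingVersion;
  }

  public static void main(String[] args) {
    System.out.println(shouldApplyUpdate(10, 8, -1));   // true: newer than the cached copy
    System.out.println(shouldApplyUpdate(10, 12, -1));  // false: cache already holds a newer version
    System.out.println(shouldApplyUpdate(10, 8, 11));   // false: dropped at a later version
  }
}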

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/catalog/IncompleteTable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/catalog/IncompleteTable.java b/fe/src/main/java/com/cloudera/impala/catalog/IncompleteTable.java
deleted file mode 100644
index 88bab5e..0000000
--- a/fe/src/main/java/com/cloudera/impala/catalog/IncompleteTable.java
+++ /dev/null
@@ -1,134 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.catalog;
-
-import java.util.List;
-import java.util.Set;
-
-import org.apache.hadoop.hive.metastore.IMetaStoreClient;
-
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.common.JniUtil;
-import com.cloudera.impala.thrift.TCatalogObjectType;
-import com.cloudera.impala.thrift.TErrorCode;
-import com.cloudera.impala.thrift.TStatus;
-import com.cloudera.impala.thrift.TTable;
-import com.cloudera.impala.thrift.TTableDescriptor;
-import com.google.common.base.Joiner;
-import com.google.common.collect.Lists;
-
-/**
- * Represents a table with incomplete metadata. The metadata may be incomplete because
- * it has not yet been loaded or because of errors encountered during the loading
- * process.
- */
-public class IncompleteTable extends Table {
-  // The cause for the incomplete metadata. If there is no cause given (cause_ = null),
-  // then this is assumed to be an uninitialized table (table that does not have
-  // its metadata loaded).
-  private ImpalaException cause_;
-
-  private IncompleteTable(TableId id, Db db, String name,
-      ImpalaException cause) {
-    super(id, null, db, name, null);
-    cause_ = cause;
-  }
-
-  /**
-   * Returns the cause (ImpalaException) which led to this table's metadata being
-   * incomplete.
-   */
-  public ImpalaException getCause() { return cause_; }
-
-  /**
-   * See comment on cause_.
-   */
-  @Override
-  public boolean isLoaded() { return cause_ != null; }
-
-  @Override
-  public TCatalogObjectType getCatalogObjectType() { return TCatalogObjectType.TABLE; }
-
-  @Override
-  public TTableDescriptor toThriftDescriptor(Set<Long> referencedPartitions) {
-    throw new IllegalStateException(cause_);
-  }
-
-  @Override
-  public void load(boolean reuseMetadata, IMetaStoreClient client,
-      org.apache.hadoop.hive.metastore.api.Table msTbl) throws TableLoadingException {
-    if (cause_ instanceof TableLoadingException) {
-      throw (TableLoadingException) cause_;
-    } else {
-      throw new TableLoadingException("Table metadata incomplete: ", cause_);
-    }
-  }
-
-  @Override
-  public TTable toThrift() {
-    TTable table = new TTable(db_.getName(), name_);
-    table.setId(id_.asInt());
-    if (cause_ != null) {
-      table.setLoad_status(new TStatus(TErrorCode.INTERNAL_ERROR,
-          Lists.newArrayList(JniUtil.throwableToString(cause_),
-                             JniUtil.throwableToStackTrace(cause_))));
-    }
-    return table;
-  }
-
-  @Override
-  protected void loadFromThrift(TTable thriftTable) throws TableLoadingException {
-    if (thriftTable.isSetLoad_status()) {
-      // Since the load status is set, it indicates the table is incomplete due to
-      // an error loading the table metadata. The error message in the load status
-      // should provide details on why. By convention, the final error message should
-      // be the remote (Catalog Server) call stack. This shouldn't be displayed to the
-      // user under normal circumstances, but needs to be recorded somewhere so append
-      // it to the call stack of the local TableLoadingException created here.
-      // TODO: Provide a mechanism (query option?) to optionally allow returning more
-      // detailed errors (including the full call stack(s)) to the user.
-      List<String> errorMsgs = thriftTable.getLoad_status().getError_msgs();
-      String callStackStr = "<None available>";
-      if (errorMsgs.size() > 1) callStackStr = errorMsgs.remove(errorMsgs.size() - 1);
-
-      String errorMsg = Joiner.on("\n").join(errorMsgs);
-      // The errorMsg will always be prefixed with "ExceptionClassName: ". Since we treat
-      // all errors as TableLoadingExceptions, the prefix "TableLoadingException" is
-      // redundant and can be stripped out.
-      errorMsg = errorMsg.replaceFirst("^TableLoadingException: ", "");
-      TableLoadingException loadingException = new TableLoadingException(errorMsg);
-      List<StackTraceElement> stackTrace =
-          Lists.newArrayList(loadingException.getStackTrace());
-      stackTrace.add(new StackTraceElement("========",
-          "<Remote stack trace on catalogd>: " + callStackStr, "", -1));
-      loadingException.setStackTrace(
-          stackTrace.toArray(new StackTraceElement[stackTrace.size()]));
-      this.cause_ = loadingException;
-    }
-  }
-
-  public static IncompleteTable createUninitializedTable(TableId id, Db db,
-      String name) {
-    return new IncompleteTable(id, db, name, null);
-  }
-
-  public static IncompleteTable createFailedMetadataLoadTable(TableId id, Db db,
-      String name, ImpalaException e) {
-    return new IncompleteTable(id, db, name, e);
-  }
-}
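
The error-message convention handled by loadFromThrift() above (the last entry is the remote catalogd call stack, the remaining entries form the user-visible message with the redundant exception prefix stripped) can be exercised on its own. A standalone sketch, illustrative only and not part of the commit:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Sketch of the load-status unpacking performed by IncompleteTable.loadFromThrift() above.
public class LoadStatusSketch {
  public static void main(String[] args) {
    List<String> errorMsgs = new ArrayList<>(Arrays.asList(
        "TableLoadingException: Failed to load metadata for table: foo",
        "remote catalogd stack trace ..."));

    // The last message is, by convention, the remote (catalogd) call stack.
    String callStack = "<None available>";
    if (errorMsgs.size() > 1) callStack = errorMsgs.remove(errorMsgs.size() - 1);

    // The remaining messages form the user-visible error; the redundant prefix is stripped.
    String errorMsg = String.join("\n", errorMsgs)
        .replaceFirst("^TableLoadingException: ", "");

    System.out.println(errorMsg);   // Failed to load metadata for table: foo
    System.out.println(callStack);  // remote catalogd stack trace ...
  }
}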

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/catalog/KuduColumn.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/catalog/KuduColumn.java b/fe/src/main/java/com/cloudera/impala/catalog/KuduColumn.java
deleted file mode 100644
index 5e272d8..0000000
--- a/fe/src/main/java/com/cloudera/impala/catalog/KuduColumn.java
+++ /dev/null
@@ -1,52 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.catalog;
-
-import com.cloudera.impala.thrift.TColumn;
-
-/**
- *  Describes a Kudu column mapped to a Hive column (as described in the metastore).
- *  This class extends Column with Kudu-specific information about whether it is part of a primary
- *  key, and whether it is nullable.
- */
-public class KuduColumn extends Column {
-  private final boolean isKey_;
-  private final boolean isNullable_;
-
-  public KuduColumn(String name, boolean isKey, boolean isNullable, Type type,
-      String comment, int position) {
-    super(name, type, comment, position);
-    isKey_ = isKey;
-    isNullable_ = isNullable;
-  }
-
-  public boolean isKey() { return isKey_; }
-  public boolean isNullable() { return isNullable_; }
-
-  @Override
-  public TColumn toThrift() {
-    TColumn colDesc = new TColumn(name_, type_.toThrift());
-    if (comment_ != null) colDesc.setComment(comment_);
-    colDesc.setCol_stats(getStats().toThrift());
-    colDesc.setPosition(position_);
-    colDesc.setIs_kudu_column(true);
-    colDesc.setIs_key(isKey_);
-    colDesc.setIs_nullable(isNullable_);
-    return colDesc;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/catalog/KuduTable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/catalog/KuduTable.java b/fe/src/main/java/com/cloudera/impala/catalog/KuduTable.java
deleted file mode 100644
index 71d897d..0000000
--- a/fe/src/main/java/com/cloudera/impala/catalog/KuduTable.java
+++ /dev/null
@@ -1,278 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.catalog;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import javax.xml.bind.DatatypeConverter;
-
-import org.apache.hadoop.hive.metastore.IMetaStoreClient;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
-import org.apache.log4j.Logger;
-import org.apache.kudu.client.KuduClient;
-import org.apache.kudu.client.LocatedTablet;
-
-import com.cloudera.impala.common.ImpalaRuntimeException;
-import com.cloudera.impala.thrift.TCatalogObjectType;
-import com.cloudera.impala.thrift.TColumn;
-import com.cloudera.impala.thrift.TKuduTable;
-import com.cloudera.impala.thrift.TResultSet;
-import com.cloudera.impala.thrift.TResultSetMetadata;
-import com.cloudera.impala.thrift.TTable;
-import com.cloudera.impala.thrift.TTableDescriptor;
-import com.cloudera.impala.thrift.TTableType;
-import com.cloudera.impala.util.KuduUtil;
-import com.cloudera.impala.util.TResultRowBuilder;
-import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Iterables;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Sets;
-
-/**
- * Impala representation of a Kudu table.
- *
- * The Kudu-related metadata is stored in the Metastore table's table properties.
- */
-public class KuduTable extends Table {
-  private static final Logger LOG = Logger.getLogger(Table.class);
-
-  // Alias to the string key that identifies the storage handler for Kudu tables.
-  public static final String KEY_STORAGE_HANDLER =
-      hive_metastoreConstants.META_TABLE_STORAGE;
-
-  // Key to access the table name from the table properties
-  public static final String KEY_TABLE_NAME = "kudu.table_name";
-
-  // Key to access the columns used to build the (composite) key of the table.
-  // The order of the keys is important.
-  public static final String KEY_KEY_COLUMNS = "kudu.key_columns";
-
-  // Key to access the master address from the table properties. Error handling for
-  // this string is done in the KuduClient library.
-  // TODO we should have something like KuduConfig.getDefaultConfig()
-  public static final String KEY_MASTER_ADDRESSES = "kudu.master_addresses";
-
-  // Kudu specific value for the storage handler table property keyed by
-  // KEY_STORAGE_HANDLER.
-  public static final String KUDU_STORAGE_HANDLER =
-      "com.cloudera.kudu.hive.KuduStorageHandler";
-
-  // Key to specify the number of tablet replicas.
-  // TODO(KUDU): Allow modification in alter table.
-  public static final String KEY_TABLET_REPLICAS = "kudu.num_tablet_replicas";
-
-  public static final long KUDU_RPC_TIMEOUT_MS = 50000;
-
-  // The name of the table in Kudu.
-  private String kuduTableName_;
-
-  // Comma separated list of Kudu master hosts with optional ports.
-  private String kuduMasters_;
-
-  // The set of columns that are key columns in Kudu.
-  private ImmutableList<String> kuduKeyColumnNames_;
-
-  protected KuduTable(TableId id, org.apache.hadoop.hive.metastore.api.Table msTable,
-      Db db, String name, String owner) {
-    super(id, msTable, db, name, owner);
-  }
-
-  public TKuduTable getKuduTable() {
-    TKuduTable tbl = new TKuduTable();
-    tbl.setKey_columns(Preconditions.checkNotNull(kuduKeyColumnNames_));
-    tbl.setMaster_addresses(Lists.newArrayList(kuduMasters_.split(",")));
-    tbl.setTable_name(kuduTableName_);
-    return tbl;
-  }
-
-  @Override
-  public TTableDescriptor toThriftDescriptor(Set<Long> referencedPartitions) {
-    TTableDescriptor desc = new TTableDescriptor(id_.asInt(), TTableType.KUDU_TABLE,
-        getTColumnDescriptors(), numClusteringCols_, kuduTableName_, db_.getName());
-    desc.setKuduTable(getKuduTable());
-    return desc;
-  }
-
-  @Override
-  public TCatalogObjectType getCatalogObjectType() { return TCatalogObjectType.TABLE; }
-
-  @Override
-  public String getStorageHandlerClassName() { return KUDU_STORAGE_HANDLER; }
-
-  /**
-   * Returns the columns in the order they have been created
-   */
-  @Override
-  public ArrayList<Column> getColumnsInHiveOrder() { return getColumns(); }
-
-  public static boolean isKuduTable(org.apache.hadoop.hive.metastore.api.Table mstbl) {
-    return KUDU_STORAGE_HANDLER.equals(mstbl.getParameters().get(KEY_STORAGE_HANDLER));
-  }
-
-  /**
-   * Load the columns from the schema list
-   */
-  private void loadColumns(List<FieldSchema> schema, IMetaStoreClient client,
-      Set<String> keyColumns) throws TableLoadingException {
-
-    if (keyColumns.size() == 0 || keyColumns.size() > schema.size()) {
-      throw new TableLoadingException(String.format("Kudu tables must have at least one "
-          + "key column (had %d), and no more key columns than there are table columns "
-          + "(had %d).", keyColumns.size(), schema.size()));
-    }
-
-    clearColumns();
-    Set<String> columnNames = Sets.newHashSet();
-    int pos = 0;
-    for (FieldSchema field: schema) {
-      com.cloudera.impala.catalog.Type type = parseColumnType(field);
-      // TODO(kudu-merge): Check for decimal types?
-      boolean isKey = keyColumns.contains(field.getName());
-      KuduColumn col = new KuduColumn(field.getName(), isKey, !isKey, type,
-          field.getComment(), pos);
-      columnNames.add(col.getName());
-      addColumn(col);
-      ++pos;
-    }
-
-    if (!columnNames.containsAll(keyColumns)) {
-      throw new TableLoadingException(String.format("Some key columns were not found in"
-              + " the set of columns. List of column names: %s, List of key column names:"
-              + " %s", Iterables.toString(columnNames), Iterables.toString(keyColumns)));
-    }
-
-    kuduKeyColumnNames_ = ImmutableList.copyOf(keyColumns);
-
-    loadAllColumnStats(client);
-  }
-
-  @Override
-  public void load(boolean reuseMetadata, IMetaStoreClient client,
-      org.apache.hadoop.hive.metastore.api.Table msTbl) throws TableLoadingException {
-    // TODO handle 'reuseMetadata'
-    if (getMetaStoreTable() == null || !tableParamsAreValid(msTbl.getParameters())) {
-      throw new TableLoadingException(String.format(
-          "Cannot load Kudu table %s, table is corrupt.", name_));
-    }
-
-    msTable_ = msTbl;
-    kuduTableName_ = msTbl.getParameters().get(KEY_TABLE_NAME);
-    kuduMasters_ = msTbl.getParameters().get(KEY_MASTER_ADDRESSES);
-
-    String keyColumnsProp = Preconditions.checkNotNull(msTbl.getParameters()
-        .get(KEY_KEY_COLUMNS).toLowerCase(), "'kudu.key_columns' cannot be null.");
-    Set<String> keyColumns = KuduUtil.parseKeyColumns(keyColumnsProp);
-
-    // Load the rest of the data from the table parameters directly
-    loadColumns(msTbl.getSd().getCols(), client, keyColumns);
-
-    numClusteringCols_ = 0;
-
-    // Get row count from stats
-    numRows_ = getRowCount(getMetaStoreTable().getParameters());
-  }
-
-  @Override
-  public TTable toThrift() {
-    TTable table = super.toThrift();
-    table.setTable_type(TTableType.KUDU_TABLE);
-    table.setKudu_table(getKuduTable());
-    return table;
-  }
-
-  @Override
-  protected void loadFromThrift(TTable thriftTable) throws TableLoadingException {
-    super.loadFromThrift(thriftTable);
-    TKuduTable tkudu = thriftTable.getKudu_table();
-    kuduTableName_ = tkudu.getTable_name();
-    kuduMasters_ = Joiner.on(',').join(tkudu.getMaster_addresses());
-    kuduKeyColumnNames_ = ImmutableList.copyOf(tkudu.getKey_columns());
-  }
-
-  public String getKuduTableName() { return kuduTableName_; }
-  public String getKuduMasterAddresses() { return kuduMasters_; }
-  public int getNumKeyColumns() { return kuduKeyColumnNames_.size(); }
-
-  /**
-   * Returns true if all required parameters are present in the given table properties
-   * map.
-   * TODO(kudu-merge) Return a more specific error string.
-   */
-  public static boolean tableParamsAreValid(Map<String, String> params) {
-    return params.get(KEY_TABLE_NAME) != null && params.get(KEY_TABLE_NAME).length() > 0
-        && params.get(KEY_MASTER_ADDRESSES) != null
-        && params.get(KEY_MASTER_ADDRESSES).length() > 0
-        && params.get(KEY_KEY_COLUMNS) != null
-        && params.get(KEY_KEY_COLUMNS).length() > 0;
-   }
-
-  /**
-   * The number of nodes is not known ahead of time and will be updated during computeStats
-   * in the scan node.
-   */
-  public int getNumNodes() { return -1; }
-
-  public List<String> getKuduKeyColumnNames() { return kuduKeyColumnNames_; }
-
-  public TResultSet getTableStats() throws ImpalaRuntimeException {
-    TResultSet result = new TResultSet();
-    TResultSetMetadata resultSchema = new TResultSetMetadata();
-    result.setSchema(resultSchema);
-
-    resultSchema.addToColumns(new TColumn("# Rows", Type.INT.toThrift()));
-    resultSchema.addToColumns(new TColumn("Start Key", Type.STRING.toThrift()));
-    resultSchema.addToColumns(new TColumn("Stop Key", Type.STRING.toThrift()));
-    resultSchema.addToColumns(new TColumn("Leader Replica", Type.STRING.toThrift()));
-    resultSchema.addToColumns(new TColumn("# Replicas", Type.INT.toThrift()));
-
-    try (KuduClient client = new KuduClient.KuduClientBuilder(
-        getKuduMasterAddresses()).build()) {
-      org.apache.kudu.client.KuduTable kuduTable = client.openTable(kuduTableName_);
-      List<LocatedTablet> tablets =
-          kuduTable.getTabletsLocations(KUDU_RPC_TIMEOUT_MS);
-      for (LocatedTablet tab: tablets) {
-        TResultRowBuilder builder = new TResultRowBuilder();
-        builder.add("-1");   // The Kudu client API doesn't expose tablet row counts.
-        builder.add(DatatypeConverter.printHexBinary(
-            tab.getPartition().getPartitionKeyStart()));
-        builder.add(DatatypeConverter.printHexBinary(
-            tab.getPartition().getPartitionKeyEnd()));
-        LocatedTablet.Replica leader = tab.getLeaderReplica();
-        if (leader == null) {
-          // Leader might be null, if it is not yet available (e.g. during
-          // leader election in Kudu)
-          builder.add("Leader n/a");
-        } else {
-          builder.add(leader.getRpcHost() + ":" + leader.getRpcPort().toString());
-        }
-        builder.add(tab.getReplicas().size());
-        result.addToRows(builder.get());
-      }
-
-    } catch (Exception e) {
-      throw new ImpalaRuntimeException("Could not communicate with Kudu.", e);
-    }
-    return result;
-  }
-}
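
Note on the table properties handled above: load() and tableParamsAreValid() expect the three kudu.* properties to be present and non-empty on the HMS table. A minimal sketch of how those parameters fit together; the table name, master address, and key column are purely illustrative:

  import java.util.HashMap;
  import java.util.Map;

  import com.cloudera.impala.catalog.KuduTable;

  public class KuduTablePropsSketch {
    public static void main(String[] args) {
      // Kudu metadata lives in the HMS table's parameters map, keyed by the
      // constants defined in KuduTable.
      Map<String, String> params = new HashMap<>();
      params.put(KuduTable.KEY_STORAGE_HANDLER, KuduTable.KUDU_STORAGE_HANDLER);
      params.put(KuduTable.KEY_TABLE_NAME, "my_kudu_table");          // illustrative name
      params.put(KuduTable.KEY_MASTER_ADDRESSES, "kudu-master:7051"); // illustrative master
      params.put(KuduTable.KEY_KEY_COLUMNS, "id");                    // illustrative key column

      org.apache.hadoop.hive.metastore.api.Table msTbl =
          new org.apache.hadoop.hive.metastore.api.Table();
      msTbl.setParameters(params);

      // isKuduTable() checks the storage handler; tableParamsAreValid() only checks
      // that the three kudu.* properties are non-empty.
      System.out.println(KuduTable.isKuduTable(msTbl));           // true
      System.out.println(KuduTable.tableParamsAreValid(params));  // true
    }
  }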

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/catalog/MapType.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/catalog/MapType.java b/fe/src/main/java/com/cloudera/impala/catalog/MapType.java
deleted file mode 100644
index 8bc7b05..0000000
--- a/fe/src/main/java/com/cloudera/impala/catalog/MapType.java
+++ /dev/null
@@ -1,80 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.catalog;
-
-import org.apache.commons.lang3.StringUtils;
-
-import com.cloudera.impala.thrift.TColumnType;
-import com.cloudera.impala.thrift.TTypeNode;
-import com.cloudera.impala.thrift.TTypeNodeType;
-import com.google.common.base.Preconditions;
-
-/**
- * Describes a MAP type. MAP types have a scalar key and an arbitrarily-typed value.
- */
-public class MapType extends Type {
-  private final Type keyType_;
-  private final Type valueType_;
-
-  public MapType(Type keyType, Type valueType) {
-    Preconditions.checkNotNull(keyType);
-    Preconditions.checkNotNull(valueType);
-    keyType_ = keyType;
-    valueType_ = valueType;
-  }
-
-  public Type getKeyType() { return keyType_; }
-  public Type getValueType() { return valueType_; }
-
-  @Override
-  public boolean equals(Object other) {
-    if (!(other instanceof MapType)) return false;
-    MapType otherMapType = (MapType) other;
-    return otherMapType.keyType_.equals(keyType_) &&
-        otherMapType.valueType_.equals(valueType_);
-  }
-
-  @Override
-  public String toSql(int depth) {
-    if (depth >= MAX_NESTING_DEPTH) return "MAP<...>";
-    return String.format("MAP<%s,%s>",
-        keyType_.toSql(depth + 1), valueType_.toSql(depth + 1));
-  }
-
-  @Override
-  protected String prettyPrint(int lpad) {
-    String leftPadding = StringUtils.repeat(' ', lpad);
-    if (valueType_.isScalarType()) return leftPadding + toSql();
-    // Pass in the padding to make sure nested fields are aligned properly,
-    // even if we then strip the top-level padding.
-    String structStr = valueType_.prettyPrint(lpad);
-    structStr = structStr.substring(lpad);
-    return String.format("%sMAP<%s,%s>", leftPadding, keyType_.toSql(), structStr);
-  }
-
-  @Override
-  public void toThrift(TColumnType container) {
-    TTypeNode node = new TTypeNode();
-    container.types.add(node);
-    Preconditions.checkNotNull(keyType_);
-    Preconditions.checkNotNull(valueType_);
-    node.setType(TTypeNodeType.MAP);
-    keyType_.toThrift(container);
-    valueType_.toThrift(container);
-  }
-}
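
For reference, toSql() above recurses into the key and value types, so a nested map prints as a nested MAP<...> string. A small sketch, assuming Type.STRING and Type.INT are the scalar type singletons from the same catalog package:

  import com.cloudera.impala.catalog.MapType;
  import com.cloudera.impala.catalog.Type;

  public class MapTypeSqlSketch {
    public static void main(String[] args) {
      // MAP<STRING, MAP<STRING, INT>>
      MapType inner = new MapType(Type.STRING, Type.INT);
      MapType outer = new MapType(Type.STRING, inner);
      // Prints: MAP<STRING,MAP<STRING,INT>>
      System.out.println(outer.toSql());
    }
  }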

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/catalog/MetaStoreClientPool.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/catalog/MetaStoreClientPool.java b/fe/src/main/java/com/cloudera/impala/catalog/MetaStoreClientPool.java
deleted file mode 100644
index 40eb4cf..0000000
--- a/fe/src/main/java/com/cloudera/impala/catalog/MetaStoreClientPool.java
+++ /dev/null
@@ -1,189 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.catalog;
-
-import java.util.concurrent.ConcurrentLinkedQueue;
-
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.HiveMetaHook;
-import org.apache.hadoop.hive.metastore.HiveMetaHookLoader;
-import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
-import org.apache.hadoop.hive.metastore.IMetaStoreClient;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.RetryingMetaStoreClient;
-import org.apache.log4j.Logger;
-
-import com.google.common.base.Preconditions;
-
-/**
- * Manages a pool of RetryingMetaStoreClient connections. If the connection pool is empty
- * a new client is created and added to the pool. The idle pool can grow up to a maximum
- * size of MAX_HMS_CONNECTION_POOL_SIZE; connections returned beyond that limit are closed.
- */
-public class MetaStoreClientPool {
-  // Key for config option read from hive-site.xml
-  private static final String HIVE_METASTORE_CNXN_DELAY_MS_CONF =
-      "impala.catalog.metastore.cnxn.creation.delay.ms";
-  private static final int DEFAULT_HIVE_METASTORE_CNXN_DELAY_MS_CONF = 0;
-  // Maximum number of idle metastore connections in the connection pool at any point.
-  private static final int MAX_HMS_CONNECTION_POOL_SIZE = 32;
-  // Number of milliseconds to sleep between creation of HMS connections. Used to debug
-  // IMPALA-825.
-  private final int clientCreationDelayMs_;
-
-  private static final Logger LOG = Logger.getLogger(MetaStoreClientPool.class);
-
-  private final ConcurrentLinkedQueue<MetaStoreClient> clientPool_ =
-      new ConcurrentLinkedQueue<MetaStoreClient>();
-  private Boolean poolClosed_ = false;
-  private final Object poolCloseLock_ = new Object();
-  private final HiveConf hiveConf_;
-
-  // Required for creating an instance of RetryingMetaStoreClient.
-  private static final HiveMetaHookLoader dummyHookLoader = new HiveMetaHookLoader() {
-    @Override
-    public HiveMetaHook getHook(org.apache.hadoop.hive.metastore.api.Table tbl)
-        throws MetaException {
-      return null;
-    }
-  };
-
-  /**
-   * A wrapper around the RetryingMetaStoreClient that manages interactions with the
-   * connection pool. This implements the AutoCloseable interface and hence the callers
-   * should use the try-with-resources statement while creating an instance.
-   */
-  public class MetaStoreClient implements AutoCloseable {
-    private final IMetaStoreClient hiveClient_;
-    private boolean isInUse_;
-
-    private MetaStoreClient(HiveConf hiveConf) {
-      try {
-        LOG.debug("Creating MetaStoreClient. Pool Size = " + clientPool_.size());
-        hiveClient_ = RetryingMetaStoreClient.getProxy(hiveConf, dummyHookLoader,
-            HiveMetaStoreClient.class.getName());
-      } catch (Exception e) {
-        // Turn in to an unchecked exception
-        throw new IllegalStateException(e);
-      }
-      isInUse_ = false;
-    }
-
-    /**
-     * Returns the internal RetryingMetaStoreClient object.
-     */
-    public IMetaStoreClient getHiveClient() {
-      return hiveClient_;
-    }
-
-    /**
-     * Returns this client back to the connection pool. If the connection pool has been
-     * closed, just close the Hive client connection.
-     */
-    @Override
-    public void close() {
-      Preconditions.checkState(isInUse_);
-      isInUse_ = false;
-      // Ensure the connection isn't returned to the pool if the pool has been closed
-      // or if the number of idle connections has reached MAX_HMS_CONNECTION_POOL_SIZE.
-      // This lock is needed to ensure proper behavior when a thread reads poolClosed
-      // is false, but a call to pool.close() comes in immediately afterward.
-      synchronized (poolCloseLock_) {
-        if (poolClosed_ || clientPool_.size() >= MAX_HMS_CONNECTION_POOL_SIZE) {
-          hiveClient_.close();
-        } else {
-          clientPool_.offer(this);
-        }
-      }
-    }
-
-    // Marks this client as in use
-    private void markInUse() {
-      Preconditions.checkState(!isInUse_);
-      isInUse_ = true;
-    }
-  }
-
-  public MetaStoreClientPool(int initialSize) {
-    this(initialSize, new HiveConf(MetaStoreClientPool.class));
-  }
-
-  public MetaStoreClientPool(int initialSize, HiveConf hiveConf) {
-    hiveConf_ = hiveConf;
-    clientCreationDelayMs_ = hiveConf_.getInt(HIVE_METASTORE_CNXN_DELAY_MS_CONF,
-        DEFAULT_HIVE_METASTORE_CNXN_DELAY_MS_CONF);
-    addClients(initialSize);
-  }
-
-  /**
-   * Add numClients to the client pool.
-   */
-  public void addClients(int numClients) {
-    for (int i = 0; i < numClients; ++i) {
-      clientPool_.add(new MetaStoreClient(hiveConf_));
-    }
-  }
-
-  /**
-   * Gets a client from the pool. If the pool is empty a new client is created.
-   */
-  public MetaStoreClient getClient() {
-    // The MetaStoreClient c'tor relies on knowing the Hadoop version by asking
-    // org.apache.hadoop.util.VersionInfo. The VersionInfo class relies on opening
-    // the 'common-version-info.properties' file as a resource from hadoop-common*.jar
-    // using the Thread's context classloader. If necessary, set the Thread's context
-    // classloader, otherwise VersionInfo will fail in its c'tor.
-    if (Thread.currentThread().getContextClassLoader() == null) {
-      Thread.currentThread().setContextClassLoader(ClassLoader.getSystemClassLoader());
-    }
-
-    MetaStoreClient client = clientPool_.poll();
-    // The pool was empty so create a new client and return that.
-    // Serialize client creation to defend against possible race conditions accessing
-    // local Kerberos state (see IMPALA-825).
-    if (client == null) {
-      synchronized (this) {
-        try {
-          Thread.sleep(clientCreationDelayMs_);
-        } catch (InterruptedException e) {
-          /* ignore */
-        }
-        client = new MetaStoreClient(hiveConf_);
-      }
-    }
-    client.markInUse();
-    return client;
-  }
-
-  /**
-   * Removes all items from the connection pool and closes all Hive Meta Store client
-   * connections. Can be called multiple times.
-   */
-  public void close() {
-    // Ensure no more items get added to the pool once close is called.
-    synchronized (poolCloseLock_) {
-      if (poolClosed_) { return; }
-      poolClosed_ = true;
-    }
-
-    MetaStoreClient client = null;
-    while ((client = clientPool_.poll()) != null) {
-      client.getHiveClient().close();
-    }
-  }
-}
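
The MetaStoreClient wrapper above implements AutoCloseable so that close() hands the connection back to the pool (or closes it when the pool is full or shut down). A minimal usage sketch; the initial pool size and the "default" database name are illustrative assumptions:

  import com.cloudera.impala.catalog.MetaStoreClientPool;
  import com.cloudera.impala.catalog.MetaStoreClientPool.MetaStoreClient;

  public class MetaStoreClientPoolSketch {
    public static void main(String[] args) throws Exception {
      MetaStoreClientPool pool = new MetaStoreClientPool(5);  // five initial connections
      // try-with-resources guarantees the client is returned to the pool.
      try (MetaStoreClient client = pool.getClient()) {
        System.out.println(client.getHiveClient().getAllTables("default"));
      }
      pool.close();
    }
  }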

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/catalog/PartitionNotFoundException.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/catalog/PartitionNotFoundException.java b/fe/src/main/java/com/cloudera/impala/catalog/PartitionNotFoundException.java
deleted file mode 100644
index 9964a3c..0000000
--- a/fe/src/main/java/com/cloudera/impala/catalog/PartitionNotFoundException.java
+++ /dev/null
@@ -1,31 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.catalog;
-
-
-/**
- * Thrown when a partition cannot be found in the catalog.
- */
-public class PartitionNotFoundException extends CatalogException {
-  // Dummy serial UID to avoid Eclipse warnings
-  private static final long serialVersionUID = -2203080667446640542L;
-
-  public PartitionNotFoundException(String s) { super(s); }
-
-  public PartitionNotFoundException(String s, Exception cause) { super(s, cause); }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/catalog/PartitionStatsUtil.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/catalog/PartitionStatsUtil.java b/fe/src/main/java/com/cloudera/impala/catalog/PartitionStatsUtil.java
deleted file mode 100644
index 2f2022f..0000000
--- a/fe/src/main/java/com/cloudera/impala/catalog/PartitionStatsUtil.java
+++ /dev/null
@@ -1,141 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.catalog;
-
-import com.cloudera.impala.thrift.TPartitionStats;
-import com.cloudera.impala.common.JniUtil;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.common.ImpalaRuntimeException;
-import com.cloudera.impala.util.MetaStoreUtil;
-
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import org.apache.commons.codec.binary.Base64;
-import org.apache.thrift.protocol.TCompactProtocol;
-import org.apache.thrift.TSerializer;
-import org.apache.thrift.TException;
-import com.google.common.base.Preconditions;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.collect.Lists;
-
-/**
- * Handles serialising and deserialising intermediate statistics from the Hive MetaStore
- * via the parameters map attached to every Hive partition object.
- */
-public class PartitionStatsUtil {
-  public static final String INCREMENTAL_STATS_NUM_CHUNKS =
-      "impala_intermediate_stats_num_chunks";
-
-  public static final String INCREMENTAL_STATS_CHUNK_PREFIX =
-      "impala_intermediate_stats_chunk";
-
-  private final static Logger LOG = LoggerFactory.getLogger(PartitionStatsUtil.class);
-
-  /**
-   * Reconstructs a TPartitionStats object from its serialised form in the given parameter
-   * map. Returns null if no stats are serialised, and throws an exception if there was an
-   * error during deserialisation.
-   */
-  public static TPartitionStats partStatsFromParameters(
-      Map<String, String> hmsParameters) throws ImpalaException {
-    if (hmsParameters == null) return null;
-    String numChunksStr = hmsParameters.get(INCREMENTAL_STATS_NUM_CHUNKS);
-    if (numChunksStr == null) return null;
-    int numChunks = Integer.parseInt(numChunksStr);
-    if (numChunks == 0) return null;
-
-    Preconditions.checkState(numChunks >= 0);
-    StringBuilder encodedStats = new StringBuilder();
-    for (int i = 0; i < numChunks; ++i) {
-      String chunk = hmsParameters.get(INCREMENTAL_STATS_CHUNK_PREFIX + i);
-      if (chunk == null) {
-        throw new ImpalaRuntimeException("Missing stats chunk: " + i);
-      }
-      encodedStats.append(chunk);
-    }
-
-    byte[] decodedStats = Base64.decodeBase64(encodedStats.toString());
-    TCompactProtocol.Factory protocolFactory = new TCompactProtocol.Factory();
-    TPartitionStats ret = new TPartitionStats();
-    JniUtil.deserializeThrift(protocolFactory, ret, decodedStats);
-    return ret;
-  }
-
-  /**
-   * Serialises a TPartitionStats object to a partition.
-   */
-  public static void partStatsToParameters(TPartitionStats partStats,
-      HdfsPartition partition) {
-    // null stats means logically delete the stats from this partition
-    if (partStats == null) {
-      deletePartStats(partition);
-      return;
-    }
-
-    // The HMS has a 4k (as of CDH5.2) limit on the length of any parameter string. The
-    // serialised version of the partition stats is often larger than this. Therefore, we
-    // naively 'chunk' the byte string into 4k pieces, and store the number of pieces in a
-    // separate parameter field.
-    //
-    // The object itself is first serialised by Thrift, and then base-64 encoded to be a
-    // valid string. This inflates its length somewhat; we may want to consider a
-    // different scheme or at least understand why this scheme doesn't seem much more
-    // effective than an ASCII representation.
-    try {
-      TCompactProtocol.Factory protocolFactory = new TCompactProtocol.Factory();
-      TSerializer serializer = new TSerializer(protocolFactory);
-      byte[] serialized = serializer.serialize(partStats);
-      String base64 = new String(Base64.encodeBase64(serialized));
-      List<String> chunks =
-          chunkStringForHms(base64, MetaStoreUtil.MAX_PROPERTY_VALUE_LENGTH);
-      partition.putToParameters(
-          INCREMENTAL_STATS_NUM_CHUNKS, Integer.toString(chunks.size()));
-      for (int i = 0; i < chunks.size(); ++i) {
-        partition.putToParameters(INCREMENTAL_STATS_CHUNK_PREFIX + i, chunks.get(i));
-      }
-    } catch (TException e) {
-      LOG.info("Error saving partition stats: ", e);
-      // TODO: What to throw here?
-    }
-  }
-
-  public static void deletePartStats(HdfsPartition partition) {
-    partition.putToParameters(INCREMENTAL_STATS_NUM_CHUNKS, "0");
-    for (Iterator<String> it = partition.getParameters().keySet().iterator();
-         it.hasNext(); ) {
-      if (it.next().startsWith(INCREMENTAL_STATS_CHUNK_PREFIX)) {
-        it.remove();
-      }
-    }
-  }
-
-  static private List<String> chunkStringForHms(String data, int chunkLen) {
-    int idx = 0;
-    List<String> ret = Lists.newArrayList();
-    while (idx < data.length()) {
-      int remaining = data.length() - idx;
-      int chunkSize = (chunkLen > remaining) ? remaining : chunkLen;
-      ret.add(data.substring(idx, idx + chunkSize));
-      idx += chunkSize;
-    }
-    return ret;
-  }
-}
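
The chunking scheme above splits the base-64 encoded stats into fixed-size pieces and records the piece count; deserialisation simply concatenates chunks 0..n-1 in order. A self-contained sketch of that round trip, where the helper mirrors the package-private chunkStringForHms() and the 4-character chunk length is illustrative:

  import java.util.ArrayList;
  import java.util.List;

  public class StatsChunkingSketch {
    // Mirrors chunkStringForHms() above, for illustration only.
    static List<String> chunk(String data, int chunkLen) {
      List<String> ret = new ArrayList<>();
      for (int idx = 0; idx < data.length(); idx += chunkLen) {
        ret.add(data.substring(idx, Math.min(idx + chunkLen, data.length())));
      }
      return ret;
    }

    public static void main(String[] args) {
      String encoded = "0123456789";            // stand-in for the base-64 encoded stats
      List<String> chunks = chunk(encoded, 4);  // ["0123", "4567", "89"]
      StringBuilder reassembled = new StringBuilder();
      for (String c : chunks) reassembled.append(c);
      System.out.println(reassembled.toString().equals(encoded));  // true
    }
  }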

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/catalog/PrimitiveType.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/catalog/PrimitiveType.java b/fe/src/main/java/com/cloudera/impala/catalog/PrimitiveType.java
deleted file mode 100644
index 4344b61..0000000
--- a/fe/src/main/java/com/cloudera/impala/catalog/PrimitiveType.java
+++ /dev/null
@@ -1,103 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.catalog;
-
-import java.util.List;
-
-import com.cloudera.impala.thrift.TPrimitiveType;
-import com.google.common.collect.Lists;
-
-public enum PrimitiveType {
-  INVALID_TYPE("INVALID_TYPE", -1, TPrimitiveType.INVALID_TYPE),
-  // NULL_TYPE - used only in LiteralPredicate and NullLiteral to make NULLs compatible
-  // with all other types.
-  NULL_TYPE("NULL_TYPE", 1, TPrimitiveType.NULL_TYPE),
-  BOOLEAN("BOOLEAN", 1, TPrimitiveType.BOOLEAN),
-  TINYINT("TINYINT", 1, TPrimitiveType.TINYINT),
-  SMALLINT("SMALLINT", 2, TPrimitiveType.SMALLINT),
-  INT("INT", 4, TPrimitiveType.INT),
-  BIGINT("BIGINT", 8, TPrimitiveType.BIGINT),
-  FLOAT("FLOAT", 4, TPrimitiveType.FLOAT),
-  DOUBLE("DOUBLE", 8, TPrimitiveType.DOUBLE),
-  DATE("DATE", 4, TPrimitiveType.DATE),
-  DATETIME("DATETIME", 8, TPrimitiveType.DATETIME),
-  // The timestamp structure is 12 bytes; aligning to 8 bytes makes it 16.
-  TIMESTAMP("TIMESTAMP", 16, TPrimitiveType.TIMESTAMP),
-  // 8-byte pointer and 4-byte length indicator (12 bytes total).
-  // Aligning to 8 bytes so 16 total.
-  STRING("STRING", 16, TPrimitiveType.STRING),
-  VARCHAR("VARCHAR", 16, TPrimitiveType.VARCHAR),
-
-  // Unsupported scalar type.
-  BINARY("BINARY", -1, TPrimitiveType.BINARY),
-
-  // For decimal at the highest precision, the BE uses 16 bytes.
-  DECIMAL("DECIMAL", 16, TPrimitiveType.DECIMAL),
-
-  // Fixed length char array.
-  CHAR("CHAR", -1, TPrimitiveType.CHAR);
-
-  private final String description_;
-  private final int slotSize_;  // size of tuple slot for this type
-  private final TPrimitiveType thriftType_;
-
-  private PrimitiveType(String description, int slotSize, TPrimitiveType thriftType) {
-    description_ = description;
-    slotSize_ = slotSize;
-    thriftType_ = thriftType;
-  }
-
-  @Override
-  public String toString() {
-    return description_;
-  }
-
-  public static PrimitiveType fromThrift(TPrimitiveType t) {
-    switch (t) {
-      case INVALID_TYPE: return INVALID_TYPE;
-      case NULL_TYPE: return NULL_TYPE;
-      case BOOLEAN: return BOOLEAN;
-      case TINYINT: return TINYINT;
-      case SMALLINT: return SMALLINT;
-      case INT: return INT;
-      case BIGINT: return BIGINT;
-      case FLOAT: return FLOAT;
-      case DOUBLE: return DOUBLE;
-      case STRING: return STRING;
-      case VARCHAR: return VARCHAR;
-      case TIMESTAMP: return TIMESTAMP;
-      case CHAR: return CHAR;
-      case DECIMAL: return DECIMAL;
-      case BINARY: return BINARY;
-    }
-    return INVALID_TYPE;
-  }
-
-  public TPrimitiveType toThrift() { return thriftType_; }
-
-  public static List<TPrimitiveType> toThrift(PrimitiveType[] types) {
-    List<TPrimitiveType> result = Lists.newArrayList();
-    for (PrimitiveType t: types) {
-      result.add(t.toThrift());
-    }
-    return result;
-  }
-
-  public int getSlotSize() { return slotSize_; }
-  public static int getMaxSlotSize() { return DECIMAL.slotSize_; }
-}
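
A quick illustration of the slot sizes declared above; per the comments, DECIMAL has the largest slot, so getMaxSlotSize() returns 16:

  import com.cloudera.impala.catalog.PrimitiveType;

  public class SlotSizeSketch {
    public static void main(String[] args) {
      System.out.println(PrimitiveType.INT.getSlotSize());    // 4
      System.out.println(PrimitiveType.STRING.getSlotSize()); // 16 (12 bytes padded to 16)
      System.out.println(PrimitiveType.getMaxSlotSize());     // 16 (DECIMAL)
    }
  }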

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/catalog/Role.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/catalog/Role.java b/fe/src/main/java/com/cloudera/impala/catalog/Role.java
deleted file mode 100644
index 7ba1bac..0000000
--- a/fe/src/main/java/com/cloudera/impala/catalog/Role.java
+++ /dev/null
@@ -1,144 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.catalog;
-
-import java.util.List;
-import java.util.Set;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import com.cloudera.impala.thrift.TCatalogObjectType;
-import com.cloudera.impala.thrift.TRole;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Sets;
-
-/**
- * Represents a role in an authorization policy. This class is thread safe.
- */
-public class Role implements CatalogObject {
-  private final TRole role_;
-  // The last role ID assigned, starts at 0.
-  private static AtomicInteger roleId_ = new AtomicInteger(0);
-  private long catalogVersion_ = Catalog.INITIAL_CATALOG_VERSION;
-
-  private final CatalogObjectCache<RolePrivilege> rolePrivileges_ =
-      new CatalogObjectCache<RolePrivilege>();
-
-  public Role(String roleName, Set<String> grantGroups) {
-    role_ = new TRole();
-    role_.setRole_name(roleName);
-    role_.setRole_id(roleId_.incrementAndGet());
-    role_.setGrant_groups(Lists.newArrayList(grantGroups));
-  }
-
-  private Role(TRole role) {
-    role_ = role;
-  }
-
-  /**
-   * Adds a privilege to the role. Returns true if the privilege was added successfully
-   * or false if there was a newer version of the privilege already added to the role.
-   */
-  public boolean addPrivilege(RolePrivilege privilege) {
-    return rolePrivileges_.add(privilege);
-  }
-
-  /**
-   * Returns all privileges for this role. If no privileges have been added to the role
-   * an empty list will be returned.
-   */
-  public List<RolePrivilege> getPrivileges() {
-    return Lists.newArrayList(rolePrivileges_.getValues());
-  }
-
-  /**
-   * Returns all privilege names for this role, or an empty set if no privileges are
-   * granted to the role.
-   */
-  public Set<String> getPrivilegeNames() {
-    return Sets.newHashSet(rolePrivileges_.keySet());
-  }
-
-  /**
-   * Gets a privilege with the given name from this role. If no privilege exists
-   * with this name null is returned.
-   */
-  public RolePrivilege getPrivilege(String privilegeName) {
-    return rolePrivileges_.get(privilegeName);
-  }
-
-  /**
-   * Removes a privilege with the given name from the role. Returns the removed
-   * privilege or null if no privilege exists with this name.
-   */
-  public RolePrivilege removePrivilege(String privilegeName) {
-    return rolePrivileges_.remove(privilegeName);
-  }
-
-  /**
-   * Adds a new grant group to this role.
-   */
-  public synchronized void addGrantGroup(String groupName) {
-    if (role_.getGrant_groups().contains(groupName)) return;
-    role_.addToGrant_groups(groupName);
-  }
-
-  /**
-   * Removes a grant group from this role.
-   */
-  public synchronized void removeGrantGroup(String groupName) {
-    role_.getGrant_groups().remove(groupName);
-    // Should never have duplicates in the list of groups.
-    Preconditions.checkState(!role_.getGrant_groups().contains(groupName));
-  }
-
-  /**
-   * Returns the Thrift representation of the role.
-   */
-  public TRole toThrift() {
-    return role_;
-  }
-
-  /**
-   * Creates a Role from a TRole thrift struct.
-   */
-  public static Role fromThrift(TRole thriftRole) {
-    return new Role(thriftRole);
-  }
-
-  /**
-   * Gets the set of group names that have been granted this role or an empty
-   * Set if no groups have been granted the role.
-   */
-  public Set<String> getGrantGroups() {
-    return Sets.newHashSet(role_.getGrant_groups());
-  }
-  @Override
-  public TCatalogObjectType getCatalogObjectType() { return TCatalogObjectType.ROLE; }
-  @Override
-  public String getName() { return role_.getRole_name(); }
-  public int getId() { return role_.getRole_id(); }
-  @Override
-  public synchronized long getCatalogVersion() { return catalogVersion_; }
-  @Override
-  public synchronized void setCatalogVersion(long newVersion) {
-    catalogVersion_ = newVersion;
-  }
-  @Override
-  public boolean isLoaded() { return true; }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/catalog/RolePrivilege.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/catalog/RolePrivilege.java b/fe/src/main/java/com/cloudera/impala/catalog/RolePrivilege.java
deleted file mode 100644
index 37d91d8..0000000
--- a/fe/src/main/java/com/cloudera/impala/catalog/RolePrivilege.java
+++ /dev/null
@@ -1,138 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.catalog;
-
-import java.util.List;
-
-import org.apache.log4j.Logger;
-
-import com.cloudera.impala.thrift.TCatalogObjectType;
-import com.cloudera.impala.thrift.TPrivilege;
-import com.cloudera.impala.thrift.TPrivilegeLevel;
-import com.cloudera.impala.thrift.TPrivilegeScope;
-import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-/**
- * Represents a privilege that has been granted to a role in an authorization policy.
- * This class is thread safe.
- */
-public class RolePrivilege implements CatalogObject {
-  private static final Logger LOG = Logger.getLogger(AuthorizationPolicy.class);
-  // These Joiners are used to build privilege names. For simplicity, the privilege
-  // name we use can also be sent to the Sentry library to perform authorization
-  // checks, so we build it in the same format.
-  private static final Joiner AUTHORIZABLE_JOINER = Joiner.on("->");
-  private static final Joiner KV_JOINER = Joiner.on("=");
-
-  private final TPrivilege privilege_;
-  private long catalogVersion_ = Catalog.INITIAL_CATALOG_VERSION;
-
-  private RolePrivilege(TPrivilege privilege) {
-    privilege_ = privilege;
-  }
-
-  public TPrivilege toThrift() { return privilege_; }
-  public static RolePrivilege fromThrift(TPrivilege privilege) {
-    return new RolePrivilege(privilege);
-  }
-
-  /**
-   * Builds a privilege name for the given TPrivilege object. For simplicity, this name is
-   * generated in a format that can be sent to the Sentry client to perform authorization
-   * checks.
-   */
-  public static String buildRolePrivilegeName(TPrivilege privilege) {
-    List<String> authorizable = Lists.newArrayListWithExpectedSize(4);
-    try {
-      Preconditions.checkNotNull(privilege);
-      TPrivilegeScope scope = privilege.getScope();
-      Preconditions.checkNotNull(scope);
-      switch (scope) {
-        case SERVER: {
-          authorizable.add(KV_JOINER.join("server", privilege.getServer_name()));
-          break;
-        }
-        case URI: {
-          authorizable.add(KV_JOINER.join("server", privilege.getServer_name()));
-          authorizable.add(KV_JOINER.join("uri", privilege.getUri()));
-          break;
-        }
-        case DATABASE: {
-          authorizable.add(KV_JOINER.join("server", privilege.getServer_name()));
-          authorizable.add(KV_JOINER.join("db", privilege.getDb_name()));
-          break;
-        }
-        case TABLE: {
-          authorizable.add(KV_JOINER.join("server", privilege.getServer_name()));
-          authorizable.add(KV_JOINER.join("db", privilege.getDb_name()));
-          authorizable.add(KV_JOINER.join("table", privilege.getTable_name()));
-          break;
-        }
-        case COLUMN: {
-          authorizable.add(KV_JOINER.join("server", privilege.getServer_name()));
-          authorizable.add(KV_JOINER.join("db", privilege.getDb_name()));
-          authorizable.add(KV_JOINER.join("table", privilege.getTable_name()));
-          authorizable.add(KV_JOINER.join("column", privilege.getColumn_name()));
-          break;
-        }
-        default: {
-          throw new UnsupportedOperationException(
-              "Unknown privilege scope: " + scope.toString());
-        }
-      }
-
-      // The ALL privilege is always implied and does not need to be included as part
-      // of the name.
-      if (privilege.getPrivilege_level() != TPrivilegeLevel.ALL) {
-        authorizable.add(KV_JOINER.join("action",
-            privilege.getPrivilege_level().toString()));
-      }
-      return AUTHORIZABLE_JOINER.join(authorizable);
-    } catch (Exception e) {
-      // Should never make it here unless the privilege is malformed.
-      LOG.error("ERROR: ", e);
-      return null;
-    }
-  }
-
-  @Override
-  public TCatalogObjectType getCatalogObjectType() {
-    return TCatalogObjectType.PRIVILEGE;
-  }
-  @Override
-  public String getName() { return privilege_.getPrivilege_name(); }
-  public int getRoleId() { return privilege_.getRole_id(); }
-  @Override
-  public synchronized long getCatalogVersion() { return catalogVersion_; }
-  @Override
-  public synchronized void setCatalogVersion(long newVersion) {
-    catalogVersion_ = newVersion;
-  }
-  @Override
-  public boolean isLoaded() { return true; }
-
-  // The time this privilege was created. Used to quickly check if the same privilege
-  // was dropped and re-created. Assumes a privilege will not be created + dropped +
-  // created in less than 1ms. Returns -1 if create_time_ms was not set for the privilege.
-  public long getCreateTimeMs() {
-    return privilege_.isSetCreate_time_ms() ? privilege_.getCreate_time_ms() : -1L;
-  }
-  public TPrivilegeScope getScope() { return privilege_.getScope(); }
-}
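
buildRolePrivilegeName() above produces Sentry-style names: key=value pairs joined with "->", with the action appended unless the level is ALL. A hedged sketch for a table-scope SELECT privilege; the TPrivilege setters mirror the getters used above and are assumed to exist on the thrift-generated class, and all values are illustrative:

  import com.cloudera.impala.catalog.RolePrivilege;
  import com.cloudera.impala.thrift.TPrivilege;
  import com.cloudera.impala.thrift.TPrivilegeLevel;
  import com.cloudera.impala.thrift.TPrivilegeScope;

  public class PrivilegeNameSketch {
    public static void main(String[] args) {
      TPrivilege priv = new TPrivilege();
      priv.setScope(TPrivilegeScope.TABLE);
      priv.setServer_name("server1");
      priv.setDb_name("tpch");
      priv.setTable_name("lineitem");
      priv.setPrivilege_level(TPrivilegeLevel.SELECT);
      // Prints: server=server1->db=tpch->table=lineitem->action=SELECT
      System.out.println(RolePrivilege.buildRolePrivilegeName(priv));
    }
  }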

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/catalog/RowFormat.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/catalog/RowFormat.java b/fe/src/main/java/com/cloudera/impala/catalog/RowFormat.java
deleted file mode 100644
index 76dcacb..0000000
--- a/fe/src/main/java/com/cloudera/impala/catalog/RowFormat.java
+++ /dev/null
@@ -1,109 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.catalog;
-
-import java.util.Map;
-
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
-
-import com.cloudera.impala.analysis.StringLiteral;
-import com.cloudera.impala.thrift.TTableRowFormat;
-import com.google.common.base.Preconditions;
-
-/**
- * Defines the physical (on-disk) format for a table's data. This is used when creating
- * a new table to specify how to interpret the fields (columns) and lines (rows) in
- * a data file.
- */
-public class RowFormat {
-  // Default row format
-  public final static RowFormat DEFAULT_ROW_FORMAT = new RowFormat(null, null, null);
-
-  private final String fieldDelimiter_;
-  private final String lineDelimiter_;
-  private final String escapeChar_;
-
-  private RowFormat(String fieldDelimiter, String lineDelimiter, String escapeChar,
-      boolean unescape) {
-    if (unescape) {
-      fieldDelimiter_ = getUnescapedValueOrNull(fieldDelimiter);
-      lineDelimiter_ = getUnescapedValueOrNull(lineDelimiter);
-      escapeChar_ = getUnescapedValueOrNull(escapeChar);
-    } else {
-      fieldDelimiter_ = fieldDelimiter;
-      lineDelimiter_ = lineDelimiter;
-      escapeChar_ = escapeChar;
-    }
-  }
-
-  /**
-   * Creates a new instance of the RowFormat class, unescaping the values of
-   * field delimiter, line delimiter, and escape char.
-   */
-  public RowFormat(String fieldDelimiter, String lineDelimiter, String escapeChar) {
-    this(fieldDelimiter, lineDelimiter, escapeChar, true);
-  }
-
-  public String getFieldDelimiter() {
-    return fieldDelimiter_;
-  }
-
-  public String getLineDelimiter() {
-    return lineDelimiter_;
-  }
-
-  public String getEscapeChar() {
-    return escapeChar_;
-  }
-
-  public boolean isDefault() {
-    return fieldDelimiter_ == null && lineDelimiter_ == null && escapeChar_ == null;
-  }
-
-  private static String getUnescapedValueOrNull(String value) {
-    return value == null ? null : new StringLiteral(value).getUnescapedValue();
-  }
-
-  public TTableRowFormat toThrift() {
-    TTableRowFormat tableRowFormat = new TTableRowFormat();
-    tableRowFormat.setField_terminator(getFieldDelimiter());
-    tableRowFormat.setLine_terminator(getLineDelimiter());
-    tableRowFormat.setEscaped_by(getEscapeChar());
-    return tableRowFormat;
-  }
-
-  public static RowFormat fromThrift(TTableRowFormat tableRowFormat) {
-    if (tableRowFormat == null) {
-      return RowFormat.DEFAULT_ROW_FORMAT;
-    }
-    // When creating a RowFormat from thrift, don't unescape the values, they should have
-    // already been unescaped.
-    return new RowFormat(tableRowFormat.getField_terminator(),
-        tableRowFormat.getLine_terminator(), tableRowFormat.getEscaped_by(), false);
-  }
-
-  /**
-   * Returns the RowFormat for the storage descriptor.
-   */
-  public static RowFormat fromStorageDescriptor(StorageDescriptor sd) {
-    Preconditions.checkNotNull(sd);
-    Map<String, String> params = sd.getSerdeInfo().getParameters();
-    return new RowFormat(params.get("field.delim"), params.get("line.delim"),
-        params.get("escape.delim"));
-  }
-}
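
fromStorageDescriptor() above reads the delimiters from the SerDe parameters of the HMS storage descriptor. A minimal sketch with illustrative delimiter values (escape.delim left unset):

  import java.util.HashMap;
  import java.util.Map;

  import org.apache.hadoop.hive.metastore.api.SerDeInfo;
  import org.apache.hadoop.hive.metastore.api.StorageDescriptor;

  import com.cloudera.impala.catalog.RowFormat;

  public class RowFormatSketch {
    public static void main(String[] args) {
      Map<String, String> serdeParams = new HashMap<>();
      serdeParams.put("field.delim", ",");   // illustrative field delimiter
      serdeParams.put("line.delim", "\n");   // illustrative line delimiter

      SerDeInfo serdeInfo = new SerDeInfo();
      serdeInfo.setParameters(serdeParams);
      StorageDescriptor sd = new StorageDescriptor();
      sd.setSerdeInfo(serdeInfo);

      RowFormat rowFormat = RowFormat.fromStorageDescriptor(sd);
      System.out.println(rowFormat.getFieldDelimiter());  // ,
      System.out.println(rowFormat.isDefault());          // false
    }
  }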


[19/61] [partial] incubator-impala git commit: IMPALA-3786: Replace "cloudera" with "apache" (part 1)

Posted by kw...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/planner/PlanFragment.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/planner/PlanFragment.java b/fe/src/main/java/com/cloudera/impala/planner/PlanFragment.java
deleted file mode 100644
index 48a71dc..0000000
--- a/fe/src/main/java/com/cloudera/impala/planner/PlanFragment.java
+++ /dev/null
@@ -1,388 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.planner;
-
-import java.util.List;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.BinaryPredicate;
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.analysis.JoinOperator;
-import com.cloudera.impala.analysis.SlotRef;
-import com.cloudera.impala.catalog.HdfsFileFormat;
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.common.NotImplementedException;
-import com.cloudera.impala.common.TreeNode;
-import com.cloudera.impala.planner.JoinNode.DistributionMode;
-import com.cloudera.impala.thrift.TExplainLevel;
-import com.cloudera.impala.thrift.TPartitionType;
-import com.cloudera.impala.thrift.TPlan;
-import com.cloudera.impala.thrift.TPlanFragment;
-import com.cloudera.impala.thrift.TPlanFragmentTree;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Predicates;
-import com.google.common.collect.Lists;
-
-/**
- * PlanFragments form a tree structure via their ExchangeNodes. A tree of fragments
- * connected in that way forms a plan. The output of a plan is produced by the root
- * fragment and is either the result of the query or an intermediate result
- * needed by a different plan (such as a hash table).
- *
- * Plans are grouped into cohorts based on the consumer of their output: all
- * plans that materialize intermediate results for a particular consumer plan
- * are grouped into a single cohort.
- *
- * A PlanFragment encapsulates the specific tree of execution nodes that
- * are used to produce the output of the plan fragment, as well as output exprs,
- * destination node, etc. If there are no output exprs, the full row that is
- * produced by the plan root is marked as materialized.
- *
- * A plan fragment can have one or many instances, each of which in turn is executed by
- * an individual node and the output sent to a specific instance of the destination
- * fragment (or, in the case of the root fragment, is materialized in some form).
- *
- * A hash-partitioned plan fragment is the result of one or more hash-partitioning data
- * streams being received by plan nodes in this fragment. In the future, a fragment's
- * data partition could also be hash partitioned based on a scan node that is reading
- * from a physically hash-partitioned table.
- *
- * The sequence of calls is:
- * - c'tor
- * - assemble with getters, etc.
- * - finalize()
- * - toThrift()
- *
- * TODO: the tree of PlanNodes is connected across fragment boundaries, which makes
- *   it impossible to search for things within a fragment (using TreeNode functions);
- *   fix that
- */
-public class PlanFragment extends TreeNode<PlanFragment> {
-  private final static Logger LOG = LoggerFactory.getLogger(PlanFragment.class);
-
-  private final PlanFragmentId fragmentId_;
-  private PlanId planId_;
-  private CohortId cohortId_;
-
-  // root of plan tree executed by this fragment
-  private PlanNode planRoot_;
-
-  // exchange node to which this fragment sends its output
-  private ExchangeNode destNode_;
-
-  // if null, outputs the entire row produced by planRoot_
-  private List<Expr> outputExprs_;
-
-  // created in finalize() or set in setSink()
-  private DataSink sink_;
-
-  // specification of the partition of the input of this fragment;
-  // an UNPARTITIONED fragment is executed on only a single node
-  // TODO: improve this comment, "input" is a bit misleading
-  private DataPartition dataPartition_;
-
-  // specification of how the output of this fragment is partitioned (i.e., how
-  // it's sent to its destination);
-  // if the output is UNPARTITIONED, it is being broadcast
-  private DataPartition outputPartition_;
-
-  /**
-   * C'tor for fragment with specific partition; the output is by default broadcast.
-   */
-  public PlanFragment(PlanFragmentId id, PlanNode root, DataPartition partition) {
-    fragmentId_ = id;
-    planRoot_ = root;
-    dataPartition_ = partition;
-    outputPartition_ = DataPartition.UNPARTITIONED;
-    setFragmentInPlanTree(planRoot_);
-  }
-
-  /**
-   * Assigns 'this' as fragment of all PlanNodes in the plan tree rooted at node.
-   * Does not traverse the children of ExchangeNodes because those must belong to a
-   * different fragment.
-   */
-  public void setFragmentInPlanTree(PlanNode node) {
-    if (node == null) return;
-    node.setFragment(this);
-    if (node instanceof ExchangeNode) return;
-    for (PlanNode child : node.getChildren()) setFragmentInPlanTree(child);
-  }
-
-  /**
-   * Collect all PlanNodes that belong to the exec tree of this fragment.
-   */
-  public void collectPlanNodes(List<PlanNode> nodes) {
-    Preconditions.checkNotNull(nodes);
-    collectPlanNodesHelper(planRoot_, nodes);
-  }
-
-  private void collectPlanNodesHelper(PlanNode root, List<PlanNode> nodes) {
-    if (root == null) return;
-    nodes.add(root);
-    if (root instanceof ExchangeNode) return;
-    for (PlanNode child: root.getChildren()) collectPlanNodesHelper(child, nodes);
-  }
-
-  public void setOutputExprs(List<Expr> outputExprs) {
-    outputExprs_ = Expr.cloneList(outputExprs);
-  }
-  public List<Expr> getOutputExprs() { return outputExprs_; }
-
-  /**
-   * Finalize plan tree and create stream sink, if needed.
-   * If this fragment is hash partitioned, ensures that the corresponding partition
-   * exprs of all hash-partitioning senders are cast to identical types.
-   * Otherwise, the hashes generated for identical partition values may differ
-   * among senders if the partition-expr types are not identical.
-   */
-  public void finalize(Analyzer analyzer)
-      throws InternalException, NotImplementedException {
-    if (destNode_ != null) {
-      Preconditions.checkState(sink_ == null);
-      // we're streaming to an exchange node
-      DataStreamSink streamSink = new DataStreamSink(destNode_, outputPartition_);
-      streamSink.setFragment(this);
-      sink_ = streamSink;
-    }
-
-    if (!dataPartition_.isHashPartitioned()) return;
-
-    // This fragment is hash partitioned. Gather all exchange nodes and ensure
-    // that all hash-partitioning senders hash on exprs-values of the same type.
-    List<ExchangeNode> exchNodes = Lists.newArrayList();
-    planRoot_.collect(Predicates.instanceOf(ExchangeNode.class), exchNodes);
-
-    // Contains partition-expr lists of all hash-partitioning sender fragments.
-    List<List<Expr>> senderPartitionExprs = Lists.newArrayList();
-    for (ExchangeNode exchNode: exchNodes) {
-      Preconditions.checkState(!exchNode.getChildren().isEmpty());
-      PlanFragment senderFragment = exchNode.getChild(0).getFragment();
-      Preconditions.checkNotNull(senderFragment);
-      if (!senderFragment.getOutputPartition().isHashPartitioned()) continue;
-      List<Expr> partExprs = senderFragment.getOutputPartition().getPartitionExprs();
-      // All hash-partitioning senders must have compatible partition exprs, otherwise
-      // this fragment's data partition must not be hash partitioned.
-      Preconditions.checkState(
-          partExprs.size() == dataPartition_.getPartitionExprs().size());
-      senderPartitionExprs.add(partExprs);
-    }
-
-    // Cast all corresponding hash partition exprs of all hash-partitioning senders
-    // to their compatible types. Also cast the data partition's exprs for consistency,
-    // although not strictly necessary. They should already be type identical to the
-    // exprs of one of the senders and they are not directly used for hashing in the BE.
-    senderPartitionExprs.add(dataPartition_.getPartitionExprs());
-    try {
-      analyzer.castToUnionCompatibleTypes(senderPartitionExprs);
-    } catch (AnalysisException e) {
-      // Should never happen. Analysis should have ensured type compatibility already.
-      throw new IllegalStateException(e);
-    }
-  }
-
-  /**
-   * Return the number of nodes on which the plan fragment will execute.
-   * invalid: -1
-   */
-  public int getNumNodes() {
-    return dataPartition_ == DataPartition.UNPARTITIONED ? 1 : planRoot_.getNumNodes();
-  }
-
-  /**
-   * Estimates the per-node number of distinct values of exprs based on the data
-   * partition of this fragment and its number of nodes. Returns -1 for an invalid
-   * estimate, e.g., because getNumDistinctValues() failed on one of the exprs.
-   */
-  public long getNumDistinctValues(List<Expr> exprs) {
-    Preconditions.checkNotNull(dataPartition_);
-    long result = 1;
-    int numNodes = getNumNodes();
-    Preconditions.checkState(numNodes >= 0);
-    // The number of nodes is zero for empty tables.
-    if (numNodes == 0) return 0;
-    for (Expr expr: exprs) {
-      long numDistinct = expr.getNumDistinctValues();
-      if (numDistinct == -1) {
-        result = -1;
-        break;
-      }
-      if (dataPartition_.getPartitionExprs().contains(expr)) {
-        numDistinct = (long)Math.max((double) numDistinct / (double) numNodes, 1L);
-      }
-      result = PlanNode.multiplyCardinalities(result, numDistinct);
-    }
-    return result;
-  }
-
-  public TPlanFragment toThrift() {
-    TPlanFragment result = new TPlanFragment();
-    result.setDisplay_name(fragmentId_.toString());
-    if (planRoot_ != null) result.setPlan(planRoot_.treeToThrift());
-    if (outputExprs_ != null) {
-      result.setOutput_exprs(Expr.treesToThrift(outputExprs_));
-    }
-    if (sink_ != null) result.setOutput_sink(sink_.toThrift());
-    result.setPartition(dataPartition_.toThrift());
-    return result;
-  }
-
-  public TPlanFragmentTree treeToThrift() {
-    TPlanFragmentTree result = new TPlanFragmentTree();
-    treeToThriftHelper(result);
-    return result;
-  }
-
-  private void treeToThriftHelper(TPlanFragmentTree plan) {
-    plan.addToFragments(toThrift());
-    for (PlanFragment child: children_) {
-      child.treeToThriftHelper(plan);
-    }
-  }
-
-  public String getExplainString(TExplainLevel detailLevel) {
-    return getExplainString("", "", detailLevel);
-  }
-
-  /**
-   * The root of the output tree will be prefixed by rootPrefix and the remaining plan
-   * output will be prefixed by prefix.
-   */
-  protected final String getExplainString(String rootPrefix, String prefix,
-      TExplainLevel detailLevel) {
-    StringBuilder str = new StringBuilder();
-    Preconditions.checkState(dataPartition_ != null);
-    String detailPrefix = prefix + "|  ";  // sink detail
-    if (detailLevel == TExplainLevel.VERBOSE) {
-      // we're printing a new tree, start over with the indentation
-      prefix = "  ";
-      rootPrefix = "  ";
-      detailPrefix = prefix + "|  ";
-      str.append(String.format("%s:PLAN FRAGMENT [%s]\n", fragmentId_.toString(),
-          dataPartition_.getExplainString()));
-      if (sink_ != null && sink_ instanceof DataStreamSink) {
-        str.append(sink_.getExplainString(rootPrefix, prefix, detailLevel) + "\n");
-      }
-    }
-
-    String planRootPrefix = rootPrefix;
-    // Always print sinks other than DataStreamSinks.
-    if (sink_ != null && !(sink_ instanceof DataStreamSink)) {
-      str.append(sink_.getExplainString(rootPrefix, detailPrefix, detailLevel));
-      if (detailLevel.ordinal() >= TExplainLevel.STANDARD.ordinal()) {
-        str.append(prefix + "|\n");
-      }
-      // we already used the root prefix for the sink
-      planRootPrefix = prefix;
-    }
-    if (planRoot_ != null) {
-      str.append(planRoot_.getExplainString(planRootPrefix, prefix, detailLevel));
-    }
-    return str.toString();
-  }
-
-  /** Returns true if this fragment is partitioned. */
-  public boolean isPartitioned() {
-    return (dataPartition_.getType() != TPartitionType.UNPARTITIONED);
-  }
-
-  public PlanFragmentId getId() { return fragmentId_; }
-  public PlanId getPlanId() { return planId_; }
-  public void setPlanId(PlanId id) { planId_ = id; }
-  public CohortId getCohortId() { return cohortId_; }
-  public void setCohortId(CohortId id) { cohortId_ = id; }
-  public PlanFragment getDestFragment() {
-    if (destNode_ == null) return null;
-    return destNode_.getFragment();
-  }
-  public ExchangeNode getDestNode() { return destNode_; }
-  public DataPartition getDataPartition() { return dataPartition_; }
-  public void setDataPartition(DataPartition dataPartition) {
-    this.dataPartition_ = dataPartition;
-  }
-  public DataPartition getOutputPartition() { return outputPartition_; }
-  public void setOutputPartition(DataPartition outputPartition) {
-    this.outputPartition_ = outputPartition;
-  }
-  public PlanNode getPlanRoot() { return planRoot_; }
-  public void setPlanRoot(PlanNode root) {
-    planRoot_ = root;
-    setFragmentInPlanTree(planRoot_);
-  }
-
-  public void setDestination(ExchangeNode destNode) {
-    destNode_ = destNode;
-    PlanFragment dest = getDestFragment();
-    Preconditions.checkNotNull(dest);
-    dest.addChild(this);
-  }
-
-  public boolean hasSink() { return sink_ != null; }
-  public DataSink getSink() { return sink_; }
-  public void setSink(DataSink sink) {
-    Preconditions.checkState(this.sink_ == null);
-    Preconditions.checkNotNull(sink);
-    sink.setFragment(this);
-    this.sink_ = sink;
-  }
-
-  /**
-   * Adds a node as the new root to the plan tree. Connects the existing
-   * root as the child of newRoot.
-   */
-  public void addPlanRoot(PlanNode newRoot) {
-    Preconditions.checkState(newRoot.getChildren().size() == 1);
-    newRoot.setChild(0, planRoot_);
-    planRoot_ = newRoot;
-    planRoot_.setFragment(this);
-  }
-
-  /**
-   * Verify that the tree of PlanFragments and their contained tree of
-   * PlanNodes is constructed correctly.
-   */
-  public void verifyTree() {
-    // PlanNode.fragment_ is set correctly
-    List<PlanNode> nodes = Lists.newArrayList();
-    collectPlanNodes(nodes);
-    List<PlanNode> exchNodes = Lists.newArrayList();
-    for (PlanNode node: nodes) {
-      if (node instanceof ExchangeNode) exchNodes.add(node);
-      Preconditions.checkState(node.getFragment() == this);
-    }
-
-    // all ExchangeNodes have registered input fragments
-    Preconditions.checkState(exchNodes.size() == getChildren().size());
-    List<PlanFragment> childFragments = Lists.newArrayList();
-    for (PlanNode exchNode: exchNodes) {
-      PlanFragment childFragment = exchNode.getChild(0).getFragment();
-      Preconditions.checkState(!childFragments.contains(childFragment));
-      childFragments.add(childFragment);
-      Preconditions.checkState(childFragment.getDestNode() == exchNode);
-    }
-    // all registered children are accounted for
-    Preconditions.checkState(getChildren().containsAll(childFragments));
-
-    for (PlanFragment child: getChildren()) child.verifyTree();
-  }
-}
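
The arithmetic inside getNumDistinctValues() above is easy to lose in the class; the
following is a minimal standalone sketch of it (the array-based inputs and the helper
name perNodeNdv are illustrative stand-ins for the Expr and DataPartition lookups the
real method performs). Unknown NDVs invalidate the estimate, partition exprs have
their NDV divided by the node count with a floor of one, and the per-expr NDVs are
multiplied together with saturation on overflow, mirroring
PlanNode.multiplyCardinalities().

    // Sketch only: returns the estimated per-node NDV, or -1 if any expr's NDV
    // is unknown. Uses only java.lang.Math.
    static long perNodeNdv(long[] exprNdvs, boolean[] isPartitionExpr, int numNodes) {
      if (numNodes == 0) return 0;   // empty table: no rows, no distinct values
      long result = 1;
      for (int i = 0; i < exprNdvs.length; ++i) {
        long ndv = exprNdvs[i];
        if (ndv == -1) return -1;    // unknown NDV invalidates the whole estimate
        if (isPartitionExpr[i]) {
          // A partition expr's values are spread across nodes, so each node only
          // sees roughly ndv / numNodes of them, but always at least one.
          ndv = (long) Math.max((double) ndv / numNodes, 1L);
        }
        try {
          result = Math.multiplyExact(result, ndv);  // saturate on overflow
        } catch (ArithmeticException e) {
          return Long.MAX_VALUE;
        }
      }
      return result;
    }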

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/planner/PlanFragmentId.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/planner/PlanFragmentId.java b/fe/src/main/java/com/cloudera/impala/planner/PlanFragmentId.java
deleted file mode 100644
index 98b08fe..0000000
--- a/fe/src/main/java/com/cloudera/impala/planner/PlanFragmentId.java
+++ /dev/null
@@ -1,42 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.planner;
-
-import com.cloudera.impala.common.Id;
-import com.cloudera.impala.common.IdGenerator;
-
-public class PlanFragmentId extends Id<PlanFragmentId> {
-  // Construction only allowed via an IdGenerator.
-  protected PlanFragmentId(int id) {
-    super(id);
-  }
-
-  public static IdGenerator<PlanFragmentId> createGenerator() {
-    return new IdGenerator<PlanFragmentId>() {
-      @Override
-      public PlanFragmentId getNextId() { return new PlanFragmentId(nextId_++); }
-      @Override
-      public PlanFragmentId getMaxId() { return new PlanFragmentId(nextId_ - 1); }
-    };
-  }
-
-  @Override
-  public String toString() {
-    return String.format("F%02d", id_);
-  }
-}
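
The id classes in this part of the patch (PlanFragmentId here, PlanId and PlanNodeId
below) all follow the same pattern: construction is only possible through an
IdGenerator handing out consecutive integers, and toString() controls how the id shows
up in explain output. A short usage sketch, assuming the generator's internal counter
starts at zero; in the real planner the fragment-id generator is owned by the planner
context (Planner.createPlan() further down calls ctx_.getNextFragmentId() for this).

    IdGenerator<PlanFragmentId> gen = PlanFragmentId.createGenerator();
    PlanFragmentId f0 = gen.getNextId();   // rendered as "F00"
    PlanFragmentId f1 = gen.getNextId();   // rendered as "F01"
    PlanFragmentId last = gen.getMaxId();  // highest id handed out so far, i.e. "F01"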

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/planner/PlanId.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/planner/PlanId.java b/fe/src/main/java/com/cloudera/impala/planner/PlanId.java
deleted file mode 100644
index 2cecbd8..0000000
--- a/fe/src/main/java/com/cloudera/impala/planner/PlanId.java
+++ /dev/null
@@ -1,42 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.planner;
-
-import com.cloudera.impala.common.Id;
-import com.cloudera.impala.common.IdGenerator;
-
-public class PlanId extends Id<PlanId> {
-  // Construction only allowed via an IdGenerator.
-  protected PlanId(int id) {
-    super(id);
-  }
-
-  public static IdGenerator<PlanId> createGenerator() {
-    return new IdGenerator<PlanId>() {
-      @Override
-      public PlanId getNextId() { return new PlanId(nextId_++); }
-      @Override
-      public PlanId getMaxId() { return new PlanId(nextId_ - 1); }
-    };
-  }
-
-  @Override
-  public String toString() {
-    return String.format("%02d", id_);
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/planner/PlanNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/planner/PlanNode.java b/fe/src/main/java/com/cloudera/impala/planner/PlanNode.java
deleted file mode 100644
index d38f10a..0000000
--- a/fe/src/main/java/com/cloudera/impala/planner/PlanNode.java
+++ /dev/null
@@ -1,715 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.planner;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.List;
-import java.util.Set;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.analysis.ExprId;
-import com.cloudera.impala.analysis.ExprSubstitutionMap;
-import com.cloudera.impala.analysis.TupleDescriptor;
-import com.cloudera.impala.analysis.TupleId;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.common.PrintUtils;
-import com.cloudera.impala.common.TreeNode;
-import com.cloudera.impala.planner.RuntimeFilterGenerator.RuntimeFilter;
-import com.cloudera.impala.thrift.TExecStats;
-import com.cloudera.impala.thrift.TExplainLevel;
-import com.cloudera.impala.thrift.TPlan;
-import com.cloudera.impala.thrift.TPlanNode;
-import com.cloudera.impala.thrift.TQueryOptions;
-import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Sets;
-import com.google.common.math.LongMath;
-
-/**
- * Each PlanNode represents a single relational operator
- * and encapsulates the information needed by the planner to
- * make optimization decisions.
- *
- * finalize(): Computes internal state, such as keys for scan nodes; gets called once on
- * the root of the plan tree before the call to toThrift(). Also finalizes the set
- * of conjuncts, such that each remaining one requires all of its referenced slots to
- * be materialized (i.e., can be evaluated by calling GetValue(), rather than being
- * implicitly evaluated as part of a scan key).
- *
- * conjuncts_: Each node has a list of conjuncts that can be executed in the context of
- * this node, i.e., they only reference tuples materialized by this node or one of
- * its children (= are bound by tupleIds_).
- */
-abstract public class PlanNode extends TreeNode<PlanNode> {
-  private final static Logger LOG = LoggerFactory.getLogger(PlanNode.class);
-
-  // TODO: Retrieve from the query options instead of using a default.
-  protected final static int DEFAULT_BATCH_SIZE = 1024;
-
-  // String used for this node in getExplainString().
-  protected String displayName_;
-
-  // unique w/in plan tree; assigned by planner, and not necessarily in c'tor
-  protected PlanNodeId id_;
-
-  protected long limit_; // max. # of rows to be returned; -1: no limit
-
-  // ids materialized by the tree rooted at this node
-  protected ArrayList<TupleId> tupleIds_;
-
-  // ids of the TblRefs "materialized" by this node; identical with tupleIds_
-  // if the tree rooted at this node only materializes BaseTblRefs;
-  // useful during plan generation
-  protected ArrayList<TupleId> tblRefIds_;
-
-  // The set of nullable TupleIds produced by this node. It is a subset of tupleIds_.
-  // A tuple is nullable within a particular plan tree if it's the "nullable" side of
-  // an outer join, which has nothing to do with the schema.
-  protected Set<TupleId> nullableTupleIds_ = Sets.newHashSet();
-
-  protected List<Expr> conjuncts_ = Lists.newArrayList();
-
-  // Fragment that this PlanNode is executed in. Valid only after this PlanNode has been
-  // assigned to a fragment. Set and maintained by enclosing PlanFragment.
-  protected PlanFragment fragment_;
-
-  // if set, needs to be applied by parent node to reference this node's output
-  protected ExprSubstitutionMap outputSmap_;
-
-  // global state of planning wrt conjunct assignment; used by planner as a shortcut
-  // to avoid having to pass assigned conjuncts back and forth
-  // (the planner uses this to save and reset the global state in between join tree
-  // alternatives)
-  // TODO for 2.3: Save this state in the PlannerContext instead.
-  protected Set<ExprId> assignedConjuncts_;
-
-  // estimate of the output cardinality of this node; set in computeStats();
-  // invalid: -1
-  protected long cardinality_;
-
-  // number of nodes on which the plan tree rooted at this node would execute;
-  // set in computeStats(); invalid: -1
-  protected int numNodes_;
-
-  // sum of tupleIds_' avgSerializedSizes; set in computeStats()
-  protected float avgRowSize_;
-
-  // estimated per-host memory requirement for this node;
-  // set in computeCosts(); invalid: -1
-  protected long perHostMemCost_ = -1;
-
-  // Runtime filters assigned to this node.
-  protected List<RuntimeFilter> runtimeFilters_ = Lists.newArrayList();
-
-  protected PlanNode(PlanNodeId id, List<TupleId> tupleIds, String displayName) {
-    this(id, displayName);
-    tupleIds_.addAll(tupleIds);
-    tblRefIds_.addAll(tupleIds);
-  }
-
-  /**
-   * Deferred id_ assignment.
-   */
-  protected PlanNode(String displayName) {
-    this(null, displayName);
-  }
-
-  protected PlanNode(PlanNodeId id, String displayName) {
-    id_ = id;
-    limit_ = -1;
-    tupleIds_ = Lists.newArrayList();
-    tblRefIds_ = Lists.newArrayList();
-    cardinality_ = -1;
-    numNodes_ = -1;
-    displayName_ = displayName;
-  }
-
-  /**
-   * Copy c'tor. Also passes in new id_.
-   */
-  protected PlanNode(PlanNodeId id, PlanNode node, String displayName) {
-    id_ = id;
-    limit_ = node.limit_;
-    tupleIds_ = Lists.newArrayList(node.tupleIds_);
-    tblRefIds_ = Lists.newArrayList(node.tblRefIds_);
-    nullableTupleIds_ = Sets.newHashSet(node.nullableTupleIds_);
-    conjuncts_ = Expr.cloneList(node.conjuncts_);
-    cardinality_ = -1;
-    numNodes_ = -1;
-    displayName_ = displayName;
-  }
-
-  /**
-   * Sets tblRefIds_, tupleIds_, and nullableTupleIds_.
-   * The default implementation is a no-op.
-   */
-  public void computeTupleIds() {
-    Preconditions.checkState(children_.isEmpty() || !tupleIds_.isEmpty());
-  }
-
-  /**
-   * Clears tblRefIds_, tupleIds_, and nullableTupleIds_.
-   */
-  protected void clearTupleIds() {
-    tblRefIds_.clear();
-    tupleIds_.clear();
-    nullableTupleIds_.clear();
-  }
-
-  public PlanNodeId getId() { return id_; }
-  public void setId(PlanNodeId id) {
-    Preconditions.checkState(id_ == null);
-    id_ = id;
-  }
-  public long getLimit() { return limit_; }
-  public boolean hasLimit() { return limit_ > -1; }
-  public long getPerHostMemCost() { return perHostMemCost_; }
-  public long getCardinality() { return cardinality_; }
-  public int getNumNodes() { return numNodes_; }
-  public float getAvgRowSize() { return avgRowSize_; }
-  public void setFragment(PlanFragment fragment) { fragment_ = fragment; }
-  public PlanFragment getFragment() { return fragment_; }
-  public List<Expr> getConjuncts() { return conjuncts_; }
-  public ExprSubstitutionMap getOutputSmap() { return outputSmap_; }
-  public void setOutputSmap(ExprSubstitutionMap smap) { outputSmap_ = smap; }
-  public Set<ExprId> getAssignedConjuncts() { return assignedConjuncts_; }
-  public void setAssignedConjuncts(Set<ExprId> conjuncts) {
-    assignedConjuncts_ = conjuncts;
-  }
-
-  /**
-   * Sets limit_ to the given limit only if limit_ hasn't been set yet, or the new
-   * limit is lower.
-   * @param limit the new limit
-   */
-  public void setLimit(long limit) {
-    if (limit_ == -1 || (limit != -1 && limit_ > limit)) limit_ = limit;
-  }
-
-  public void unsetLimit() { limit_ = -1; }
-
-  public ArrayList<TupleId> getTupleIds() {
-    Preconditions.checkState(tupleIds_ != null);
-    return tupleIds_;
-  }
-
-  public ArrayList<TupleId> getTblRefIds() { return tblRefIds_; }
-  public void setTblRefIds(ArrayList<TupleId> ids) { tblRefIds_ = ids; }
-
-  public Set<TupleId> getNullableTupleIds() {
-    Preconditions.checkState(nullableTupleIds_ != null);
-    return nullableTupleIds_;
-  }
-
-  public void addConjuncts(List<Expr> conjuncts) {
-    if (conjuncts == null)  return;
-    conjuncts_.addAll(conjuncts);
-  }
-
-  public void transferConjuncts(PlanNode recipient) {
-    recipient.conjuncts_.addAll(conjuncts_);
-    conjuncts_.clear();
-  }
-
-  public String getExplainString() {
-    return getExplainString("", "", TExplainLevel.VERBOSE);
-  }
-
-  protected void setDisplayName(String s) { displayName_ = s; }
-
-  final protected String getDisplayLabel() {
-    return String.format("%s:%s", id_.toString(), displayName_);
-  }
-
-  /**
-   * Subclasses can override to provide a node specific detail string that
-   * is displayed to the user.
-   * e.g. scan can return the table name.
-   */
-  protected String getDisplayLabelDetail() { return ""; }
-
-  /**
-   * Generate the explain plan tree. The plan will be in the form of:
-   *
-   * root
-   * |
-   * |----child 3
-   * |      limit:1
-   * |
-   * |----child 2
-   * |      limit:2
-   * |
-   * child 1
-   *
-   * The root node header line will be prefixed by rootPrefix and the remaining plan
-   * output will be prefixed by prefix.
-   */
-  protected final String getExplainString(String rootPrefix, String prefix,
-      TExplainLevel detailLevel) {
-    StringBuilder expBuilder = new StringBuilder();
-    String detailPrefix = prefix;
-    String filler;
-    boolean printFiller = (detailLevel.ordinal() >= TExplainLevel.STANDARD.ordinal());
-
-    // Do not traverse into the children of an Exchange node to avoid crossing
-    // fragment boundaries.
-    boolean traverseChildren = !children_.isEmpty() &&
-        !(this instanceof ExchangeNode && detailLevel == TExplainLevel.VERBOSE);
-
-    if (traverseChildren) {
-      detailPrefix += "|  ";
-      filler = prefix + "|";
-    } else {
-      detailPrefix += "   ";
-      filler = prefix;
-    }
-
-    // Print the current node
-    // The plan node header line will be prefixed by rootPrefix and the remaining details
-    // will be prefixed by detailPrefix.
-    expBuilder.append(getNodeExplainString(rootPrefix, detailPrefix, detailLevel));
-
-    if (detailLevel.ordinal() >= TExplainLevel.STANDARD.ordinal() &&
-        !(this instanceof SortNode)) {
-      if (limit_ != -1) expBuilder.append(detailPrefix + "limit: " + limit_ + "\n");
-      expBuilder.append(getOffsetExplainString(detailPrefix));
-    }
-
-    // Output cardinality, cost estimates and tuple Ids only when explain plan level
-    // is extended or above.
-    if (detailLevel.ordinal() >= TExplainLevel.EXTENDED.ordinal()) {
-      // Print estimated output cardinality and memory cost.
-      expBuilder.append(PrintUtils.printHosts(detailPrefix, numNodes_));
-      expBuilder.append(PrintUtils.printMemCost(" ", perHostMemCost_) + "\n");
-
-      // Print tuple ids and row size.
-      expBuilder.append(detailPrefix + "tuple-ids=");
-      for (int i = 0; i < tupleIds_.size(); ++i) {
-        TupleId tupleId = tupleIds_.get(i);
-        String nullIndicator = nullableTupleIds_.contains(tupleId) ? "N" : "";
-        expBuilder.append(tupleId.asInt() + nullIndicator);
-        if (i + 1 != tupleIds_.size()) expBuilder.append(",");
-      }
-      expBuilder.append(" row-size=" + PrintUtils.printBytes(Math.round(avgRowSize_)));
-      expBuilder.append(PrintUtils.printCardinality(" ", cardinality_));
-      expBuilder.append("\n");
-    }
-
-    // Print the children. Do not traverse into the children of an Exchange node to
-    // avoid crossing fragment boundaries.
-    if (traverseChildren) {
-      if (printFiller) expBuilder.append(filler + "\n");
-      String childHeadlinePrefix = prefix + "|--";
-      String childDetailPrefix = prefix + "|  ";
-      for (int i = children_.size() - 1; i >= 1; --i) {
-        PlanNode child = getChild(i);
-        if (fragment_ != child.fragment_) {
-          // we're crossing a fragment boundary
-          expBuilder.append(
-              child.fragment_.getExplainString(
-                childHeadlinePrefix, childDetailPrefix, detailLevel));
-        } else {
-          expBuilder.append(
-              child.getExplainString(childHeadlinePrefix, childDetailPrefix,
-                  detailLevel));
-        }
-        if (printFiller) expBuilder.append(filler + "\n");
-      }
-      expBuilder.append(children_.get(0).getExplainString(prefix, prefix, detailLevel));
-    }
-    return expBuilder.toString();
-  }
-
-  /**
-   * Return the node-specific details.
-   * Subclass should override this function.
-   * Each line should be prefixed by detailPrefix.
-   */
-  protected String getNodeExplainString(String rootPrefix, String detailPrefix,
-      TExplainLevel detailLevel) {
-    return "";
-  }
-
-  /**
-   * Return the offset_ details, if applicable. This is available separately from
-   * 'getNodeExplainString' because we want to output 'limit: ...' (which can be printed
-   * from PlanNode) before 'offset: ...', which is only printed from SortNodes right
-   * now.
-   */
-  protected String getOffsetExplainString(String prefix) {
-    return "";
-  }
-
-  // Convert this plan node, including all children, to its Thrift representation.
-  public TPlan treeToThrift() {
-    TPlan result = new TPlan();
-    treeToThriftHelper(result);
-    return result;
-  }
-
-  // Append a flattened version of this plan node, including all children, to 'container'.
-  private void treeToThriftHelper(TPlan container) {
-    TPlanNode msg = new TPlanNode();
-    msg.node_id = id_.asInt();
-    msg.limit = limit_;
-
-    TExecStats estimatedStats = new TExecStats();
-    estimatedStats.setCardinality(cardinality_);
-    estimatedStats.setMemory_used(perHostMemCost_);
-    msg.setLabel(getDisplayLabel());
-    msg.setLabel_detail(getDisplayLabelDetail());
-    msg.setEstimated_stats(estimatedStats);
-
-    Preconditions.checkState(tupleIds_.size() > 0);
-    msg.setRow_tuples(Lists.<Integer>newArrayListWithCapacity(tupleIds_.size()));
-    msg.setNullable_tuples(Lists.<Boolean>newArrayListWithCapacity(tupleIds_.size()));
-    for (TupleId tid: tupleIds_) {
-      msg.addToRow_tuples(tid.asInt());
-      msg.addToNullable_tuples(nullableTupleIds_.contains(tid));
-    }
-    for (Expr e: conjuncts_) {
-      msg.addToConjuncts(e.treeToThrift());
-    }
-    // Serialize any runtime filters
-    for (RuntimeFilter filter: runtimeFilters_) {
-      msg.addToRuntime_filters(filter.toThrift());
-    }
-    toThrift(msg);
-    container.addToNodes(msg);
-    // For the purpose of the BE consider ExchangeNodes to have no children.
-    if (this instanceof ExchangeNode) {
-      msg.num_children = 0;
-      return;
-    } else {
-      msg.num_children = children_.size();
-      for (PlanNode child: children_) {
-        child.treeToThriftHelper(container);
-      }
-    }
-  }
-
-  /**
-   * Computes the full internal state, including smap and planner-relevant statistics
-   * (calls computeStats()), marks all slots referenced by this node as materialized
-   * and computes the mem layout of all materialized tuples (with the assumption that
-   * slots that are needed by ancestor PlanNodes have already been marked).
-   * Also performs final expr substitution with children's smaps and computes internal
-   * state required for toThrift(). This is called directly after construction.
-   * Throws if an expr substitution or evaluation fails.
-   */
-  public void init(Analyzer analyzer) throws ImpalaException {
-    assignConjuncts(analyzer);
-    computeStats(analyzer);
-    createDefaultSmap(analyzer);
-  }
-
-  /**
-   * Assign remaining unassigned conjuncts.
-   */
-  protected void assignConjuncts(Analyzer analyzer) {
-    List<Expr> unassigned = analyzer.getUnassignedConjuncts(this);
-    conjuncts_.addAll(unassigned);
-    analyzer.markConjunctsAssigned(unassigned);
-  }
-
-  /**
-   * Returns an smap that combines the children's smaps.
-   */
-  protected ExprSubstitutionMap getCombinedChildSmap() {
-    if (getChildren().size() == 0) return new ExprSubstitutionMap();
-    if (getChildren().size() == 1) return getChild(0).getOutputSmap();
-    ExprSubstitutionMap result = ExprSubstitutionMap.combine(
-        getChild(0).getOutputSmap(), getChild(1).getOutputSmap());
-    for (int i = 2; i < getChildren().size(); ++i) {
-      result = ExprSubstitutionMap.combine(result, getChild(i).getOutputSmap());
-    }
-    return result;
-  }
-
-  /**
-   * Sets outputSmap_ to compose(existing smap, combined child smap). Also
-   * substitutes conjuncts_ using the combined child smap.
-   */
-  protected void createDefaultSmap(Analyzer analyzer) {
-    ExprSubstitutionMap combinedChildSmap = getCombinedChildSmap();
-    outputSmap_ =
-        ExprSubstitutionMap.compose(outputSmap_, combinedChildSmap, analyzer);
-    conjuncts_ = Expr.substituteList(conjuncts_, outputSmap_, analyzer, false);
-  }
-
-  /**
-   * Computes planner statistics: avgRowSize_, numNodes_, cardinality_.
-   * Subclasses need to override this.
-   * Assumes that it has already been called on all children,
-   * and that DescriptorTable.computePhysMemLayout() has been called.
-   * This is broken out of init() so that it can be called separately
-   * from init() (to facilitate inserting additional nodes during plan
-   * partitioning w/o the need to call init() recursively on the whole tree again).
-   */
-  protected void computeStats(Analyzer analyzer) {
-    avgRowSize_ = 0.0F;
-    for (TupleId tid: tupleIds_) {
-      TupleDescriptor desc = analyzer.getTupleDesc(tid);
-      avgRowSize_ += desc.getAvgSerializedSize();
-    }
-    if (!children_.isEmpty()) numNodes_ = getChild(0).numNodes_;
-  }
-
-  protected long capAtLimit(long cardinality) {
-    if (hasLimit()) {
-      if (cardinality == -1) {
-        return limit_;
-      } else {
-        return Math.min(cardinality, limit_);
-      }
-    }
-    return cardinality;
-  }
-
-  /**
-   * Call computeMemLayout() for all materialized tuples.
-   */
-  protected void computeMemLayout(Analyzer analyzer) {
-    for (TupleId id: tupleIds_) {
-      analyzer.getDescTbl().getTupleDesc(id).computeMemLayout();
-    }
-  }
-
-  /**
-   * Returns the estimated combined selectivity of all conjuncts. Uses heuristics to
-   * address the following estimation challenges:
-   * 1. The individual selectivities of conjuncts may be unknown.
-   * 2. Two selectivities, whether known or unknown, could be correlated. Assuming
-   *    independence can lead to significant underestimation.
-   *
-   * The first issue is addressed by using a single default selectivity that is
-   * representative of all conjuncts with unknown selectivities.
-   * The second issue is addressed by an exponential backoff when multiplying each
-   * additional selectivity into the final result.
-   */
-  static protected double computeCombinedSelectivity(List<Expr> conjuncts) {
-    // Collect all estimated selectivities.
-    List<Double> selectivities = Lists.newArrayList();
-    for (Expr e: conjuncts) {
-      if (e.hasSelectivity()) selectivities.add(e.getSelectivity());
-    }
-    if (selectivities.size() != conjuncts.size()) {
-      // Some conjuncts have no estimated selectivity. Use a single default
-      // representative selectivity for all those conjuncts.
-      selectivities.add(Expr.DEFAULT_SELECTIVITY);
-    }
-    // Sort the selectivities to get a consistent estimate, regardless of the original
-    // conjunct order. Sort in ascending order such that the most selective conjunct
-    // is fully applied.
-    Collections.sort(selectivities);
-    double result = 1.0;
-    for (int i = 0; i < selectivities.size(); ++i) {
-      // Exponential backoff for each selectivity multiplied into the final result.
-      result *= Math.pow(selectivities.get(i), 1.0 / (double) (i + 1));
-    }
-    // Bound result in [0, 1]
-    return Math.max(0.0, Math.min(1.0, result));
-  }
-
-  protected double computeSelectivity() {
-    return computeCombinedSelectivity(conjuncts_);
-  }
-
-  // Convert this plan node into msg (excluding children), which requires setting
-  // the node type and the node-specific field.
-  protected abstract void toThrift(TPlanNode msg);
-
-  protected String debugString() {
-    // not using Objects.toStringHelper because
-    // PlanNode.debugString() is embedded by debug strings of the subclasses
-    StringBuilder output = new StringBuilder();
-    output.append("preds=" + Expr.debugString(conjuncts_));
-    output.append(" limit=" + Long.toString(limit_));
-    return output.toString();
-  }
-
-  protected String getExplainString(List<? extends Expr> exprs) {
-    if (exprs == null) return "";
-    StringBuilder output = new StringBuilder();
-    for (int i = 0; i < exprs.size(); ++i) {
-      if (i > 0) output.append(", ");
-      output.append(exprs.get(i).toSql());
-    }
-    return output.toString();
-  }
-
-  /**
-   * Returns true if stats-related variables are valid.
-   */
-  protected boolean hasValidStats() {
-    return (numNodes_ == -1 || numNodes_ >= 0) &&
-           (cardinality_ == -1 || cardinality_ >= 0);
-  }
-
-  /**
-   * Computes and returns the sum of two cardinalities. If an overflow occurs,
-   * the maximum Long value is returned (Long.MAX_VALUE).
-   */
-  public static long addCardinalities(long a, long b) {
-    try {
-      return LongMath.checkedAdd(a, b);
-    } catch (ArithmeticException e) {
-      LOG.warn("overflow when adding cardinalities: " + a + ", " + b);
-      return Long.MAX_VALUE;
-    }
-  }
-
-  /**
-   * Computes and returns the product of two cardinalities. If an overflow
-   * occurs, the maximum Long value is returned (Long.MAX_VALUE).
-   */
-  public static long multiplyCardinalities(long a, long b) {
-    try {
-      return LongMath.checkedMultiply(a, b);
-    } catch (ArithmeticException e) {
-      LOG.warn("overflow when multiplying cardinalities: " + a + ", " + b);
-      return Long.MAX_VALUE;
-    }
-  }
-
-  /**
-   * Returns true if this plan node can output its first row only after consuming
-   * all rows of all its children. This method is used to group plan nodes
-   * into pipelined units for resource estimation.
-   */
-  public boolean isBlockingNode() { return false; }
-
-  /**
-   * Estimates the cost of executing this PlanNode. Currently only sets perHostMemCost_.
-   * May only be called after this PlanNode has been placed in a PlanFragment because
-   * the cost computation is dependent on the enclosing fragment's data partition.
-   */
-  public void computeCosts(TQueryOptions queryOptions) {
-    perHostMemCost_ = 0;
-  }
-
-  /**
-   * The input cardinality is the sum of output cardinalities of its children.
-   * For scan nodes the input cardinality is the expected number of rows scanned.
-   */
-  public long getInputCardinality() {
-    long sum = 0;
-    for(PlanNode p : children_) {
-      long tmp = p.getCardinality();
-      if (tmp == -1) return -1;
-      sum = addCardinalities(sum, tmp);
-    }
-    return sum;
-  }
-
-  protected void addRuntimeFilter(RuntimeFilter filter) { runtimeFilters_.add(filter); }
-
-  protected Collection<RuntimeFilter> getRuntimeFilters() { return runtimeFilters_; }
-
-  protected String getRuntimeFilterExplainString(boolean isBuildNode) {
-    if (runtimeFilters_.isEmpty()) return "";
-    final String applyNodeFilterFormat = "%s -> %s";
-    final String buildNodeFilterFormat = "%s <- %s";
-    String format = isBuildNode ? buildNodeFilterFormat : applyNodeFilterFormat;
-    StringBuilder output = new StringBuilder();
-    List<String> filtersStr = Lists.newArrayList();
-    for (RuntimeFilter filter: runtimeFilters_) {
-      Expr expr = null;
-      if (isBuildNode) {
-        expr = filter.getSrcExpr();
-      } else {
-        expr = filter.getTargetExpr(getId());
-      }
-      Preconditions.checkNotNull(expr);
-      filtersStr.add(String.format(format, filter.getFilterId(), expr.toSql()));
-    }
-    output.append(Joiner.on(", ").join(filtersStr) + "\n");
-    return output.toString();
-  }
-
-  /**
-   * Sort a list of conjuncts into an estimated cheapest order to evaluate them in, based
-   * on estimates of the cost to evaluate and selectivity of the expressions. Should be
-   * called during PlanNode.init for any PlanNode that could have a conjunct list.
-   *
-   * The conjuncts are sorted by repeatedly iterating over them and choosing the conjunct
-   * that would result in the least total estimated work were it to be applied before the
-   * remaining conjuncts.
-   *
-   * As in computeCombinedSelectivity, the selectivities are exponentially backed off over
-   * the iterations, to reflect the possibility that the conjuncts may be correlated, and
-   * Exprs without selectivity estimates are given a reasonable default.
-   */
-  public static <T extends Expr> List<T> orderConjunctsByCost(List<T> conjuncts) {
-    if (conjuncts.size() <= 1) return conjuncts;
-
-    float totalCost = 0;
-    int numWithoutSel = 0;
-    List<T> remaining = Lists.newArrayListWithCapacity(conjuncts.size());
-    for (T e : conjuncts) {
-      Preconditions.checkState(e.hasCost());
-      totalCost += e.getCost();
-      remaining.add(e);
-      if (!e.hasSelectivity()) {
-        ++numWithoutSel;
-      }
-    }
-
-    // We distribute the DEFAULT_SELECTIVITY over the conjuncts without a selectivity
-    // estimate so that their combined selectivities equal DEFAULT_SELECTIVITY, i.e.
-    // Math.pow(defaultSel, numWithoutSel) = Expr.DEFAULT_SELECTIVITY
-    double defaultSel = Expr.DEFAULT_SELECTIVITY;
-    if (numWithoutSel != 0) {
-      defaultSel = Math.pow(Math.E, Math.log(Expr.DEFAULT_SELECTIVITY) / numWithoutSel);
-    }
-
-    List<T> sortedConjuncts = Lists.newArrayListWithCapacity(conjuncts.size());
-    while (!remaining.isEmpty()) {
-      double smallestCost = Double.MAX_VALUE;
-      T bestConjunct =  null;
-      double backoffExp = 1.0 / (double) (sortedConjuncts.size() + 1);
-      for (T e : remaining) {
-        double sel = Math.pow(e.hasSelectivity() ? e.getSelectivity() : defaultSel,
-            backoffExp);
-
-        // The cost of evaluating this conjunct first is estimated as the cost of
-        // applying this conjunct to all rows plus the cost of applying all the
-        // remaining conjuncts to the number of rows we expect to remain given
-        // this conjunct's selectivity, exponentially backed off.
-        double cost = e.getCost() + (totalCost - e.getCost()) * sel;
-        if (cost < smallestCost) {
-          smallestCost = cost;
-          bestConjunct = e;
-        }
-      }
-
-      sortedConjuncts.add(bestConjunct);
-      remaining.remove(bestConjunct);
-      totalCost -= bestConjunct.getCost();
-    }
-
-    return sortedConjuncts;
-  }
-}
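
The exponential backoff in computeCombinedSelectivity() above is worth reading in
isolation: selectivities are sorted ascending so the most selective conjunct is
applied at full strength, and the i-th one (0-based) is damped by raising it to
1/(i+1). A minimal standalone sketch of just that combination step, assuming the
caller has already substituted Expr.DEFAULT_SELECTIVITY for conjuncts without an
estimate:

    static double combine(java.util.List<Double> selectivities) {
      java.util.Collections.sort(selectivities);
      double result = 1.0;
      for (int i = 0; i < selectivities.size(); ++i) {
        // Exponent 1/(i+1): full weight for the first, progressively damped after.
        result *= Math.pow(selectivities.get(i), 1.0 / (i + 1));
      }
      return Math.max(0.0, Math.min(1.0, result));  // clamp to [0, 1]
    }

For example, selectivities 0.1 and 0.5 combine to 0.1 * 0.5^(1/2), about 0.071, rather
than the 0.05 a naive independence assumption would give.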

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/planner/PlanNodeId.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/planner/PlanNodeId.java b/fe/src/main/java/com/cloudera/impala/planner/PlanNodeId.java
deleted file mode 100644
index d161e2b..0000000
--- a/fe/src/main/java/com/cloudera/impala/planner/PlanNodeId.java
+++ /dev/null
@@ -1,42 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.planner;
-
-import com.cloudera.impala.common.Id;
-import com.cloudera.impala.common.IdGenerator;
-
-public class PlanNodeId extends Id<PlanNodeId> {
-  // Construction only allowed via an IdGenerator.
-  protected PlanNodeId(int id) {
-    super(id);
-  }
-
-  public static IdGenerator<PlanNodeId> createGenerator() {
-    return new IdGenerator<PlanNodeId>() {
-      @Override
-      public PlanNodeId getNextId() { return new PlanNodeId(nextId_++); }
-      @Override
-      public PlanNodeId getMaxId() { return new PlanNodeId(nextId_ - 1); }
-    };
-  }
-
-  @Override
-  public String toString() {
-    return String.format("%02d", id_);
-  }
-}
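
Stepping back to PlanNode.orderConjunctsByCost() above: the greedy step it repeats is
to pick the conjunct whose own evaluation cost, plus the cost of the remaining
conjuncts applied to the rows it is expected to let through (with its selectivity
backed off), is smallest. A simplified, array-based sketch of that single selection
step; costs and selectivities are passed in directly instead of being read from Expr:

    // Returns the index of the conjunct to evaluate next, or -1 if none remain.
    // 'numChosen' is how many conjuncts were already ordered; it drives the same
    // 1/(numChosen + 1) backoff used when combining selectivities.
    static int pickNextConjunct(double[] cost, double[] sel, boolean[] used,
        int numChosen, double totalRemainingCost) {
      double backoffExp = 1.0 / (numChosen + 1);
      int best = -1;
      double bestCost = Double.MAX_VALUE;
      for (int i = 0; i < cost.length; ++i) {
        if (used[i]) continue;
        double backedOffSel = Math.pow(sel[i], backoffExp);
        // Cost of applying conjunct i first, then the rest on surviving rows.
        double estimate = cost[i] + (totalRemainingCost - cost[i]) * backedOffSel;
        if (estimate < bestCost) {
          bestCost = estimate;
          best = i;
        }
      }
      return best;
    }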

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/planner/Planner.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/planner/Planner.java b/fe/src/main/java/com/cloudera/impala/planner/Planner.java
deleted file mode 100644
index df90df3..0000000
--- a/fe/src/main/java/com/cloudera/impala/planner/Planner.java
+++ /dev/null
@@ -1,456 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.planner;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.analysis.AnalysisContext;
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.ColumnLineageGraph;
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.analysis.ExprSubstitutionMap;
-import com.cloudera.impala.analysis.InsertStmt;
-import com.cloudera.impala.analysis.JoinOperator;
-import com.cloudera.impala.analysis.QueryStmt;
-import com.cloudera.impala.catalog.HBaseTable;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.common.PrintUtils;
-import com.cloudera.impala.common.RuntimeEnv;
-import com.cloudera.impala.thrift.TExplainLevel;
-import com.cloudera.impala.thrift.TQueryCtx;
-import com.cloudera.impala.thrift.TQueryExecRequest;
-import com.cloudera.impala.thrift.TRuntimeFilterMode;
-import com.cloudera.impala.thrift.TTableName;
-import com.cloudera.impala.util.MaxRowsProcessedVisitor;
-import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-/**
- * Creates an executable plan from an analyzed parse tree and query options.
- */
-public class Planner {
-  private final static Logger LOG = LoggerFactory.getLogger(Planner.class);
-
-  private final PlannerContext ctx_;
-
-  public Planner(AnalysisContext.AnalysisResult analysisResult, TQueryCtx queryCtx) {
-    ctx_ = new PlannerContext(analysisResult, queryCtx);
-  }
-
-  /**
-   * Returns a list of plan fragments for executing an analyzed parse tree.
-   * May return a single-node or distributed executable plan. If enabled (through a
-   * query option), computes runtime filters for dynamic partition pruning.
-   *
-   * Plan generation may fail and throw for the following reasons:
-   * 1. Expr evaluation failed, e.g., during partition pruning.
-   * 2. A certain feature is not yet implemented, e.g., physical join implementation for
-   *    outer/semi joins without equi conjuncts.
-   * 3. Expr substitution failed, e.g., because an expr was substituted with a type that
-   *    renders the containing expr semantically invalid. Analysis should have ensured
-   *    that such an expr substitution during plan generation never fails. If it does,
-   *    that typically means there is a bug in analysis, or a broken/missing smap.
-   */
-  public ArrayList<PlanFragment> createPlan() throws ImpalaException {
-    SingleNodePlanner singleNodePlanner = new SingleNodePlanner(ctx_);
-    DistributedPlanner distributedPlanner = new DistributedPlanner(ctx_);
-    PlanNode singleNodePlan = singleNodePlanner.createSingleNodePlan();
-    ctx_.getRootAnalyzer().getTimeline().markEvent("Single node plan created");
-    ArrayList<PlanFragment> fragments = null;
-
-    // Determine the maximum number of rows processed by any node in the plan tree
-    MaxRowsProcessedVisitor visitor = new MaxRowsProcessedVisitor();
-    singleNodePlan.accept(visitor);
-    long maxRowsProcessed = visitor.get() == -1 ? Long.MAX_VALUE : visitor.get();
-    boolean isSmallQuery =
-        maxRowsProcessed < ctx_.getQueryOptions().exec_single_node_rows_threshold;
-    if (isSmallQuery) {
-      // Execute on a single node and disable codegen for small results
-      ctx_.getQueryOptions().setNum_nodes(1);
-      ctx_.getQueryOptions().setDisable_codegen(true);
-      if (maxRowsProcessed < ctx_.getQueryOptions().batch_size ||
-          maxRowsProcessed < 1024 && ctx_.getQueryOptions().batch_size == 0) {
-        // Only one scanner thread for small queries
-        ctx_.getQueryOptions().setNum_scanner_threads(1);
-      }
-      // disable runtime filters
-      ctx_.getQueryOptions().setRuntime_filter_mode(TRuntimeFilterMode.OFF);
-    }
-
-    // Join rewrites.
-    invertJoins(singleNodePlan, ctx_.isSingleNodeExec());
-    singleNodePlan = useNljForSingularRowBuilds(singleNodePlan, ctx_.getRootAnalyzer());
-
-    // create runtime filters
-    if (ctx_.getQueryOptions().getRuntime_filter_mode() != TRuntimeFilterMode.OFF) {
-      // Always compute filters, even if the BE won't always use all of them.
-      RuntimeFilterGenerator.generateRuntimeFilters(ctx_.getRootAnalyzer(),
-          singleNodePlan, ctx_.getQueryOptions().getMax_num_runtime_filters());
-      ctx_.getRootAnalyzer().getTimeline().markEvent(
-          "Runtime filters computed");
-    }
-
-    if (ctx_.isSingleNodeExec()) {
-      // create one fragment containing the entire single-node plan tree
-      fragments = Lists.newArrayList(new PlanFragment(
-          ctx_.getNextFragmentId(), singleNodePlan, DataPartition.UNPARTITIONED));
-    } else {
-      singleNodePlanner.validatePlan(singleNodePlan);
-      // create distributed plan
-      fragments = distributedPlanner.createPlanFragments(singleNodePlan);
-    }
-
-    PlanFragment rootFragment = fragments.get(fragments.size() - 1);
-    rootFragment.verifyTree();
-    ExprSubstitutionMap rootNodeSmap = rootFragment.getPlanRoot().getOutputSmap();
-    List<Expr> resultExprs = null;
-    if (ctx_.isInsertOrCtas()) {
-      InsertStmt insertStmt = ctx_.getAnalysisResult().getInsertStmt();
-      insertStmt.substituteResultExprs(rootNodeSmap, ctx_.getRootAnalyzer());
-      if (!ctx_.isSingleNodeExec()) {
-        // repartition on partition keys
-        rootFragment = distributedPlanner.createInsertFragment(
-            rootFragment, insertStmt, ctx_.getRootAnalyzer(), fragments);
-      }
-      // set up table sink for root fragment
-      rootFragment.setSink(insertStmt.createDataSink());
-      resultExprs = insertStmt.getResultExprs();
-    } else {
-      if (ctx_.isUpdate()) {
-        // Set up update sink for root fragment
-        rootFragment.setSink(ctx_.getAnalysisResult().getUpdateStmt().createDataSink());
-      } else if (ctx_.isDelete()) {
-        // Set up delete sink for root fragment
-        rootFragment.setSink(ctx_.getAnalysisResult().getDeleteStmt().createDataSink());
-      }
-      QueryStmt queryStmt = ctx_.getQueryStmt();
-      queryStmt.substituteResultExprs(rootNodeSmap, ctx_.getRootAnalyzer());
-      resultExprs = queryStmt.getResultExprs();
-    }
-    rootFragment.setOutputExprs(resultExprs);
-
-    LOG.debug("desctbl: " + ctx_.getRootAnalyzer().getDescTbl().debugString());
-    LOG.debug("resultexprs: " + Expr.debugString(rootFragment.getOutputExprs()));
-    LOG.debug("finalize plan fragments");
-    for (PlanFragment fragment: fragments) {
-      fragment.finalize(ctx_.getRootAnalyzer());
-    }
-
-    Collections.reverse(fragments);
-    ctx_.getRootAnalyzer().getTimeline().markEvent("Distributed plan created");
-
-    ColumnLineageGraph graph = ctx_.getRootAnalyzer().getColumnLineageGraph();
-    if (RuntimeEnv.INSTANCE.computeLineage() || RuntimeEnv.INSTANCE.isTestEnv()) {
-      // Compute the column lineage graph
-      if (ctx_.isInsertOrCtas()) {
-        Table targetTable = ctx_.getAnalysisResult().getInsertStmt().getTargetTable();
-        graph.addTargetColumnLabels(targetTable);
-        Preconditions.checkNotNull(targetTable);
-        List<Expr> exprs = Lists.newArrayList();
-        if (targetTable instanceof HBaseTable) {
-          exprs.addAll(resultExprs);
-        } else {
-          exprs.addAll(ctx_.getAnalysisResult().getInsertStmt().getPartitionKeyExprs());
-          exprs.addAll(resultExprs.subList(0,
-              targetTable.getNonClusteringColumns().size()));
-        }
-        graph.computeLineageGraph(exprs, ctx_.getRootAnalyzer());
-      } else {
-        graph.addTargetColumnLabels(ctx_.getQueryStmt().getColLabels());
-        graph.computeLineageGraph(resultExprs, ctx_.getRootAnalyzer());
-      }
-      LOG.trace("lineage: " + graph.debugString());
-      ctx_.getRootAnalyzer().getTimeline().markEvent("Lineage info computed");
-    }
-
-    return fragments;
-  }
-
-  /**
-   * Return a list of plans, each represented by the root of their fragment trees.
-   * TODO: roll into createPlan()
-   */
-  public List<PlanFragment> createParallelPlans() throws ImpalaException {
-    ArrayList<PlanFragment> distrPlan = createPlan();
-    Preconditions.checkNotNull(distrPlan);
-    ParallelPlanner planner = new ParallelPlanner(ctx_);
-    List<PlanFragment> parallelPlans = planner.createPlans(distrPlan.get(0));
-    ctx_.getRootAnalyzer().getTimeline().markEvent("Parallel plans created");
-    return parallelPlans;
-  }
-
-  /**
-   * Return combined explain string for all plan fragments.
-   * Includes the estimated resource requirements from the request if set.
-   */
-  public String getExplainString(ArrayList<PlanFragment> fragments,
-      TQueryExecRequest request, TExplainLevel explainLevel) {
-    StringBuilder str = new StringBuilder();
-    boolean hasHeader = false;
-    if (request.isSetPer_host_mem_req() && request.isSetPer_host_vcores()) {
-      str.append(
-          String.format("Estimated Per-Host Requirements: Memory=%s VCores=%s\n",
-          PrintUtils.printBytes(request.getPer_host_mem_req()),
-          request.per_host_vcores));
-      hasHeader = true;
-    }
-
-    // IMPALA-1983 In the case of corrupt stats, issue a warning for all queries except
-    // child queries of 'compute stats'.
-    if (!request.query_ctx.isSetParent_query_id() &&
-        request.query_ctx.isSetTables_with_corrupt_stats() &&
-        !request.query_ctx.getTables_with_corrupt_stats().isEmpty()) {
-      List<String> tableNames = Lists.newArrayList();
-      for (TTableName tableName: request.query_ctx.getTables_with_corrupt_stats()) {
-        tableNames.add(tableName.db_name + "." + tableName.table_name);
-      }
-      str.append(
-          "WARNING: The following tables have potentially corrupt table statistics.\n" +
-          "Drop and re-compute statistics to resolve this problem.\n" +
-          Joiner.on(", ").join(tableNames) + "\n");
-      hasHeader = true;
-    }
-
-    // Append warning about tables missing stats except for child queries of
-    // 'compute stats'. The parent_query_id is only set for compute stats child queries.
-    if (!request.query_ctx.isSetParent_query_id() &&
-        request.query_ctx.isSetTables_missing_stats() &&
-        !request.query_ctx.getTables_missing_stats().isEmpty()) {
-      List<String> tableNames = Lists.newArrayList();
-      for (TTableName tableName: request.query_ctx.getTables_missing_stats()) {
-        tableNames.add(tableName.db_name + "." + tableName.table_name);
-      }
-      str.append("WARNING: The following tables are missing relevant table " +
-          "and/or column statistics.\n" + Joiner.on(", ").join(tableNames) + "\n");
-      hasHeader = true;
-    }
-
-    if (request.query_ctx.isDisable_spilling()) {
-      str.append("WARNING: Spilling is disabled for this query as a safety guard.\n" +
-          "Reason: Query option disable_unsafe_spills is set, at least one table\n" +
-          "is missing relevant stats, and no plan hints were given.\n");
-      hasHeader = true;
-    }
-    if (hasHeader) str.append("\n");
-
-    if (explainLevel.ordinal() < TExplainLevel.VERBOSE.ordinal()) {
-      // Print the non-fragmented parallel plan.
-      str.append(fragments.get(0).getExplainString(explainLevel));
-    } else {
-      // Print the fragmented parallel plan.
-      for (int i = 0; i < fragments.size(); ++i) {
-        PlanFragment fragment = fragments.get(i);
-        str.append(fragment.getExplainString(explainLevel));
-        if (explainLevel == TExplainLevel.VERBOSE && i + 1 != fragments.size()) {
-          str.append("\n");
-        }
-      }
-    }
-    return str.toString();
-  }
-
-  /**
-   * Returns true if the fragments are for a trivial, coordinator-only query:
-   * Case 1: Only an EmptySetNode, e.g. query has a limit 0.
-   * Case 2: Query has only constant exprs.
-   */
-  private static boolean isTrivialCoordOnlyPlan(List<PlanFragment> fragments) {
-    Preconditions.checkNotNull(fragments);
-    Preconditions.checkState(!fragments.isEmpty());
-    if (fragments.size() > 1) return false;
-    PlanNode root = fragments.get(0).getPlanRoot();
-    if (root instanceof EmptySetNode) return true;
-    if (root instanceof UnionNode && ((UnionNode) root).isConstantUnion()) return true;
-    return false;
-  }
-
-  /**
-   * Estimates the per-host memory and CPU requirements for the given plan fragments,
-   * and sets the results in request.
-   * Optionally excludes the requirements for unpartitioned fragments.
-   * TODO: The LOG.warn() messages should eventually become Preconditions checks
-   * once resource estimation is more robust.
-   * TODO: Revisit and possibly remove during MT work, particularly references to vcores.
-   */
-  public void computeResourceReqs(List<PlanFragment> fragments,
-      boolean excludeUnpartitionedFragments,
-      TQueryExecRequest request) {
-    Preconditions.checkState(!fragments.isEmpty());
-    Preconditions.checkNotNull(request);
-
-    // Compute pipelined plan node sets.
-    ArrayList<PipelinedPlanNodeSet> planNodeSets =
-        PipelinedPlanNodeSet.computePlanNodeSets(fragments.get(0).getPlanRoot());
-
-    // Compute the max of the per-host mem and vcores requirement.
-    // Note that the max mem and vcores may come from different plan node sets.
-    long maxPerHostMem = Long.MIN_VALUE;
-    int maxPerHostVcores = Integer.MIN_VALUE;
-    for (PipelinedPlanNodeSet planNodeSet: planNodeSets) {
-      if (!planNodeSet.computeResourceEstimates(
-          excludeUnpartitionedFragments, ctx_.getQueryOptions())) {
-        continue;
-      }
-      long perHostMem = planNodeSet.getPerHostMem();
-      int perHostVcores = planNodeSet.getPerHostVcores();
-      if (perHostMem > maxPerHostMem) maxPerHostMem = perHostMem;
-      if (perHostVcores > maxPerHostVcores) maxPerHostVcores = perHostVcores;
-    }
-
-    // Do not ask for more cores than are in the RuntimeEnv.
-    maxPerHostVcores = Math.min(maxPerHostVcores, RuntimeEnv.INSTANCE.getNumCores());
-
-    // Special case for some trivial coordinator-only queries (IMPALA-3053, IMPALA-1092).
-    if (isTrivialCoordOnlyPlan(fragments)) {
-      maxPerHostMem = 1024;
-      maxPerHostVcores = 1;
-    }
-
-    // Set costs to zero if there are only unpartitioned fragments and
-    // excludeUnpartitionedFragments is true.
-    // TODO: handle this case with a better indication for unknown, e.g. -1 or not set.
-    if (maxPerHostMem == Long.MIN_VALUE || maxPerHostVcores == Integer.MIN_VALUE) {
-      boolean allUnpartitioned = true;
-      for (PlanFragment fragment: fragments) {
-        if (fragment.isPartitioned()) {
-          allUnpartitioned = false;
-          break;
-        }
-      }
-      if (allUnpartitioned && excludeUnpartitionedFragments) {
-        maxPerHostMem = 0;
-        maxPerHostVcores = 0;
-      }
-    }
-
-    if (maxPerHostMem < 0 || maxPerHostMem == Long.MIN_VALUE) {
-      LOG.warn("Invalid per-host memory requirement: " + maxPerHostMem);
-    }
-    if (maxPerHostVcores < 0 || maxPerHostVcores == Integer.MIN_VALUE) {
-      LOG.warn("Invalid per-host virtual cores requirement: " + maxPerHostVcores);
-    }
-    request.setPer_host_mem_req(maxPerHostMem);
-    request.setPer_host_vcores((short) maxPerHostVcores);
-
-    LOG.debug("Estimated per-host peak memory requirement: " + maxPerHostMem);
-    LOG.debug("Estimated per-host virtual cores requirement: " + maxPerHostVcores);
-  }
-
-  /**
-   * Traverses the plan tree rooted at 'root' and inverts outer and semi joins
-   * in the following situations:
-   * 1. If the left-hand side is a SingularRowSrcNode then we invert the join because
-   *    then the build side is guaranteed to have only a single row.
-   * 2. There is no backend support for distributed non-equi right outer/semi joins,
-   *    so we invert them (any distributed left semi/outer join is ok).
-   * 3. Invert semi/outer joins if the right-hand side is estimated to have a higher
-   *    cardinality*avgSerializedSize. Do not invert if relevant stats are missing.
-   * The first two inversion rules are independent of the presence/absence of stats.
-   * Left Null Aware Anti Joins are never inverted due to lack of backend support.
-   * Joins that originate from query blocks with a straight join hint are not inverted.
-   * The 'isLocalPlan' parameter indicates whether the plan tree rooted at 'root'
-   * will be executed locally within one machine, i.e., without any data exchanges.
-   */
-  private void invertJoins(PlanNode root, boolean isLocalPlan) {
-    if (root instanceof SubplanNode) {
-      invertJoins(root.getChild(0), isLocalPlan);
-      invertJoins(root.getChild(1), true);
-    } else {
-      for (PlanNode child: root.getChildren()) invertJoins(child, isLocalPlan);
-    }
-
-    if (root instanceof JoinNode) {
-      JoinNode joinNode = (JoinNode) root;
-      JoinOperator joinOp = joinNode.getJoinOp();
-
-      // 1. No inversion allowed due to straight join.
-      // 2. The null-aware left anti-join operator is not considered for inversion.
-      //    There is no backend support for a null-aware right anti-join because
-      //    we cannot execute it efficiently.
-      if (joinNode.isStraightJoin() || joinOp.isNullAwareLeftAntiJoin()) {
-        // Re-compute tuple ids since their order must correspond to the order of children.
-        root.computeTupleIds();
-        return;
-      }
-
-      if (joinNode.getChild(0) instanceof SingularRowSrcNode) {
-        // Always place a singular row src on the build side because it
-        // only produces a single row.
-        joinNode.invertJoin();
-      } else if (!isLocalPlan && joinNode instanceof NestedLoopJoinNode &&
-          (joinOp.isRightSemiJoin() || joinOp.isRightOuterJoin())) {
-        // The current join is a distributed non-equi right outer or semi join
-        // which has no backend support. Invert the join to make it executable.
-        joinNode.invertJoin();
-      } else {
-        // Invert the join if doing so reduces the size of the materialized rhs
-        // (may also reduce network costs depending on the join strategy).
-        // Only consider this optimization if both the lhs/rhs cardinalities are known.
-        long lhsCard = joinNode.getChild(0).getCardinality();
-        long rhsCard = joinNode.getChild(1).getCardinality();
-        float lhsAvgRowSize = joinNode.getChild(0).getAvgRowSize();
-        float rhsAvgRowSize = joinNode.getChild(1).getAvgRowSize();
-        if (lhsCard != -1 && rhsCard != -1 &&
-            lhsCard * lhsAvgRowSize < rhsCard * rhsAvgRowSize) {
-          joinNode.invertJoin();
-        }
-      }
-    }
-
-    // Re-compute tuple ids because the backend assumes that their order corresponds to
-    // the order of children.
-    root.computeTupleIds();
-  }
-
-  /**
-   * Converts hash joins to nested-loop joins if the right-side is a SingularRowSrcNode.
-   * Does not convert Null Aware Anti Joins because we only support that join op with
-   * a hash join.
-   * Throws if JoinNode.init() fails on the new nested-loop join node.
-   */
-  private PlanNode useNljForSingularRowBuilds(PlanNode root, Analyzer analyzer)
-      throws ImpalaException {
-    for (int i = 0; i < root.getChildren().size(); ++i) {
-      root.setChild(i, useNljForSingularRowBuilds(root.getChild(i), analyzer));
-    }
-    if (!(root instanceof JoinNode)) return root;
-    if (root instanceof NestedLoopJoinNode) return root;
-    if (!(root.getChild(1) instanceof SingularRowSrcNode)) return root;
-    JoinNode joinNode = (JoinNode) root;
-    if (joinNode.getJoinOp().isNullAwareLeftAntiJoin()) {
-      Preconditions.checkState(joinNode instanceof HashJoinNode);
-      return root;
-    }
-    List<Expr> otherJoinConjuncts = Lists.newArrayList(joinNode.getOtherJoinConjuncts());
-    otherJoinConjuncts.addAll(joinNode.getEqJoinConjuncts());
-    JoinNode newJoinNode = new NestedLoopJoinNode(joinNode.getChild(0),
-        joinNode.getChild(1), joinNode.isStraightJoin(),
-        joinNode.getDistributionModeHint(), joinNode.getJoinOp(), otherJoinConjuncts);
-    newJoinNode.getConjuncts().addAll(joinNode.getConjuncts());
-    newJoinNode.setId(joinNode.getId());
-    newJoinNode.init(analyzer);
-    return newJoinNode;
-  }
-}
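
For readers skimming the diff: the join-inversion heuristic deleted above only fires when
both input cardinalities are known and the left side would make the smaller build input.
A minimal, hedged sketch of that comparison (illustrative only; the names below are not
part of the Impala code):

    // Returns true if swapping inputs would shrink the materialized build side.
    // A cardinality of -1 means stats are missing, in which case we never invert.
    static boolean buildSideSmallerOnLeft(long lhsCard, float lhsAvgRowSize,
                                          long rhsCard, float rhsAvgRowSize) {
      if (lhsCard == -1 || rhsCard == -1) return false;
      return lhsCard * lhsAvgRowSize < rhsCard * rhsAvgRowSize;
    }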

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/planner/PlannerContext.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/planner/PlannerContext.java b/fe/src/main/java/com/cloudera/impala/planner/PlannerContext.java
deleted file mode 100644
index fc11287..0000000
--- a/fe/src/main/java/com/cloudera/impala/planner/PlannerContext.java
+++ /dev/null
@@ -1,100 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.planner;
-
-import java.util.LinkedList;
-
-import com.cloudera.impala.analysis.AnalysisContext;
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.QueryStmt;
-import com.cloudera.impala.common.IdGenerator;
-import com.cloudera.impala.thrift.TQueryCtx;
-import com.cloudera.impala.thrift.TQueryOptions;
-import com.google.common.collect.Lists;
-
-/**
- * Contains the analysis result of a query as well as planning-specific
- * parameters and state such as plan-node and plan-fragment id generators.
- */
-public class PlannerContext {
-  // Estimate of the overhead imposed by storing data in a hash tbl;
-  // used for determining whether a broadcast join is feasible.
-  public final static double HASH_TBL_SPACE_OVERHEAD = 1.1;
-
-  // The maximum fraction of remaining memory that a sort node can use during execution.
-  public final static double SORT_MEM_MAX_FRACTION = 0.80;
-
-  // Assumed average number of items in a nested collection, since we currently have no
-  // statistics on nested fields. The motivation for this constant is to avoid
-  // pathological plan choices that could result from a SubplanNode having an unknown
-  // cardinality (due to UnnestNodes not knowing their cardinality), or from a ScanNode
-  // significantly underestimating its output cardinality because intermediate collections
-  // are not accounted for at all. For example, we will place a table ref plan with a
-  // SubplanNode on the build side of a join due to an unknown cardinality if the other
-  // input is a base table scan with stats.
-  // The constant value was chosen arbitrarily to not be "too high" or "too low".
-  // TODO: Compute stats for nested types and pick them up here.
-  public static final long AVG_COLLECTION_SIZE = 10;
-
-  private final IdGenerator<PlanNodeId> nodeIdGenerator_ = PlanNodeId.createGenerator();
-  private final IdGenerator<PlanFragmentId> fragmentIdGenerator_ =
-      PlanFragmentId.createGenerator();
-
-  // Keeps track of subplan nesting. Maintained with push/popSubplan().
-  private final LinkedList<SubplanNode> subplans_ = Lists.newLinkedList();
-
-  private final TQueryCtx queryCtx_;
-  private final AnalysisContext.AnalysisResult analysisResult_;
-  private final QueryStmt queryStmt_;
-
-  public PlannerContext (AnalysisContext.AnalysisResult analysisResult,
-      TQueryCtx queryCtx) {
-    analysisResult_ = analysisResult;
-    queryCtx_ = queryCtx;
-    if (isInsertOrCtas()) {
-      queryStmt_ = analysisResult.getInsertStmt().getQueryStmt();
-    } else if (analysisResult.isUpdateStmt()) {
-      queryStmt_ = analysisResult.getUpdateStmt().getQueryStmt();
-    } else if (analysisResult.isDeleteStmt()) {
-      queryStmt_ = analysisResult.getDeleteStmt().getQueryStmt();
-    } else {
-      queryStmt_ = analysisResult.getQueryStmt();
-    }
-  }
-
-  public QueryStmt getQueryStmt() { return queryStmt_; }
-  public TQueryCtx getQueryCtx() { return queryCtx_; }
-  public TQueryOptions getQueryOptions() {
-    return queryCtx_.getRequest().getQuery_options();
-  }
-  public AnalysisContext.AnalysisResult getAnalysisResult() { return analysisResult_; }
-  public Analyzer getRootAnalyzer() { return analysisResult_.getAnalyzer(); }
-  public boolean isSingleNodeExec() { return getQueryOptions().num_nodes == 1; }
-  public PlanNodeId getNextNodeId() { return nodeIdGenerator_.getNextId(); }
-  public PlanFragmentId getNextFragmentId() { return fragmentIdGenerator_.getNextId(); }
-  public boolean isInsertOrCtas() {
-    return analysisResult_.isInsertStmt() || analysisResult_.isCreateTableAsSelectStmt();
-  }
-
-  public boolean hasSubplan() { return !subplans_.isEmpty(); }
-  public SubplanNode getSubplan() { return subplans_.getFirst(); }
-  public boolean pushSubplan(SubplanNode n) { return subplans_.offerFirst(n); }
-  public void popSubplan() { subplans_.removeFirst(); }
-  public boolean isUpdate() { return analysisResult_.isUpdateStmt(); }
-  public boolean isDelete() { return analysisResult_.isDeleteStmt(); }
-}


http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/planner/HBaseScanNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/planner/HBaseScanNode.java b/fe/src/main/java/com/cloudera/impala/planner/HBaseScanNode.java
deleted file mode 100644
index e31372d..0000000
--- a/fe/src/main/java/com/cloudera/impala/planner/HBaseScanNode.java
+++ /dev/null
@@ -1,510 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.planner;
-
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.filter.CompareFilter;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.BinaryPredicate;
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.analysis.SlotDescriptor;
-import com.cloudera.impala.analysis.StringLiteral;
-import com.cloudera.impala.analysis.TupleDescriptor;
-import com.cloudera.impala.catalog.HBaseColumn;
-import com.cloudera.impala.catalog.HBaseTable;
-import com.cloudera.impala.catalog.PrimitiveType;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.common.Pair;
-import com.cloudera.impala.service.FeSupport;
-import com.cloudera.impala.thrift.TColumnValue;
-import com.cloudera.impala.thrift.TExplainLevel;
-import com.cloudera.impala.thrift.THBaseFilter;
-import com.cloudera.impala.thrift.THBaseKeyRange;
-import com.cloudera.impala.thrift.THBaseScanNode;
-import com.cloudera.impala.thrift.TNetworkAddress;
-import com.cloudera.impala.thrift.TPlanNode;
-import com.cloudera.impala.thrift.TPlanNodeType;
-import com.cloudera.impala.thrift.TQueryOptions;
-import com.cloudera.impala.thrift.TScanRange;
-import com.cloudera.impala.thrift.TScanRangeLocation;
-import com.cloudera.impala.thrift.TScanRangeLocations;
-import com.google.common.base.Objects;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-
-/**
- * Full scan of an HBase table.
- * Only families/qualifiers specified in TupleDescriptor will be retrieved in the backend.
- */
-public class HBaseScanNode extends ScanNode {
-  private final static Logger LOG = LoggerFactory.getLogger(HBaseScanNode.class);
-  private final TupleDescriptor desc_;
-
-  // One range per clustering column. The range bounds are expected to be constants.
-  // A null entry means there's no range restriction for that particular key.
-  // If keyRanges is non-null it always contains as many entries as there are clustering
-  // cols.
-  private List<ValueRange> keyRanges_;
-
-  // derived from keyRanges_; empty means unbounded;
-  // initialize start/stopKey_ to be unbounded.
-  private byte[] startKey_ = HConstants.EMPTY_START_ROW;
-  private byte[] stopKey_ = HConstants.EMPTY_END_ROW;
-
-  // True if this scan node is not going to scan anything. If the row key filter
-  // evaluates to null, or if the lower bound > upper bound, then this scan node won't
-  // scan at all.
-  private boolean isEmpty_ = false;
-
-  // List of HBase Filters for generating thrift message. Filled in finalize().
-  private final List<THBaseFilter> filters_ = new ArrayList<THBaseFilter>();
-
-  // The suggested value for "hbase.client.scan.setCaching", which batches maxCaching
-  // rows per fetch request to the HBase region server. If the value is too high,
-  // then the hbase region server will have a hard time (GC pressure and long response
-  // times). If the value is too small, then there will be extra trips to the hbase
-  // region server.
-  // Default to 1024 and update it based on row size estimate such that each batch size
-  // won't exceed 500MB.
-  private final static int MAX_HBASE_FETCH_BATCH_SIZE = 500 * 1024 * 1024;
-  private final static int DEFAULT_SUGGESTED_CACHING = 1024;
-  private int suggestedCaching_ = DEFAULT_SUGGESTED_CACHING;
-
-  // HBase config; common across all object instances.
-  private static Configuration hbaseConf_ = HBaseConfiguration.create();
-
-  public HBaseScanNode(PlanNodeId id, TupleDescriptor desc) {
-    super(id, desc, "SCAN HBASE");
-    desc_ = desc;
-  }
-
-  public void setKeyRanges(List<ValueRange> keyRanges) {
-    Preconditions.checkNotNull(keyRanges);
-    keyRanges_ = keyRanges;
-  }
-
-  @Override
-  public void init(Analyzer analyzer) throws ImpalaException {
-    checkForSupportedFileFormats();
-    assignConjuncts(analyzer);
-    conjuncts_ = orderConjunctsByCost(conjuncts_);
-    setStartStopKey(analyzer);
-    // Convert predicates to HBase filters_.
-    createHBaseFilters(analyzer);
-
-    // materialize slots in remaining conjuncts_
-    analyzer.materializeSlots(conjuncts_);
-    computeMemLayout(analyzer);
-    computeScanRangeLocations(analyzer);
-
-    // Call computeStats() after materializing slots and computing the mem layout.
-    computeStats(analyzer);
-  }
-
-  /**
-   * Convert keyRanges_ to startKey_ and stopKey_.
-   * If ValueRange is not null, transform it into start/stopKey_ by evaluating the
-   * expression. Analysis has checked that the expression is string type. If the
-   * expression evaluates to null, then there's nothing to scan because the HBase row key
-   * cannot be null.
-   * At present, we only do row key filtering for string-mapped keys. String-mapped keys
-   * are always encoded as ASCII.
-   * ValueRange is null if there is no predicate on the row-key.
-   */
-  private void setStartStopKey(Analyzer analyzer) throws InternalException {
-    Preconditions.checkNotNull(keyRanges_);
-    Preconditions.checkState(keyRanges_.size() == 1);
-
-    ValueRange rowRange = keyRanges_.get(0);
-    if (rowRange != null) {
-      if (rowRange.getLowerBound() != null) {
-        Preconditions.checkState(rowRange.getLowerBound().isConstant());
-        Preconditions.checkState(
-            rowRange.getLowerBound().getType().equals(Type.STRING));
-        TColumnValue val = FeSupport.EvalConstExpr(rowRange.getLowerBound(),
-            analyzer.getQueryCtx());
-        if (!val.isSetString_val()) {
-          // lower bound is null.
-          isEmpty_ = true;
-          return;
-        } else {
-          startKey_ = convertToBytes(val.getString_val(),
-              !rowRange.getLowerBoundInclusive());
-        }
-      }
-      if (rowRange.getUpperBound() != null) {
-        Preconditions.checkState(rowRange.getUpperBound().isConstant());
-        Preconditions.checkState(
-            rowRange.getUpperBound().getType().equals(Type.STRING));
-        TColumnValue val = FeSupport.EvalConstExpr(rowRange.getUpperBound(),
-            analyzer.getQueryCtx());
-        if (!val.isSetString_val()) {
-          // upper bound is null.
-          isEmpty_ = true;
-          return;
-        } else {
-          stopKey_ = convertToBytes(val.getString_val(),
-              rowRange.getUpperBoundInclusive());
-        }
-      }
-    }
-
-    boolean endKeyIsEndOfTable = Bytes.equals(stopKey_, HConstants.EMPTY_END_ROW);
-    if ((Bytes.compareTo(startKey_, stopKey_) > 0) && !endKeyIsEndOfTable) {
-      // Lower bound is greater than upper bound.
-      isEmpty_ = true;
-    }
-  }
-
-  /**
-   * Also sets suggestedCaching_.
-   */
-  @Override
-  public void computeStats(Analyzer analyzer) {
-    super.computeStats(analyzer);
-    HBaseTable tbl = (HBaseTable) desc_.getTable();
-
-    ValueRange rowRange = keyRanges_.get(0);
-    if (isEmpty_) {
-      cardinality_ = 0;
-    } else if (rowRange != null && rowRange.isEqRange()) {
-      cardinality_ = 1;
-    } else {
-      // Set maxCaching so that each fetch from hbase won't return a batch of more than
-      // MAX_HBASE_FETCH_BATCH_SIZE bytes.
-      Pair<Long, Long> estimate = tbl.getEstimatedRowStats(startKey_, stopKey_);
-      cardinality_ = estimate.first.longValue();
-      if (estimate.second.longValue() > 0) {
-        suggestedCaching_ = (int)
-            Math.max(MAX_HBASE_FETCH_BATCH_SIZE / estimate.second.longValue(), 1);
-      }
-    }
-    inputCardinality_ = cardinality_;
-
-    cardinality_ *= computeSelectivity();
-    cardinality_ = Math.max(1, cardinality_);
-    cardinality_ = capAtLimit(cardinality_);
-    LOG.debug("computeStats HbaseScan: cardinality=" + Long.toString(cardinality_));
-
-    // TODO: take actual regions into account
-    numNodes_ = tbl.getNumNodes();
-    LOG.debug("computeStats HbaseScan: #nodes=" + Integer.toString(numNodes_));
-  }
-
-  @Override
-  protected String debugString() {
-    HBaseTable tbl = (HBaseTable) desc_.getTable();
-    return Objects.toStringHelper(this)
-        .add("tid", desc_.getId().asInt())
-        .add("hiveTblName", tbl.getFullName())
-        .add("hbaseTblName", tbl.getHBaseTableName())
-        .add("startKey", ByteBuffer.wrap(startKey_).toString())
-        .add("stopKey", ByteBuffer.wrap(stopKey_).toString())
-        .add("isEmpty", isEmpty_)
-        .addValue(super.debugString())
-        .toString();
-  }
-
-  // We convert predicates of the form <slotref> op <constant> where slotref is of
-  // type string to HBase filters. All these predicates are also evaluated at
-  // the HBaseScanNode. To properly filter out NULL values HBaseScanNode treats all
-  // predicates as disjunctive, thereby requiring re-evaluation when there are multiple
-  // attributes. We explicitly materialize the referenced slots, otherwise our hbase
-  // scans don't return correct data.
-  // TODO: expand this to generate nested filter lists for arbitrary conjunctions
-  // and disjunctions.
-  private void createHBaseFilters(Analyzer analyzer) {
-    for (Expr e: conjuncts_) {
-      // We only consider binary predicates
-      if (!(e instanceof BinaryPredicate)) continue;
-      BinaryPredicate bp = (BinaryPredicate) e;
-      CompareFilter.CompareOp hbaseOp = impalaOpToHBaseOp(bp.getOp());
-      // Ignore unsupported ops
-      if (hbaseOp == null) continue;
-
-      for (SlotDescriptor slot: desc_.getSlots()) {
-        // Only push down predicates on string columns
-        if (slot.getType().getPrimitiveType() != PrimitiveType.STRING) continue;
-
-        Expr bindingExpr = bp.getSlotBinding(slot.getId());
-        if (bindingExpr == null || !(bindingExpr instanceof StringLiteral)) continue;
-
-        StringLiteral literal = (StringLiteral) bindingExpr;
-        HBaseColumn col = (HBaseColumn) slot.getColumn();
-        filters_.add(new THBaseFilter(
-            col.getColumnFamily(), col.getColumnQualifier(),
-            (byte) hbaseOp.ordinal(), literal.getUnescapedValue()));
-        analyzer.materializeSlots(Lists.newArrayList(e));
-      }
-    }
-  }
-
-  @Override
-  protected void toThrift(TPlanNode msg) {
-    msg.node_type = TPlanNodeType.HBASE_SCAN_NODE;
-    HBaseTable tbl = (HBaseTable) desc_.getTable();
-    msg.hbase_scan_node =
-      new THBaseScanNode(desc_.getId().asInt(), tbl.getHBaseTableName());
-    if (!filters_.isEmpty()) {
-      msg.hbase_scan_node.setFilters(filters_);
-    }
-    msg.hbase_scan_node.setSuggested_max_caching(suggestedCaching_);
-  }
-
-  /**
-   * We create a TScanRange for each region server that contains at least one
-   * relevant region, and the created TScanRange will contain all the relevant regions
-   * of that region server.
-   */
-  private void computeScanRangeLocations(Analyzer analyzer) {
-    scanRanges_ = Lists.newArrayList();
-
-    // For empty scan node, return an empty list.
-    if (isEmpty_) return;
-
-    // Retrieve relevant HBase regions and their region servers
-    HBaseTable tbl = (HBaseTable) desc_.getTable();
-    org.apache.hadoop.hbase.client.Table hbaseTbl = null;
-    List<HRegionLocation> regionsLoc;
-    try {
-      hbaseTbl = tbl.getHBaseTable();
-      regionsLoc = HBaseTable.getRegionsInRange(hbaseTbl, startKey_, stopKey_);
-      hbaseTbl.close();
-    } catch (IOException e) {
-      throw new RuntimeException(
-          "couldn't retrieve HBase table (" + tbl.getHBaseTableName() + ") info:\n"
-          + e.getMessage(), e);
-    }
-
-    // Convert list of HRegionLocation to Map<hostport, List<HRegionLocation>>.
-    // The List<HRegionLocation> entries end up being sorted by start key/end key, because
-    // regionsLoc is sorted that way.
-    Map<String, List<HRegionLocation>> locationMap = Maps.newHashMap();
-    for (HRegionLocation regionLoc: regionsLoc) {
-      String locHostPort = regionLoc.getHostnamePort();
-      if (locationMap.containsKey(locHostPort)) {
-        locationMap.get(locHostPort).add(regionLoc);
-      } else {
-        locationMap.put(locHostPort, Lists.newArrayList(regionLoc));
-      }
-    }
-
-    for (Map.Entry<String, List<HRegionLocation>> locEntry: locationMap.entrySet()) {
-      // HBaseTableScanner(backend) initializes a result scanner for each key range.
-      // To minimize # of result scanner re-init, create only a single HBaseKeyRange
-      // for all adjacent regions on this server.
-      THBaseKeyRange keyRange = null;
-      byte[] prevEndKey = null;
-      for (HRegionLocation regionLoc: locEntry.getValue()) {
-        byte[] curRegStartKey = regionLoc.getRegionInfo().getStartKey();
-        byte[] curRegEndKey   = regionLoc.getRegionInfo().getEndKey();
-        if (prevEndKey != null &&
-            Bytes.compareTo(prevEndKey, curRegStartKey) == 0) {
-          // the current region starts where the previous one left off;
-          // extend the key range
-          setKeyRangeEnd(keyRange, curRegEndKey);
-        } else {
-          // create a new HBaseKeyRange (and TScanRange/TScanRangeLocations to go
-          // with it).
-          keyRange = new THBaseKeyRange();
-          setKeyRangeStart(keyRange, curRegStartKey);
-          setKeyRangeEnd(keyRange, curRegEndKey);
-
-          TScanRangeLocations scanRangeLocation = new TScanRangeLocations();
-          TNetworkAddress networkAddress = addressToTNetworkAddress(locEntry.getKey());
-          scanRangeLocation.addToLocations(
-              new TScanRangeLocation(analyzer.getHostIndex().getIndex(networkAddress)));
-          scanRanges_.add(scanRangeLocation);
-
-          TScanRange scanRange = new TScanRange();
-          scanRange.setHbase_key_range(keyRange);
-          scanRangeLocation.setScan_range(scanRange);
-        }
-        prevEndKey = curRegEndKey;
-      }
-    }
-  }
-
-  /**
-   * Set the start key of keyRange using the provided key, bounded by startKey_
-   * @param keyRange the keyRange to be updated
-   * @param rangeStartKey the start key value to be set to
-   */
-  private void setKeyRangeStart(THBaseKeyRange keyRange, byte[] rangeStartKey) {
-    keyRange.unsetStartKey();
-    // use the max(startKey, rangeStartKey) for scan start
-    if (!Bytes.equals(rangeStartKey, HConstants.EMPTY_START_ROW) ||
-        !Bytes.equals(startKey_, HConstants.EMPTY_START_ROW)) {
-      byte[] partStart = (Bytes.compareTo(rangeStartKey, startKey_) < 0) ?
-          startKey_ : rangeStartKey;
-      keyRange.setStartKey(Bytes.toString(partStart));
-    }
-  }
-
-  /**
-   * Set the end key of keyRange using the provided key, bounded by stopKey_
-   * @param keyRange the keyRange to be updated
-   * @param rangeEndKey the end key value to be set to
-   */
-  private void setKeyRangeEnd(THBaseKeyRange keyRange, byte[] rangeEndKey) {
-    keyRange.unsetStopKey();
-    // use the min(stopkey, regionStopKey) for scan stop
-    if (!Bytes.equals(rangeEndKey, HConstants.EMPTY_END_ROW) ||
-        !Bytes.equals(stopKey_, HConstants.EMPTY_END_ROW)) {
-      if (Bytes.equals(stopKey_, HConstants.EMPTY_END_ROW)) {
-        keyRange.setStopKey(Bytes.toString(rangeEndKey));
-      } else if (Bytes.equals(rangeEndKey, HConstants.EMPTY_END_ROW)) {
-        keyRange.setStopKey(Bytes.toString(stopKey_));
-      } else {
-        byte[] partEnd = (Bytes.compareTo(rangeEndKey, stopKey_) < 0) ?
-            rangeEndKey : stopKey_;
-        keyRange.setStopKey(Bytes.toString(partEnd));
-      }
-    }
-  }
-
-  @Override
-  protected String getNodeExplainString(String prefix, String detailPrefix,
-      TExplainLevel detailLevel) {
-    HBaseTable table = (HBaseTable) desc_.getTable();
-    StringBuilder output = new StringBuilder();
-    if (isEmpty_) {
-      output.append(prefix + "empty scan node\n");
-      return output.toString();
-    }
-    String aliasStr = "";
-    if (!table.getFullName().equalsIgnoreCase(desc_.getAlias()) &&
-        !table.getName().equalsIgnoreCase(desc_.getAlias())) {
-      aliasStr = " " + desc_.getAlias();
-    }
-    output.append(String.format("%s%s:%s [%s%s]\n", prefix, id_.toString(),
-        displayName_, table.getFullName(), aliasStr));
-    if (detailLevel.ordinal() >= TExplainLevel.STANDARD.ordinal()) {
-      if (!Bytes.equals(startKey_, HConstants.EMPTY_START_ROW)) {
-        output.append(detailPrefix + "start key: " + printKey(startKey_) + "\n");
-      }
-      if (!Bytes.equals(stopKey_, HConstants.EMPTY_END_ROW)) {
-        output.append(detailPrefix + "stop key: " + printKey(stopKey_) + "\n");
-      }
-      if (!filters_.isEmpty()) {
-        output.append(detailPrefix + "hbase filters:");
-        if (filters_.size() == 1) {
-          THBaseFilter filter = filters_.get(0);
-          output.append(" " + filter.family + ":" + filter.qualifier + " " +
-              CompareFilter.CompareOp.values()[filter.op_ordinal].toString() + " " +
-              "'" + filter.filter_constant + "'");
-        } else {
-          for (int i = 0; i < filters_.size(); ++i) {
-            THBaseFilter filter = filters_.get(i);
-            output.append("\n  " + filter.family + ":" + filter.qualifier + " " +
-                CompareFilter.CompareOp.values()[filter.op_ordinal].toString() + " " +
-                "'" + filter.filter_constant + "'");
-          }
-        }
-        output.append('\n');
-      }
-      if (!conjuncts_.isEmpty()) {
-        output.append(
-            detailPrefix + "predicates: " + getExplainString(conjuncts_) + "\n");
-      }
-    }
-    if (detailLevel.ordinal() >= TExplainLevel.EXTENDED.ordinal()) {
-      output.append(getStatsExplainString(detailPrefix, detailLevel));
-      output.append("\n");
-    }
-    return output.toString();
-  }
-
-  /**
-   * Convert key into byte array and append a '\0' if 'nextKey' is true.
-   */
-  private byte[] convertToBytes(String rowKey, boolean nextKey) {
-    byte[] keyBytes = Bytes.toBytes(rowKey);
-    if (!nextKey) {
-      return keyBytes;
-    } else {
-      // append \0
-      return Arrays.copyOf(keyBytes, keyBytes.length + 1);
-    }
-  }
-
-  /**
-   * Prints non-printable characters in escaped octal, otherwise outputs
-   * the characters.
-   */
-  public static String printKey(byte[] key) {
-    StringBuilder result = new StringBuilder();
-    for (int i = 0; i < key.length; ++i) {
-      if (!Character.isISOControl(key[i])) {
-        result.append((char) key[i]);
-      } else {
-        result.append("\\");
-        result.append(Integer.toOctalString(key[i]));
-      }
-    }
-    return result.toString();
-  }
-
-  private static CompareFilter.CompareOp impalaOpToHBaseOp(
-      BinaryPredicate.Operator impalaOp) {
-    switch(impalaOp) {
-      case EQ: return CompareFilter.CompareOp.EQUAL;
-      case NE: return CompareFilter.CompareOp.NOT_EQUAL;
-      case GT: return CompareFilter.CompareOp.GREATER;
-      case GE: return CompareFilter.CompareOp.GREATER_OR_EQUAL;
-      case LT: return CompareFilter.CompareOp.LESS;
-      case LE: return CompareFilter.CompareOp.LESS_OR_EQUAL;
-      // TODO: Add support for pushing LIKE/REGEX down to HBase with a different Filter.
-      default: throw new IllegalArgumentException(
-          "HBase: Unsupported Impala compare operator: " + impalaOp);
-    }
-  }
-
-  @Override
-  public void computeCosts(TQueryOptions queryOptions) {
-    // TODO: What's a good estimate of memory consumption?
-    perHostMemCost_ = 1024L * 1024L * 1024L;
-  }
-
-  /**
-   * Returns the per-host upper bound of memory that any number of concurrent scan nodes
-   * will use. Used for estimating the per-host memory requirement of queries.
-   */
-  public static long getPerHostMemUpperBound() {
-    // TODO: What's a good estimate of memory consumption?
-    return 1024L * 1024L * 1024L;
-  }
-}
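
The scan-caching suggestion deleted above caps each HBase fetch batch at roughly 500MB
based on the estimated average row size. A small sketch of that arithmetic (illustrative
names, not the Impala API):

    // Suggested rows per HBase fetch so a single batch stays under ~500MB.
    static int suggestedCaching(long avgRowSizeBytes) {
      final long maxBatchBytes = 500L * 1024L * 1024L;
      if (avgRowSizeBytes <= 0) return 1024;  // fall back to the default suggestion
      return (int) Math.max(maxBatchBytes / avgRowSizeBytes, 1L);
    }
    // Example: an estimated row size of 5KB yields 102,400 rows per batch.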

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/planner/HBaseTableSink.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/planner/HBaseTableSink.java b/fe/src/main/java/com/cloudera/impala/planner/HBaseTableSink.java
deleted file mode 100644
index 2a0d1b7..0000000
--- a/fe/src/main/java/com/cloudera/impala/planner/HBaseTableSink.java
+++ /dev/null
@@ -1,59 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-
-package com.cloudera.impala.planner;
-
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.common.PrintUtils;
-import com.cloudera.impala.thrift.TDataSink;
-import com.cloudera.impala.thrift.TDataSinkType;
-import com.cloudera.impala.thrift.TExplainLevel;
-import com.cloudera.impala.thrift.TTableSink;
-import com.cloudera.impala.thrift.TTableSinkType;
-
-/**
- * Class used to represent a Sink that will transport
- * data from a plan fragment into an HBase table using HTable.
- */
-public class HBaseTableSink extends TableSink {
-  public HBaseTableSink(Table targetTable) {
-    super(targetTable, Op.INSERT);
-  }
-
-  @Override
-  public String getExplainString(String prefix, String detailPrefix,
-      TExplainLevel explainLevel) {
-    StringBuilder output = new StringBuilder();
-    output.append(prefix + "WRITE TO HBASE table=" + targetTable_.getFullName() + "\n");
-    if (explainLevel.ordinal() >= TExplainLevel.EXTENDED.ordinal()) {
-      output.append(PrintUtils.printHosts(detailPrefix, fragment_.getNumNodes()));
-      output.append(PrintUtils.printMemCost(" ", perHostMemCost_));
-      output.append("\n");
-    }
-    return output.toString();
-  }
-
-  @Override
-  protected TDataSink toThrift() {
-    TDataSink result = new TDataSink(TDataSinkType.TABLE_SINK);
-    TTableSink tTableSink = new TTableSink(targetTable_.getId().asInt(),
-        TTableSinkType.HBASE, sinkOp_.toThrift());
-    result.table_sink = tTableSink;
-    return result;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/planner/HashJoinNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/planner/HashJoinNode.java b/fe/src/main/java/com/cloudera/impala/planner/HashJoinNode.java
deleted file mode 100644
index 906f732..0000000
--- a/fe/src/main/java/com/cloudera/impala/planner/HashJoinNode.java
+++ /dev/null
@@ -1,193 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.planner;
-
-import java.util.List;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.BinaryPredicate;
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.analysis.ExprSubstitutionMap;
-import com.cloudera.impala.analysis.JoinOperator;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.thrift.TEqJoinCondition;
-import com.cloudera.impala.thrift.TExplainLevel;
-import com.cloudera.impala.thrift.THashJoinNode;
-import com.cloudera.impala.thrift.TPlanNode;
-import com.cloudera.impala.thrift.TPlanNodeType;
-import com.cloudera.impala.thrift.TQueryOptions;
-import com.google.common.base.Objects;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-/**
- * Hash join between left child (outer) and right child (inner). One child must be the
- * plan generated for a table ref. Typically, that is the right child, but due to join
- * inversion (for outer/semi/cross joins) it could also be the left child.
- */
-public class HashJoinNode extends JoinNode {
-  private final static Logger LOG = LoggerFactory.getLogger(HashJoinNode.class);
-
-  public HashJoinNode(PlanNode outer, PlanNode inner, boolean isStraightJoin,
-      DistributionMode distrMode, JoinOperator joinOp,
-      List<BinaryPredicate> eqJoinConjuncts, List<Expr> otherJoinConjuncts) {
-    super(outer, inner, isStraightJoin, distrMode, joinOp, eqJoinConjuncts,
-        otherJoinConjuncts, "HASH JOIN");
-    Preconditions.checkNotNull(eqJoinConjuncts);
-    Preconditions.checkState(joinOp_ != JoinOperator.CROSS_JOIN);
-  }
-
-  @Override
-  public List<BinaryPredicate> getEqJoinConjuncts() { return eqJoinConjuncts_; }
-
-  @Override
-  public void init(Analyzer analyzer) throws ImpalaException {
-    super.init(analyzer);
-    List<BinaryPredicate> newEqJoinConjuncts = Lists.newArrayList();
-    ExprSubstitutionMap combinedChildSmap = getCombinedChildSmap();
-    for (Expr c: eqJoinConjuncts_) {
-      BinaryPredicate eqPred =
-          (BinaryPredicate) c.substitute(combinedChildSmap, analyzer, false);
-      Type t0 = eqPred.getChild(0).getType();
-      Type t1 = eqPred.getChild(1).getType();
-      if (!t0.matchesType(t1)) {
-        // With decimal and char types, the child types do not have to match because
-        // the equality builtin handles it. However, they will not hash correctly so
-        // insert a cast.
-        boolean bothDecimal = t0.isDecimal() && t1.isDecimal();
-        boolean bothString = t0.isStringType() && t1.isStringType();
-        if (!bothDecimal && !bothString) {
-          throw new InternalException("Cannot compare " +
-              t0.toSql() + " to " + t1.toSql() + " in join predicate.");
-        }
-        Type compatibleType = Type.getAssignmentCompatibleType(t0, t1, false);
-        Preconditions.checkState(compatibleType.isDecimal() ||
-            compatibleType.isStringType());
-        try {
-          if (!t0.equals(compatibleType)) {
-            eqPred.setChild(0, eqPred.getChild(0).castTo(compatibleType));
-          }
-          if (!t1.equals(compatibleType)) {
-            eqPred.setChild(1, eqPred.getChild(1).castTo(compatibleType));
-          }
-        } catch (AnalysisException e) {
-          throw new InternalException("Should not happen", e);
-        }
-      }
-      Preconditions.checkState(
-          eqPred.getChild(0).getType().matchesType(eqPred.getChild(1).getType()));
-      BinaryPredicate newEqPred = new BinaryPredicate(eqPred.getOp(),
-          eqPred.getChild(0), eqPred.getChild(1));
-      newEqPred.analyze(analyzer);
-      newEqJoinConjuncts.add(newEqPred);
-    }
-    eqJoinConjuncts_ = newEqJoinConjuncts;
-    orderJoinConjunctsByCost();
-    computeStats(analyzer);
-  }
-
-  @Override
-  protected String debugString() {
-    return Objects.toStringHelper(this)
-        .add("eqJoinConjuncts_", eqJoinConjunctsDebugString())
-        .addValue(super.debugString())
-        .toString();
-  }
-
-  private String eqJoinConjunctsDebugString() {
-    Objects.ToStringHelper helper = Objects.toStringHelper(this);
-    for (Expr entry: eqJoinConjuncts_) {
-      helper.add("lhs" , entry.getChild(0)).add("rhs", entry.getChild(1));
-    }
-    return helper.toString();
-  }
-
-  @Override
-  protected void toThrift(TPlanNode msg) {
-    msg.node_type = TPlanNodeType.HASH_JOIN_NODE;
-    msg.hash_join_node = new THashJoinNode();
-    msg.hash_join_node.join_op = joinOp_.toThrift();
-    for (Expr entry: eqJoinConjuncts_) {
-      BinaryPredicate bp = (BinaryPredicate)entry;
-      TEqJoinCondition eqJoinCondition =
-          new TEqJoinCondition(bp.getChild(0).treeToThrift(),
-              bp.getChild(1).treeToThrift(),
-              bp.getOp() == BinaryPredicate.Operator.NOT_DISTINCT);
-      msg.hash_join_node.addToEq_join_conjuncts(eqJoinCondition);
-    }
-    for (Expr e: otherJoinConjuncts_) {
-      msg.hash_join_node.addToOther_join_conjuncts(e.treeToThrift());
-    }
-  }
-
-  @Override
-  protected String getNodeExplainString(String prefix, String detailPrefix,
-      TExplainLevel detailLevel) {
-    StringBuilder output = new StringBuilder();
-    output.append(String.format("%s%s [%s]\n", prefix, getDisplayLabel(),
-        getDisplayLabelDetail()));
-
-    if (detailLevel.ordinal() > TExplainLevel.STANDARD.ordinal()) {
-      if (joinTableId_.isValid()) {
-        output.append(
-            detailPrefix + "hash-table-id=" + joinTableId_.toString() + "\n");
-      }
-    }
-    if (detailLevel.ordinal() > TExplainLevel.MINIMAL.ordinal()) {
-      output.append(detailPrefix + "hash predicates: ");
-      for (int i = 0; i < eqJoinConjuncts_.size(); ++i) {
-        Expr eqConjunct = eqJoinConjuncts_.get(i);
-        output.append(eqConjunct.toSql());
-        if (i + 1 != eqJoinConjuncts_.size()) output.append(", ");
-      }
-      output.append("\n");
-      if (!otherJoinConjuncts_.isEmpty()) {
-        output.append(detailPrefix + "other join predicates: ")
-        .append(getExplainString(otherJoinConjuncts_) + "\n");
-      }
-      if (!conjuncts_.isEmpty()) {
-        output.append(detailPrefix + "other predicates: ")
-        .append(getExplainString(conjuncts_) + "\n");
-      }
-      if (!runtimeFilters_.isEmpty()) {
-        output.append(detailPrefix + "runtime filters: ");
-        output.append(getRuntimeFilterExplainString(true));
-      }
-    }
-    return output.toString();
-  }
-
-  @Override
-  public void computeCosts(TQueryOptions queryOptions) {
-    if (getChild(1).getCardinality() == -1 || getChild(1).getAvgRowSize() == -1
-        || numNodes_ == 0) {
-      perHostMemCost_ = DEFAULT_PER_HOST_MEM;
-      return;
-    }
-    perHostMemCost_ =
-        (long) Math.ceil(getChild(1).cardinality_ * getChild(1).avgRowSize_
-          * PlannerContext.HASH_TBL_SPACE_OVERHEAD);
-    if (distrMode_ == DistributionMode.PARTITIONED) perHostMemCost_ /= numNodes_;
-  }
-}
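
The memory estimate deleted above multiplies the build side's cardinality and average row
size by the hash-table overhead factor (PlannerContext.HASH_TBL_SPACE_OVERHEAD = 1.1) and,
for the partitioned join strategy, spreads it across the participating nodes. A hedged,
standalone sketch of that estimate (names are illustrative):

    // Estimated per-host memory for the hash join build side.
    static long perHostHashJoinMem(long buildCard, float buildAvgRowSize,
                                   boolean partitioned, int numNodes) {
      long total = (long) Math.ceil(buildCard * buildAvgRowSize * 1.1);
      if (!partitioned || numNodes <= 0) return total;
      return total / numNodes;
    }
    // Example: 10M build rows at 50 bytes each -> ~550MB total, ~55MB/host on 10 nodes.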

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/planner/HdfsPartitionFilter.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/planner/HdfsPartitionFilter.java b/fe/src/main/java/com/cloudera/impala/planner/HdfsPartitionFilter.java
deleted file mode 100644
index d1710aa..0000000
--- a/fe/src/main/java/com/cloudera/impala/planner/HdfsPartitionFilter.java
+++ /dev/null
@@ -1,127 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.planner;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.analysis.ExprSubstitutionMap;
-import com.cloudera.impala.analysis.SlotDescriptor;
-import com.cloudera.impala.analysis.SlotId;
-import com.cloudera.impala.analysis.SlotRef;
-import com.cloudera.impala.catalog.Column;
-import com.cloudera.impala.catalog.HdfsPartition;
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.service.FeSupport;
-import com.cloudera.impala.thrift.TColumnValue;
-import com.cloudera.impala.thrift.TResultRow;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-
-/**
- * An HdfsPartitionFilter represents a predicate on the partition columns (or a subset)
- * of a table. It can be evaluated at plan generation time against an HdfsPartition.
- */
-public class HdfsPartitionFilter {
-  private final static Logger LOG = LoggerFactory.getLogger(HdfsPartitionFilter.class);
-
-  private final Expr predicate_;
-
-  // lhs exprs of smap used in isMatch()
-  private final ArrayList<SlotRef> lhsSlotRefs_ = Lists.newArrayList();
-  // indices into Table.getColumns()
-  private final ArrayList<Integer> refdKeys_ = Lists.newArrayList();
-
-  public HdfsPartitionFilter(Expr predicate, HdfsTable tbl, Analyzer analyzer) {
-    predicate_ = predicate;
-
-    // populate lhsSlotRefs_ and refdKeys_
-    ArrayList<SlotId> refdSlots = Lists.newArrayList();
-    predicate.getIds(null, refdSlots);
-    HashMap<Column, SlotDescriptor> slotDescsByCol = Maps.newHashMap();
-    for (SlotId refdSlot: refdSlots) {
-      SlotDescriptor slotDesc = analyzer.getDescTbl().getSlotDesc(refdSlot);
-      slotDescsByCol.put(slotDesc.getColumn(), slotDesc);
-    }
-
-    for (int i = 0; i < tbl.getNumClusteringCols(); ++i) {
-      Column col = tbl.getColumns().get(i);
-      SlotDescriptor slotDesc = slotDescsByCol.get(col);
-      if (slotDesc != null) {
-        lhsSlotRefs_.add(new SlotRef(slotDesc));
-        refdKeys_.add(i);
-      }
-    }
-    Preconditions.checkState(lhsSlotRefs_.size() == refdKeys_.size());
-  }
-
-  /**
-   * Evaluate a filter against a batch of partitions and return the partition ids
-   * that pass the filter.
-   */
-  public HashSet<Long> getMatchingPartitionIds(ArrayList<HdfsPartition> partitions,
-      Analyzer analyzer) throws InternalException {
-    HashSet<Long> result = new HashSet<Long>();
-    // List of predicates to evaluate
-    ArrayList<Expr> predicates = new ArrayList<Expr>(partitions.size());
-    long[] partitionIds = new long[partitions.size()];
-    int indx = 0;
-    for (HdfsPartition p: partitions) {
-      predicates.add(buildPartitionPredicate(p, analyzer));
-      partitionIds[indx++] = p.getId();
-    }
-    // Evaluate the predicates
-    TResultRow results = FeSupport.EvalPredicateBatch(predicates,
-        analyzer.getQueryCtx());
-    Preconditions.checkState(results.getColValsSize() == partitions.size());
-    indx = 0;
-    for (TColumnValue val: results.getColVals()) {
-      if (val.isBool_val()) result.add(partitionIds[indx]);
-      ++indx;
-    }
-    return result;
-  }
-
-  /**
-   * Construct a predicate for a given partition by substituting the SlotRefs
-   * for the partition cols with the respective partition-key values.
-   */
-  private Expr buildPartitionPredicate(HdfsPartition partition, Analyzer analyzer)
-      throws InternalException {
-    // construct smap
-    ExprSubstitutionMap sMap = new ExprSubstitutionMap();
-    for (int i = 0; i < refdKeys_.size(); ++i) {
-      sMap.put(
-          lhsSlotRefs_.get(i), partition.getPartitionValues().get(refdKeys_.get(i)));
-    }
-
-    Expr literalPredicate = predicate_.substitute(sMap, analyzer, false);
-    LOG.trace("buildPartitionPredicate: " + literalPredicate.toSql() + " " +
-        literalPredicate.debugString());
-    Preconditions.checkState(literalPredicate.isConstant());
-    return literalPredicate;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/planner/HdfsPartitionPruner.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/planner/HdfsPartitionPruner.java b/fe/src/main/java/com/cloudera/impala/planner/HdfsPartitionPruner.java
deleted file mode 100644
index 9606dc5..0000000
--- a/fe/src/main/java/com/cloudera/impala/planner/HdfsPartitionPruner.java
+++ /dev/null
@@ -1,475 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.planner;
-
-import java.util.ArrayList;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.NavigableMap;
-import java.util.Set;
-import java.util.TreeMap;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.BetweenPredicate;
-import com.cloudera.impala.analysis.BinaryPredicate;
-import com.cloudera.impala.analysis.BinaryPredicate.Operator;
-import com.cloudera.impala.analysis.CompoundPredicate;
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.analysis.InPredicate;
-import com.cloudera.impala.analysis.IsNullPredicate;
-import com.cloudera.impala.analysis.LiteralExpr;
-import com.cloudera.impala.analysis.NullLiteral;
-import com.cloudera.impala.analysis.SlotDescriptor;
-import com.cloudera.impala.analysis.SlotId;
-import com.cloudera.impala.analysis.SlotRef;
-import com.cloudera.impala.analysis.TupleDescriptor;
-import com.cloudera.impala.catalog.HdfsPartition;
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.InternalException;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Predicates;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Sets;
-
-
-/**
- * The HDFS partition pruner provides a mechanism to filter out partitions of an HDFS
- * table based on the conjuncts provided by the caller.
- *
- * The pruner is initialized with a TupleDescriptor for the slots being materialized.
- * The prunePartitions() method is the external interface exposed by this class. It
- * takes a list of conjuncts, loops through all the partitions and prunes them based
- * on applicable conjuncts. It returns a list of partitions left after applying all
- * the conjuncts and also removes the conjuncts which have been fully evaluated with
- * the partition columns.
- */
-public class HdfsPartitionPruner {
-
-  private final static Logger LOG = LoggerFactory.getLogger(HdfsPartitionPruner.class);
-
-  // Partition batch size used during partition pruning.
-  private final static int PARTITION_PRUNING_BATCH_SIZE = 1024;
-
-  private final HdfsTable tbl_;
-
-  private List<SlotId> partitionSlots_ = Lists.newArrayList();
-
-  public HdfsPartitionPruner(TupleDescriptor tupleDesc) {
-    Preconditions.checkState(tupleDesc.getTable() instanceof HdfsTable);
-    tbl_ = (HdfsTable)tupleDesc.getTable();
-
-    // Collect all the partitioning columns from TupleDescriptor.
-    for (SlotDescriptor slotDesc: tupleDesc.getSlots()) {
-      if (slotDesc.getColumn() == null) continue;
-      if (slotDesc.getColumn().getPosition() < tbl_.getNumClusteringCols()) {
-        partitionSlots_.add(slotDesc.getId());
-      }
-    }
-  }
-
-  /**
-   * Return a list of partitions left after applying the conjuncts. Please note
-   * that conjuncts used for filtering will be removed from the list 'conjuncts'.
-   */
-  public List<HdfsPartition> prunePartitions(Analyzer analyzer, List<Expr> conjuncts)
-      throws InternalException {
-    // Start with creating a collection of partition filters for the applicable conjuncts.
-    List<HdfsPartitionFilter> partitionFilters = Lists.newArrayList();
-    // Conjuncts that can be evaluated from the partition key values.
-    List<Expr> simpleFilterConjuncts = Lists.newArrayList();
-
-    // Simple predicates (e.g. binary predicates of the form
-    // <SlotRef> <op> <LiteralExpr>) can be used to derive lists
-    // of matching partition ids directly from the partition key values.
-    // Split conjuncts among those that can be evaluated from partition
-    // key values and those that need to be evaluated in the BE.
-    Iterator<Expr> it = conjuncts.iterator();
-    while (it.hasNext()) {
-      Expr conjunct = it.next();
-      if (conjunct.isBoundBySlotIds(partitionSlots_)) {
-        // Check if the conjunct can be evaluated from the partition metadata.
-        // canEvalUsingPartitionMd() operates on a cloned conjunct which may get
-        // modified if it contains constant expressions. If the cloned conjunct
-        // cannot be evaluated from the partition metadata, the original unmodified
-        // conjunct is evaluated in the BE.
-        Expr clonedConjunct = conjunct.clone();
-        if (canEvalUsingPartitionMd(clonedConjunct, analyzer)) {
-          simpleFilterConjuncts.add(Expr.pushNegationToOperands(clonedConjunct));
-        } else {
-          partitionFilters.add(new HdfsPartitionFilter(conjunct, tbl_, analyzer));
-        }
-        it.remove();
-      }
-    }
-
-    // Set of matching partition ids, i.e. partitions that pass all filters
-    HashSet<Long> matchingPartitionIds = null;
-
-    // Evaluate the partition filters from the partition key values.
-    // The result is the intersection of the associated partition id sets.
-    for (Expr filter: simpleFilterConjuncts) {
-      // Evaluate the filter
-      HashSet<Long> matchingIds = evalSlotBindingFilter(filter);
-      if (matchingPartitionIds == null) {
-        matchingPartitionIds = matchingIds;
-      } else {
-        matchingPartitionIds.retainAll(matchingIds);
-      }
-    }
-
-    // Check if we need to initialize the set of valid partition ids.
-    if (simpleFilterConjuncts.size() == 0) {
-      Preconditions.checkState(matchingPartitionIds == null);
-      matchingPartitionIds = Sets.newHashSet(tbl_.getPartitionIds());
-    }
-
-    // Evaluate the 'complex' partition filters in the BE.
-    evalPartitionFiltersInBe(partitionFilters, matchingPartitionIds, analyzer);
-
-    // Populate the list of valid, non-empty partitions to process
-    List<HdfsPartition> results = Lists.newArrayList();
-    Map<Long, HdfsPartition> partitionMap = tbl_.getPartitionMap();
-    for (Long id: matchingPartitionIds) {
-      HdfsPartition partition = partitionMap.get(id);
-      Preconditions.checkNotNull(partition);
-      if (partition.hasFileDescriptors()) {
-        results.add(partition);
-        analyzer.getDescTbl().addReferencedPartition(tbl_, partition.getId());
-      }
-    }
-    return results;
-  }
-
-  /**
-   * Recursive function that checks if a given partition expr can be evaluated
-   * directly from the partition key values. If 'expr' contains any constant expressions,
-   * they are evaluated in the BE and are replaced by their corresponding results, as
-   * LiteralExprs.
-   */
-  private boolean canEvalUsingPartitionMd(Expr expr, Analyzer analyzer) {
-    Preconditions.checkNotNull(expr);
-    if (expr instanceof BinaryPredicate) {
-      // Evaluate any constant expression in the BE
-      try {
-        expr.foldConstantChildren(analyzer);
-      } catch (AnalysisException e) {
-        LOG.error("Error evaluating constant expressions in the BE: " + e.getMessage());
-        return false;
-      }
-      BinaryPredicate bp = (BinaryPredicate)expr;
-      SlotRef slot = bp.getBoundSlot();
-      if (slot == null) return false;
-      Expr bindingExpr = bp.getSlotBinding(slot.getSlotId());
-      if (bindingExpr == null || !bindingExpr.isLiteral()) return false;
-      return true;
-    } else if (expr instanceof CompoundPredicate) {
-      boolean res = canEvalUsingPartitionMd(expr.getChild(0), analyzer);
-      if (expr.getChild(1) != null) {
-        res &= canEvalUsingPartitionMd(expr.getChild(1), analyzer);
-      }
-      return res;
-    } else if (expr instanceof IsNullPredicate) {
-      // Check for SlotRef IS [NOT] NULL case
-      IsNullPredicate nullPredicate = (IsNullPredicate)expr;
-      return nullPredicate.getBoundSlot() != null;
-    } else if (expr instanceof InPredicate) {
-      // Evaluate any constant expressions in the BE
-      try {
-        expr.foldConstantChildren(analyzer);
-      } catch (AnalysisException e) {
-        LOG.error("Error evaluating constant expressions in the BE: " + e.getMessage());
-        return false;
-      }
-      // Check for SlotRef [NOT] IN (Literal, ... Literal) case
-      SlotRef slot = ((InPredicate)expr).getBoundSlot();
-      if (slot == null) return false;
-      for (int i = 1; i < expr.getChildren().size(); ++i) {
-        if (!(expr.getChild(i).isLiteral())) return false;
-      }
-      return true;
-    } else if (expr instanceof BetweenPredicate) {
-      return canEvalUsingPartitionMd(((BetweenPredicate) expr).getRewrittenPredicate(),
-          analyzer);
-    }
-    return false;
-  }
-
-  /**
-   * Evaluate a BinaryPredicate filter on a partition column and return the
-   * ids of the matching partitions. An empty set is returned if there
-   * are no matching partitions.
-   */
-  private HashSet<Long> evalBinaryPredicate(Expr expr) {
-    Preconditions.checkNotNull(expr);
-    Preconditions.checkState(expr instanceof BinaryPredicate);
-    boolean isSlotOnLeft = true;
-    if (expr.getChild(0).isLiteral()) isSlotOnLeft = false;
-
-    // Get the operands
-    BinaryPredicate bp = (BinaryPredicate)expr;
-    SlotRef slot = bp.getBoundSlot();
-    Preconditions.checkNotNull(slot);
-    Expr bindingExpr = bp.getSlotBinding(slot.getSlotId());
-    Preconditions.checkNotNull(bindingExpr);
-    Preconditions.checkState(bindingExpr.isLiteral());
-    LiteralExpr literal = (LiteralExpr)bindingExpr;
-    Operator op = bp.getOp();
-    if ((literal instanceof NullLiteral) && (op != Operator.NOT_DISTINCT)
-        && (op != Operator.DISTINCT_FROM)) {
-      return Sets.newHashSet();
-    }
-
-    // Get the partition column position and retrieve the associated partition
-    // value metadata.
-    int partitionPos = slot.getDesc().getColumn().getPosition();
-    TreeMap<LiteralExpr, HashSet<Long>> partitionValueMap =
-        tbl_.getPartitionValueMap(partitionPos);
-    if (partitionValueMap.isEmpty()) return Sets.newHashSet();
-
-    HashSet<Long> matchingIds = Sets.newHashSet();
-    // Compute the matching partition ids
-    if (op == Operator.NOT_DISTINCT) {
-      // Case: SlotRef <=> Literal
-      if (literal instanceof NullLiteral) {
-        Set<Long> ids = tbl_.getNullPartitionIds(partitionPos);
-        if (ids != null) matchingIds.addAll(ids);
-        return matchingIds;
-      }
-      // Punt to equality case:
-      op = Operator.EQ;
-    }
-    if (op == Operator.EQ) {
-      // Case: SlotRef = Literal
-      HashSet<Long> ids = partitionValueMap.get(literal);
-      if (ids != null) matchingIds.addAll(ids);
-      return matchingIds;
-    }
-    if (op == Operator.DISTINCT_FROM) {
-      // Case: SlotRef IS DISTINCT FROM Literal
-      if (literal instanceof NullLiteral) {
-        matchingIds.addAll(tbl_.getPartitionIds());
-        Set<Long> nullIds = tbl_.getNullPartitionIds(partitionPos);
-        matchingIds.removeAll(nullIds);
-        return matchingIds;
-      } else {
-        matchingIds.addAll(tbl_.getPartitionIds());
-        HashSet<Long> ids = partitionValueMap.get(literal);
-        if (ids != null) matchingIds.removeAll(ids);
-        return matchingIds;
-      }
-    }
-    if (op == Operator.NE) {
-      // Case: SlotRef != Literal
-      matchingIds.addAll(tbl_.getPartitionIds());
-      Set<Long> nullIds = tbl_.getNullPartitionIds(partitionPos);
-      matchingIds.removeAll(nullIds);
-      HashSet<Long> ids = partitionValueMap.get(literal);
-      if (ids != null) matchingIds.removeAll(ids);
-      return matchingIds;
-    }
-
-    // Determine the partition key value range of this predicate.
-    NavigableMap<LiteralExpr, HashSet<Long>> rangeValueMap = null;
-    LiteralExpr firstKey = partitionValueMap.firstKey();
-    LiteralExpr lastKey = partitionValueMap.lastKey();
-    boolean upperInclusive = false;
-    boolean lowerInclusive = false;
-    LiteralExpr upperBoundKey = null;
-    LiteralExpr lowerBoundKey = null;
-
-    if (((op == Operator.LE || op == Operator.LT) && isSlotOnLeft) ||
-        ((op == Operator.GE || op == Operator.GT) && !isSlotOnLeft)) {
-      // Case: SlotRef <[=] Literal
-      if (literal.compareTo(firstKey) < 0) return Sets.newHashSet();
-      if (op == Operator.LE || op == Operator.GE) upperInclusive = true;
-
-      if (literal.compareTo(lastKey) <= 0) {
-        upperBoundKey = literal;
-      } else {
-        upperBoundKey = lastKey;
-        upperInclusive = true;
-      }
-      lowerBoundKey = firstKey;
-      lowerInclusive = true;
-    } else {
-      // Cases: SlotRef >[=] Literal
-      if (literal.compareTo(lastKey) > 0) return Sets.newHashSet();
-      if (op == Operator.GE || op == Operator.LE) lowerInclusive = true;
-
-      if (literal.compareTo(firstKey) >= 0) {
-        lowerBoundKey = literal;
-      } else {
-        lowerBoundKey = firstKey;
-        lowerInclusive = true;
-      }
-      upperBoundKey = lastKey;
-      upperInclusive = true;
-    }
-
-    // Retrieve the submap that corresponds to the computed partition key
-    // value range.
-    rangeValueMap = partitionValueMap.subMap(lowerBoundKey, lowerInclusive,
-        upperBoundKey, upperInclusive);
-    // Compute the matching partition ids
-    for (HashSet<Long> idSet: rangeValueMap.values()) {
-      if (idSet != null) matchingIds.addAll(idSet);
-    }
-    return matchingIds;
-  }
-
-  /**
-   * Evaluate an InPredicate filter on a partition column and return the ids of
-   * the matching partitions.
-   */
-  private HashSet<Long> evalInPredicate(Expr expr) {
-    Preconditions.checkNotNull(expr);
-    Preconditions.checkState(expr instanceof InPredicate);
-    InPredicate inPredicate = (InPredicate)expr;
-    HashSet<Long> matchingIds = Sets.newHashSet();
-    SlotRef slot = inPredicate.getBoundSlot();
-    Preconditions.checkNotNull(slot);
-    int partitionPos = slot.getDesc().getColumn().getPosition();
-    TreeMap<LiteralExpr, HashSet<Long>> partitionValueMap =
-        tbl_.getPartitionValueMap(partitionPos);
-
-    if (inPredicate.isNotIn()) {
-      // Case: SlotRef NOT IN (Literal, ..., Literal)
-      // If there is a NullLiteral, return an empty set.
-      List<Expr> nullLiterals = Lists.newArrayList();
-      inPredicate.collectAll(Predicates.instanceOf(NullLiteral.class), nullLiterals);
-      if (!nullLiterals.isEmpty()) return matchingIds;
-      matchingIds.addAll(tbl_.getPartitionIds());
-      // Exclude partitions with null partition column values
-      Set<Long> nullIds = tbl_.getNullPartitionIds(partitionPos);
-      matchingIds.removeAll(nullIds);
-    }
-    // Compute the matching partition ids
-    for (int i = 1; i < inPredicate.getChildren().size(); ++i) {
-      LiteralExpr literal = (LiteralExpr)inPredicate.getChild(i);
-      HashSet<Long> idSet = partitionValueMap.get(literal);
-      if (idSet != null) {
-        if (inPredicate.isNotIn()) {
-          matchingIds.removeAll(idSet);
-        } else {
-          matchingIds.addAll(idSet);
-        }
-      }
-    }
-    return matchingIds;
-  }
-
-  /**
-   * Evaluate an IsNullPredicate on a partition column and return the ids of the
-   * matching partitions.
-   */
-  private HashSet<Long> evalIsNullPredicate(Expr expr) {
-    Preconditions.checkNotNull(expr);
-    Preconditions.checkState(expr instanceof IsNullPredicate);
-    HashSet<Long> matchingIds = Sets.newHashSet();
-    IsNullPredicate nullPredicate = (IsNullPredicate)expr;
-    SlotRef slot = nullPredicate.getBoundSlot();
-    Preconditions.checkNotNull(slot);
-    int partitionPos = slot.getDesc().getColumn().getPosition();
-    Set<Long> nullPartitionIds = tbl_.getNullPartitionIds(partitionPos);
-
-    if (nullPredicate.isNotNull()) {
-      matchingIds.addAll(tbl_.getPartitionIds());
-      matchingIds.removeAll(nullPartitionIds);
-    } else {
-      matchingIds.addAll(nullPartitionIds);
-    }
-    return matchingIds;
-  }
-
-  /**
-   * Evaluate a slot binding predicate on a partition key using the partition
-   * key values; return the matching partition ids. An empty set is returned
-   * if there are no matching partitions. This function can evaluate the following
-   * types of predicates: BinaryPredicate, CompoundPredicate, IsNullPredicate,
-   * InPredicate, and BetweenPredicate.
-   */
-  private HashSet<Long> evalSlotBindingFilter(Expr expr) {
-    Preconditions.checkNotNull(expr);
-    if (expr instanceof BinaryPredicate) {
-      return evalBinaryPredicate(expr);
-    } else if (expr instanceof CompoundPredicate) {
-      HashSet<Long> leftChildIds = evalSlotBindingFilter(expr.getChild(0));
-      CompoundPredicate cp = (CompoundPredicate)expr;
-      // NOT operators have been eliminated
-      Preconditions.checkState(cp.getOp() != CompoundPredicate.Operator.NOT);
-      if (cp.getOp() == CompoundPredicate.Operator.AND) {
-        HashSet<Long> rightChildIds = evalSlotBindingFilter(expr.getChild(1));
-        leftChildIds.retainAll(rightChildIds);
-      } else if (cp.getOp() == CompoundPredicate.Operator.OR) {
-        HashSet<Long> rightChildIds = evalSlotBindingFilter(expr.getChild(1));
-        leftChildIds.addAll(rightChildIds);
-      }
-      return leftChildIds;
-    } else if (expr instanceof InPredicate) {
-      return evalInPredicate(expr);
-    } else if (expr instanceof IsNullPredicate) {
-      return evalIsNullPredicate(expr);
-    } else if (expr instanceof BetweenPredicate) {
-      return evalSlotBindingFilter(((BetweenPredicate) expr).getRewrittenPredicate());
-    }
-    return null;
-  }
-
-  /**
-   * Evaluate a list of HdfsPartitionFilters in the BE. These are 'complex'
-   * filters that could not be evaluated from the partition key values.
-   */
-  private void evalPartitionFiltersInBe(List<HdfsPartitionFilter> filters,
-      HashSet<Long> matchingPartitionIds, Analyzer analyzer) throws InternalException {
-    Map<Long, HdfsPartition> partitionMap = tbl_.getPartitionMap();
-    // Set of partition ids that pass a filter
-    HashSet<Long> matchingIds = Sets.newHashSet();
-    // Batch of partitions
-    ArrayList<HdfsPartition> partitionBatch = Lists.newArrayList();
-    // Identify the partitions that pass all filters.
-    for (HdfsPartitionFilter filter: filters) {
-      // Iterate through the currently valid partitions
-      for (Long id: matchingPartitionIds) {
-        HdfsPartition p = partitionMap.get(id);
-        Preconditions.checkState(
-            p.getPartitionValues().size() == tbl_.getNumClusteringCols());
-        // Add the partition to the current batch
-        partitionBatch.add(partitionMap.get(id));
-        if (partitionBatch.size() == PARTITION_PRUNING_BATCH_SIZE) {
-          // Batch is full. Evaluate the predicates of this batch in the BE.
-          matchingIds.addAll(filter.getMatchingPartitionIds(partitionBatch, analyzer));
-          partitionBatch.clear();
-        }
-      }
-      // Check if there are any unprocessed partitions.
-      if (!partitionBatch.isEmpty()) {
-        matchingIds.addAll(filter.getMatchingPartitionIds(partitionBatch, analyzer));
-        partitionBatch.clear();
-      }
-      // Prune the partition ids that didn't pass the filter
-      matchingPartitionIds.retainAll(matchingIds);
-      matchingIds.clear();
-    }
-  }
-}
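
For reference, below is a minimal, self-contained sketch of the sorted-map range
lookup that evalBinaryPredicate() above relies on. The class and variable names are
illustrative only (they are not the Impala classes); the point is how a predicate
such as "col <= literal" becomes a TreeMap.subMap() call over partition key values.

import java.util.HashSet;
import java.util.NavigableMap;
import java.util.Set;
import java.util.TreeMap;

// Illustrative only: plain Integer keys stand in for LiteralExpr partition key
// values, and Long values stand in for partition ids.
public class RangePruneSketch {
  public static void main(String[] args) {
    TreeMap<Integer, Set<Long>> partitionValueMap = new TreeMap<>();
    partitionValueMap.put(1, new HashSet<>(Set.of(10L)));
    partitionValueMap.put(5, new HashSet<>(Set.of(11L, 12L)));
    partitionValueMap.put(9, new HashSet<>(Set.of(13L)));

    int literal = 5;  // predicate: col <= 5
    Set<Long> matchingIds = new HashSet<>();
    if (literal >= partitionValueMap.firstKey()) {
      // Inclusive range [firstKey, literal]; partitions outside it are pruned.
      NavigableMap<Integer, Set<Long>> range = partitionValueMap.subMap(
          partitionValueMap.firstKey(), true, literal, true);
      for (Set<Long> ids : range.values()) matchingIds.addAll(ids);
    }
    System.out.println(matchingIds);  // ids for keys 1 and 5: 10, 11, 12
  }
}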

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/planner/HdfsScanNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/planner/HdfsScanNode.java b/fe/src/main/java/com/cloudera/impala/planner/HdfsScanNode.java
deleted file mode 100644
index 9b83902..0000000
--- a/fe/src/main/java/com/cloudera/impala/planner/HdfsScanNode.java
+++ /dev/null
@@ -1,677 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.planner;
-
-import java.util.ArrayList;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.analysis.SlotDescriptor;
-import com.cloudera.impala.analysis.TableRef;
-import com.cloudera.impala.analysis.TupleDescriptor;
-import com.cloudera.impala.analysis.TupleId;
-import com.cloudera.impala.catalog.Column;
-import com.cloudera.impala.catalog.HdfsFileFormat;
-import com.cloudera.impala.catalog.HdfsPartition;
-import com.cloudera.impala.catalog.HdfsPartition.FileBlock;
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.common.NotImplementedException;
-import com.cloudera.impala.common.PrintUtils;
-import com.cloudera.impala.common.RuntimeEnv;
-import com.cloudera.impala.thrift.TExplainLevel;
-import com.cloudera.impala.thrift.TExpr;
-import com.cloudera.impala.thrift.THdfsFileBlock;
-import com.cloudera.impala.thrift.THdfsFileSplit;
-import com.cloudera.impala.thrift.THdfsScanNode;
-import com.cloudera.impala.thrift.TNetworkAddress;
-import com.cloudera.impala.thrift.TPlanNode;
-import com.cloudera.impala.thrift.TPlanNodeType;
-import com.cloudera.impala.thrift.TQueryOptions;
-import com.cloudera.impala.thrift.TReplicaPreference;
-import com.cloudera.impala.thrift.TScanRange;
-import com.cloudera.impala.thrift.TScanRangeLocation;
-import com.cloudera.impala.thrift.TScanRangeLocations;
-import com.cloudera.impala.util.MembershipSnapshot;
-import com.google.common.base.Joiner;
-import com.google.common.base.Objects;
-import com.google.common.base.Objects.ToStringHelper;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import com.google.common.collect.Sets;
-
-/**
- * Scan of a single table. Currently limited to full-table scans.
- *
- * It's expected that the creator of this object has already done any necessary
- * partition pruning before creating this object. In other words, the 'conjuncts'
- * passed to the constructors are conjuncts not fully evaluated by partition pruning
- * and 'partitions' are the remaining partitions after pruning.
- *
- * TODO: pass in range restrictions.
- */
-public class HdfsScanNode extends ScanNode {
-  private final static Logger LOG = LoggerFactory.getLogger(HdfsScanNode.class);
-
-  // Read size of the backend I/O manager. Used in computeCosts().
-  private final static long IO_MGR_BUFFER_SIZE = 8L * 1024L * 1024L;
-
-  // Maximum number of I/O buffers per thread executing this scan.
-  private final static long MAX_IO_BUFFERS_PER_THREAD = 10;
-
-  // Number of scanner threads per core executing this scan.
-  private final static int THREADS_PER_CORE = 3;
-
-  // Factor capturing the worst-case deviation from a uniform distribution of scan ranges
-  // among nodes. The factor of 1.2 means that a particular node may have 20% more
-  // scan ranges than would have been estimated assuming a uniform distribution.
-  private final static double SCAN_RANGE_SKEW_FACTOR = 1.2;
-
-  private final HdfsTable tbl_;
-
-  // Partitions that are filtered in for scanning by the key ranges
-  private final List<HdfsPartition> partitions_;
-
-  private final TReplicaPreference replicaPreference_;
-  private final boolean randomReplica_;
-
-  // Total number of files from partitions_
-  private long totalFiles_ = 0;
-
-  // Total number of bytes from partitions_
-  private long totalBytes_ = 0;
-
-  // True if this scan node should use codegen for evaluating conjuncts.
-  private boolean codegenConjuncts_;
-
-  // Conjuncts that can be evaluated while materializing the items (tuples) of
-  // collection-typed slots. Maps from tuple descriptor to the conjuncts bound by that
-  // tuple. Uses a linked hash map for consistent display in explain.
-  private final Map<TupleDescriptor, List<Expr>> collectionConjuncts_ =
-      Maps.newLinkedHashMap();
-
-  // Indicates corrupt table stats: a table or partition with non-empty scan ranges
-  // but numRows set to 0. Set in computeStats().
-  private boolean hasCorruptTableStats_;
-
-  // Number of header lines to skip at the beginning of each file of this table. Only set
-  // to values > 0 for HDFS text files.
-  private int skipHeaderLineCount_ = 0;
-
-  /**
-   * Construct a node to scan given data files into tuples described by 'desc',
-   * with 'conjuncts' being the unevaluated conjuncts bound by the tuple and
-   * 'partitions' being the partitions which need to be included. Please see
-   * class comments above for details.
-   */
-  public HdfsScanNode(PlanNodeId id, TupleDescriptor desc, List<Expr> conjuncts,
-      List<HdfsPartition> partitions, TableRef hdfsTblRef) {
-    super(id, desc, "SCAN HDFS");
-    Preconditions.checkState(desc.getTable() instanceof HdfsTable);
-    tbl_ = (HdfsTable)desc.getTable();
-    conjuncts_ = conjuncts;
-    partitions_ = partitions;
-    replicaPreference_ = hdfsTblRef.getReplicaPreference();
-    randomReplica_ = hdfsTblRef.getRandomReplica();
-    HdfsTable hdfsTable = (HdfsTable)hdfsTblRef.getTable();
-    Preconditions.checkState(tbl_ == hdfsTable);
-    StringBuilder error = new StringBuilder();
-    skipHeaderLineCount_ = tbl_.parseSkipHeaderLineCount(error);
-    if (error.length() > 0) {
-      // Any errors should already have been caught during analysis.
-      throw new IllegalStateException(error.toString());
-    }
-  }
-
-  @Override
-  protected String debugString() {
-    ToStringHelper helper = Objects.toStringHelper(this);
-    for (HdfsPartition partition: partitions_) {
-      helper.add("Partition " + partition.getId() + ":", partition.toString());
-    }
-    return helper.addValue(super.debugString()).toString();
-  }
-
-  /**
-   * Populate collectionConjuncts_ and scanRanges_.
-   */
-  @Override
-  public void init(Analyzer analyzer) throws ImpalaException {
-    conjuncts_ = orderConjunctsByCost(conjuncts_);
-    checkForSupportedFileFormats();
-
-    assignCollectionConjuncts(analyzer);
-
-    computeMemLayout(analyzer);
-
-    // compute scan range locations
-    computeScanRangeLocations(analyzer);
-
-    // do this at the end so it can take all conjuncts and scan ranges into account
-    computeStats(analyzer);
-
-    // TODO: do we need this?
-    assignedConjuncts_ = analyzer.getAssignedConjuncts();
-
-    // Decide whether codegen should be used for evaluating conjuncts.
-    checkForCodegen(analyzer);
-  }
-
-  /**
-   * Throws if the table schema contains a complex type and we need to scan
-   * a partition that has a format for which we do not support complex types,
-   * regardless of whether a complex-typed column is actually referenced
-   * in the query.
-   */
-  @Override
-  protected void checkForSupportedFileFormats() throws NotImplementedException {
-    Preconditions.checkNotNull(desc_);
-    Preconditions.checkNotNull(desc_.getTable());
-    Column firstComplexTypedCol = null;
-    for (Column col: desc_.getTable().getColumns()) {
-      if (col.getType().isComplexType()) {
-        firstComplexTypedCol = col;
-        break;
-      }
-    }
-    if (firstComplexTypedCol == null) return;
-
-    boolean referencesComplexTypedCol = false;
-    for (SlotDescriptor slotDesc: desc_.getSlots()) {
-      if (!slotDesc.isMaterialized()) continue;
-      if (slotDesc.getType().isComplexType() || slotDesc.getColumn() == null) {
-        referencesComplexTypedCol = true;
-        break;
-      }
-    }
-
-    for (HdfsPartition part: partitions_) {
-      HdfsFileFormat format = part.getInputFormatDescriptor().getFileFormat();
-      if (format.isComplexTypesSupported()) continue;
-      // If the file format allows querying just scalar typed columns and the query
-      // doesn't materialize any complex typed columns, the scan is allowed.
-      if (format.canSkipComplexTypes() && !referencesComplexTypedCol) {
-        continue;
-      }
-      String errSuffix = String.format(
-          "Complex types are supported for these file formats: %s",
-          Joiner.on(", ").join(HdfsFileFormat.complexTypesFormats()));
-      if (desc_.getTable().getNumClusteringCols() == 0) {
-        throw new NotImplementedException(String.format(
-            "Scan of table '%s' in format '%s' is not supported because the table " +
-            "has a column '%s' with a complex type '%s'.\n%s.",
-            desc_.getAlias(), format, firstComplexTypedCol.getName(),
-            firstComplexTypedCol.getType().toSql(), errSuffix));
-      }
-      throw new NotImplementedException(String.format(
-          "Scan of partition '%s' in format '%s' of table '%s' is not supported " +
-          "because the table has a column '%s' with a complex type '%s'.\n%s.",
-          part.getPartitionName(), format, desc_.getAlias(),
-          firstComplexTypedCol.getName(), firstComplexTypedCol.getType().toSql(),
-          errSuffix));
-    }
-  }
-
-  public boolean isPartitionedTable() {
-    return desc_.getTable().getNumClusteringCols() > 0;
-  }
-
-  /**
-   * Populates the collection conjuncts, materializes their required slots, and marks
- * the conjuncts as assigned, if it is correct to do so. Some conjuncts may also
- * have to be evaluated at a subsequent semi or outer join.
-   */
-  private void assignCollectionConjuncts(Analyzer analyzer) {
-    collectionConjuncts_.clear();
-    assignCollectionConjuncts(desc_, analyzer);
-  }
-
-  /**
-   * Recursively collects and assigns conjuncts bound by tuples materialized in a
-   * collection-typed slot.
-   *
-   * Limitation: Conjuncts that must first be migrated into inline views and that cannot
-   * be captured by slot binding will not be assigned here, but in an UnnestNode.
-   * This limitation applies to conjuncts bound by inline-view slots that are backed by
-   * non-SlotRef exprs in the inline-view's select list. We only capture value transfers
-   * between slots, and not between arbitrary exprs.
-   *
-   * TODO for 2.3: The logic for gathering conjuncts and deciding which ones should be
-   * marked as assigned needs to be clarified and consolidated in one place. The code
-   * below is rather different from the code for assigning the top-level conjuncts in
- * init() although the task performed is conceptually identical. Refactoring the
-   * assignment code is tricky/risky for now.
-   */
-  private void assignCollectionConjuncts(TupleDescriptor tupleDesc, Analyzer analyzer) {
-    for (SlotDescriptor slotDesc: tupleDesc.getSlots()) {
-      if (!slotDesc.getType().isCollectionType()) continue;
-      Preconditions.checkNotNull(slotDesc.getItemTupleDesc());
-      TupleDescriptor itemTupleDesc = slotDesc.getItemTupleDesc();
-      TupleId itemTid = itemTupleDesc.getId();
-      // First collect unassigned and binding predicates. Then remove redundant
-      // predicates based on slot equivalences and enforce slot equivalences by
-      // generating new predicates.
-      List<Expr> collectionConjuncts =
-          analyzer.getUnassignedConjuncts(Lists.newArrayList(itemTid));
-      ArrayList<Expr> bindingPredicates = analyzer.getBoundPredicates(itemTid);
-      for (Expr boundPred: bindingPredicates) {
-        if (!collectionConjuncts.contains(boundPred)) collectionConjuncts.add(boundPred);
-      }
-      analyzer.createEquivConjuncts(itemTid, collectionConjuncts);
-      // Mark those conjuncts as assigned that do not also need to be evaluated by a
-      // subsequent semi or outer join.
-      for (Expr conjunct: collectionConjuncts) {
-        if (!analyzer.evalByJoin(conjunct)) analyzer.markConjunctAssigned(conjunct);
-      }
-      if (!collectionConjuncts.isEmpty()) {
-        analyzer.materializeSlots(collectionConjuncts);
-        collectionConjuncts_.put(itemTupleDesc, collectionConjuncts);
-      }
-      // Recursively look for collection-typed slots in nested tuple descriptors.
-      assignCollectionConjuncts(itemTupleDesc, analyzer);
-    }
-  }
-
-  /**
-   * Computes scan ranges (hdfs splits) plus their storage locations, including volume
-   * ids, based on the given maximum number of bytes each scan range should scan.
-   */
-  private void computeScanRangeLocations(Analyzer analyzer) {
-    long maxScanRangeLength = analyzer.getQueryCtx().getRequest().getQuery_options()
-        .getMax_scan_range_length();
-    scanRanges_ = Lists.newArrayList();
-    for (HdfsPartition partition: partitions_) {
-      Preconditions.checkState(partition.getId() >= 0);
-      for (HdfsPartition.FileDescriptor fileDesc: partition.getFileDescriptors()) {
-        for (THdfsFileBlock thriftBlock: fileDesc.getFileBlocks()) {
-          HdfsPartition.FileBlock block = FileBlock.fromThrift(thriftBlock);
-          List<Integer> replicaHostIdxs = block.getReplicaHostIdxs();
-          if (replicaHostIdxs.size() == 0) {
-            // we didn't get locations for this block; for now, just ignore the block
-            // TODO: do something meaningful with that
-            continue;
-          }
-          // Collect the network address and volume ID of all replicas of this block.
-          List<TScanRangeLocation> locations = Lists.newArrayList();
-          for (int i = 0; i < replicaHostIdxs.size(); ++i) {
-            TScanRangeLocation location = new TScanRangeLocation();
-            // Translate from the host index (local to the HdfsTable) to network address.
-            Integer tableHostIdx = replicaHostIdxs.get(i);
-            TNetworkAddress networkAddress =
-                partition.getTable().getHostIndex().getEntry(tableHostIdx);
-            Preconditions.checkNotNull(networkAddress);
-            // Translate from network address to the global (to this request) host index.
-            Integer globalHostIdx = analyzer.getHostIndex().getIndex(networkAddress);
-            location.setHost_idx(globalHostIdx);
-            location.setVolume_id(block.getDiskId(i));
-            location.setIs_cached(block.isCached(i));
-            locations.add(location);
-          }
-          // create scan ranges, taking into account maxScanRangeLength
-          long currentOffset = block.getOffset();
-          long remainingLength = block.getLength();
-          while (remainingLength > 0) {
-            long currentLength = remainingLength;
-            if (maxScanRangeLength > 0 && remainingLength > maxScanRangeLength) {
-              currentLength = maxScanRangeLength;
-            }
-            TScanRange scanRange = new TScanRange();
-            scanRange.setHdfs_file_split(new THdfsFileSplit(
-                fileDesc.getFileName(), currentOffset, currentLength, partition.getId(),
-                fileDesc.getFileLength(), fileDesc.getFileCompression(),
-                fileDesc.getModificationTime()));
-            TScanRangeLocations scanRangeLocations = new TScanRangeLocations();
-            scanRangeLocations.scan_range = scanRange;
-            scanRangeLocations.locations = locations;
-            scanRanges_.add(scanRangeLocations);
-            remainingLength -= currentLength;
-            currentOffset += currentLength;
-          }
-        }
-      }
-    }
-  }
-
-  /**
-   * Also computes totalBytes_, totalFiles_, numPartitionsMissingStats_,
-   * and sets hasCorruptTableStats_.
-   */
-  @Override
-  public void computeStats(Analyzer analyzer) {
-    super.computeStats(analyzer);
-    LOG.debug("collecting partitions for table " + tbl_.getName());
-    numPartitionsMissingStats_ = 0;
-    totalFiles_ = 0;
-    totalBytes_ = 0;
-    if (tbl_.getNumClusteringCols() == 0) {
-      cardinality_ = tbl_.getNumRows();
-      if (cardinality_ < -1 || (cardinality_ == 0 && tbl_.getTotalHdfsBytes() > 0)) {
-        hasCorruptTableStats_ = true;
-      }
-      if (partitions_.isEmpty()) {
-        // Nothing to scan. Definitely a cardinality of 0 even if we have no stats.
-        cardinality_ = 0;
-      } else {
-        Preconditions.checkState(partitions_.size() == 1);
-        totalFiles_ += partitions_.get(0).getFileDescriptors().size();
-        totalBytes_ += partitions_.get(0).getSize();
-      }
-    } else {
-      cardinality_ = 0;
-      boolean hasValidPartitionCardinality = false;
-      for (HdfsPartition p: partitions_) {
-        // Check for corrupt table stats
-        if (p.getNumRows() < -1  || (p.getNumRows() == 0 && p.getSize() > 0))  {
-          hasCorruptTableStats_ = true;
-        }
-        // ignore partitions with missing stats in the hope they don't matter
-        // enough to change the planning outcome
-        if (p.getNumRows() > -1) {
-          cardinality_ = addCardinalities(cardinality_, p.getNumRows());
-          hasValidPartitionCardinality = true;
-        } else {
-          ++numPartitionsMissingStats_;
-        }
-        totalFiles_ += p.getFileDescriptors().size();
-        totalBytes_ += p.getSize();
-      }
-      if (!partitions_.isEmpty() && !hasValidPartitionCardinality) {
-        // if none of the partitions knew its number of rows, we fall back on
-        // the table stats
-        cardinality_ = tbl_.getNumRows();
-      }
-    }
-    // Adjust cardinality for all collections referenced along the tuple's path.
-    if (cardinality_ != -1) {
-      for (Type t: desc_.getPath().getMatchedTypes()) {
-        if (t.isCollectionType()) cardinality_ *= PlannerContext.AVG_COLLECTION_SIZE;
-      }
-    }
-    inputCardinality_ = cardinality_;
-
-    // Sanity check scan node cardinality.
-    if (cardinality_ < -1) {
-      hasCorruptTableStats_ = true;
-      cardinality_ = -1;
-    }
-
-    if (cardinality_ > 0) {
-      LOG.debug("cardinality_=" + Long.toString(cardinality_) +
-                " sel=" + Double.toString(computeSelectivity()));
-      cardinality_ = Math.round(cardinality_ * computeSelectivity());
-      // IMPALA-2165: Avoid setting the cardinality to 0 after rounding.
-      cardinality_ = Math.max(cardinality_, 1);
-    }
-    cardinality_ = capAtLimit(cardinality_);
-    LOG.debug("computeStats HdfsScan: cardinality_=" + Long.toString(cardinality_));
-
-    computeNumNodes(analyzer, cardinality_);
-    LOG.debug("computeStats HdfsScan: #nodes=" + Integer.toString(numNodes_));
-  }
-
-  /**
-   * Estimate the number of impalad nodes that this scan node will execute on (which is
-   * ultimately determined by the scheduling done by the backend's SimpleScheduler).
-   * Assume that scan ranges that can be scheduled locally will be, and that scan
-   * ranges that cannot will be round-robined across the cluster.
-   */
-  protected void computeNumNodes(Analyzer analyzer, long cardinality) {
-    Preconditions.checkNotNull(scanRanges_);
-    MembershipSnapshot cluster = MembershipSnapshot.getCluster();
-    HashSet<TNetworkAddress> localHostSet = Sets.newHashSet();
-    int totalNodes = 0;
-    int numLocalRanges = 0;
-    int numRemoteRanges = 0;
-    for (TScanRangeLocations range: scanRanges_) {
-      boolean anyLocal = false;
-      for (TScanRangeLocation loc: range.locations) {
-        TNetworkAddress dataNode = analyzer.getHostIndex().getEntry(loc.getHost_idx());
-        if (cluster.contains(dataNode)) {
-          anyLocal = true;
-          // Use the full datanode address (including port) to account for the test
-          // minicluster where there are multiple datanodes and impalads on a single
-          // host.  This assumes that when an impalad is colocated with a datanode,
-          // there are the same number of impalads as datanodes on this host in this
-          // cluster.
-          localHostSet.add(dataNode);
-        }
-      }
-      // This range has at least one replica with a colocated impalad, so assume it
-      // will be scheduled on one of those nodes.
-      if (anyLocal) {
-        ++numLocalRanges;
-      } else {
-        ++numRemoteRanges;
-      }
-      // Approximate the number of nodes that will execute locally assigned ranges to
-      // be the smaller of the number of locally assigned ranges and the number of
-      // hosts that hold block replicas for those ranges.
-      int numLocalNodes = Math.min(numLocalRanges, localHostSet.size());
-      // The remote ranges are round-robined across all the impalads.
-      int numRemoteNodes = Math.min(numRemoteRanges, cluster.numNodes());
-      // The local and remote assignments may overlap, but we don't know by how much so
-      // conservatively assume no overlap.
-      totalNodes = Math.min(numLocalNodes + numRemoteNodes, cluster.numNodes());
-      // Exit early if all hosts have a scan range assignment, to avoid extraneous work
-      // in case the number of scan ranges dominates the number of nodes.
-      if (totalNodes == cluster.numNodes()) break;
-    }
-    // Tables can reside on 0 nodes (empty table), but a plan node must always be
-    // executed on at least one node.
-    numNodes_ = (cardinality == 0 || totalNodes == 0) ? 1 : totalNodes;
-    LOG.debug("computeNumNodes totalRanges=" + scanRanges_.size() +
-        " localRanges=" + numLocalRanges + " remoteRanges=" + numRemoteRanges +
-        " localHostSet.size=" + localHostSet.size() +
-        " clusterNodes=" + cluster.numNodes());
-  }
-
-  /**
-   * Approximate the cost of evaluating all conjuncts bound by this node by
-   * aggregating total number of nodes in expression trees of all conjuncts.
-   */
-  private int computeConjunctsCost() {
-    int cost = 0;
-    for (Expr expr: getConjuncts()) {
-      cost += expr.numNodes();
-    }
-    for (List<Expr> exprs: collectionConjuncts_.values()) {
-      for (Expr expr: exprs) {
-        cost += expr.numNodes();
-      }
-    }
-    return cost;
-  }
-
-  /**
- * The scan node is not a codegen-enabled operator. Decide whether to use codegen
- * for conjunct evaluation by estimating the cost of interpretation.
-   */
-  private void checkForCodegen(Analyzer analyzer) {
-    long conjunctsCost = computeConjunctsCost();
-    long inputCardinality = getInputCardinality();
-    long threshold =
-        analyzer.getQueryCtx().getRequest().query_options.scan_node_codegen_threshold;
-    if (inputCardinality == -1) {
-      codegenConjuncts_ = conjunctsCost > 0;
-    } else {
-      codegenConjuncts_ = inputCardinality * conjunctsCost > threshold;
-    }
-  }
-
-  @Override
-  protected void toThrift(TPlanNode msg) {
-    msg.hdfs_scan_node = new THdfsScanNode(desc_.getId().asInt());
-    if (replicaPreference_ != null) {
-      msg.hdfs_scan_node.setReplica_preference(replicaPreference_);
-    }
-    msg.hdfs_scan_node.setRandom_replica(randomReplica_);
-    msg.node_type = TPlanNodeType.HDFS_SCAN_NODE;
-    msg.hdfs_scan_node.setCodegen_conjuncts(codegenConjuncts_);
-    if (!collectionConjuncts_.isEmpty()) {
-      Map<Integer, List<TExpr>> tcollectionConjuncts = Maps.newLinkedHashMap();
-      for (Map.Entry<TupleDescriptor, List<Expr>> entry:
-        collectionConjuncts_.entrySet()) {
-        tcollectionConjuncts.put(entry.getKey().getId().asInt(),
-            Expr.treesToThrift(entry.getValue()));
-      }
-      msg.hdfs_scan_node.setCollection_conjuncts(tcollectionConjuncts);
-    }
-    if (skipHeaderLineCount_ > 0) {
-      msg.hdfs_scan_node.setSkip_header_line_count(skipHeaderLineCount_);
-    }
-  }
-
-  @Override
-  protected String getDisplayLabelDetail() {
-    HdfsTable table = (HdfsTable) desc_.getTable();
-    List<String> path = Lists.newArrayList();
-    path.add(table.getDb().getName());
-    path.add(table.getName());
-    Preconditions.checkNotNull(desc_.getPath());
-    if (desc_.hasExplicitAlias()) {
-      return desc_.getPath().toString() + " " + desc_.getAlias();
-    } else {
-      return desc_.getPath().toString();
-    }
-  }
-
-  @Override
-  protected String getNodeExplainString(String prefix, String detailPrefix,
-      TExplainLevel detailLevel) {
-    StringBuilder output = new StringBuilder();
-    HdfsTable table = (HdfsTable) desc_.getTable();
-    output.append(String.format("%s%s [%s", prefix, getDisplayLabel(),
-        getDisplayLabelDetail()));
-    if (detailLevel.ordinal() >= TExplainLevel.EXTENDED.ordinal() &&
-        fragment_.isPartitioned()) {
-      output.append(", " + fragment_.getDataPartition().getExplainString());
-    }
-    output.append("]\n");
-    if (detailLevel.ordinal() >= TExplainLevel.STANDARD.ordinal()) {
-      int numPartitions = partitions_.size();
-      if (tbl_.getNumClusteringCols() == 0) numPartitions = 1;
-      output.append(String.format("%spartitions=%s/%s files=%s size=%s", detailPrefix,
-          numPartitions, table.getPartitions().size() - 1, totalFiles_,
-          PrintUtils.printBytes(totalBytes_)));
-      output.append("\n");
-      if (!conjuncts_.isEmpty()) {
-        output.append(
-            detailPrefix + "predicates: " + getExplainString(conjuncts_) + "\n");
-      }
-      if (!collectionConjuncts_.isEmpty()) {
-        for (Map.Entry<TupleDescriptor, List<Expr>> entry:
-          collectionConjuncts_.entrySet()) {
-          String alias = entry.getKey().getAlias();
-          output.append(String.format("%spredicates on %s: %s\n",
-              detailPrefix, alias, getExplainString(entry.getValue())));
-        }
-      }
-      if (!runtimeFilters_.isEmpty()) {
-        output.append(detailPrefix + "runtime filters: ");
-        output.append(getRuntimeFilterExplainString(false));
-      }
-    }
-    if (detailLevel.ordinal() >= TExplainLevel.EXTENDED.ordinal()) {
-      output.append(getStatsExplainString(detailPrefix, detailLevel));
-      output.append("\n");
-    }
-    return output.toString();
-  }
-
-  @Override
-  public void computeCosts(TQueryOptions queryOptions) {
-    Preconditions.checkNotNull(scanRanges_, "Cost estimation requires scan ranges.");
-    if (scanRanges_.isEmpty()) {
-      perHostMemCost_ = 0;
-      return;
-    }
-    Preconditions.checkState(0 < numNodes_ && numNodes_ <= scanRanges_.size());
-    Preconditions.checkNotNull(desc_);
-    Preconditions.checkState(desc_.getTable() instanceof HdfsTable);
-    HdfsTable table = (HdfsTable) desc_.getTable();
-    int perHostScanRanges;
-    if (table.getMajorityFormat() == HdfsFileFormat.PARQUET) {
-      // For the purpose of this estimation, the number of per-host scan ranges for
-      // Parquet files is equal to the number of non-partition columns scanned.
-      perHostScanRanges = 0;
-      for (SlotDescriptor slot: desc_.getSlots()) {
-        if (slot.getColumn() == null ||
-            slot.getColumn().getPosition() >= table.getNumClusteringCols()) {
-          ++perHostScanRanges;
-        }
-      }
-    } else {
-      perHostScanRanges = (int) Math.ceil((
-          (double) scanRanges_.size() / (double) numNodes_) * SCAN_RANGE_SKEW_FACTOR);
-    }
-
-    // TODO: The total memory consumption for a particular query depends on the number
-    // of *available* cores, i.e., it depends on the resource consumption of other
-    // concurrent queries. Figure out how to account for that.
-    int maxScannerThreads = Math.min(perHostScanRanges,
-        RuntimeEnv.INSTANCE.getNumCores() * THREADS_PER_CORE);
-    // Account for the max scanner threads query option.
-    if (queryOptions.isSetNum_scanner_threads() &&
-        queryOptions.getNum_scanner_threads() > 0) {
-      maxScannerThreads =
-          Math.min(maxScannerThreads, queryOptions.getNum_scanner_threads());
-    }
-
-    long avgScanRangeBytes = (long) Math.ceil(totalBytes_ / (double) scanRanges_.size());
-    // The +1 accounts for an extra I/O buffer to read past the scan range due to a
-    // trailing record spanning HDFS blocks.
-    long perThreadIoBuffers =
-        Math.min((long) Math.ceil(avgScanRangeBytes / (double) IO_MGR_BUFFER_SIZE),
-            MAX_IO_BUFFERS_PER_THREAD) + 1;
-    perHostMemCost_ = maxScannerThreads * perThreadIoBuffers * IO_MGR_BUFFER_SIZE;
-
-    // Sanity check: the tighter estimation should not exceed the per-host maximum.
-    long perHostUpperBound = getPerHostMemUpperBound();
-    if (perHostMemCost_ > perHostUpperBound) {
-      LOG.warn(String.format("Per-host mem cost %s exceeded per-host upper bound %s.",
-          PrintUtils.printBytes(perHostMemCost_),
-          PrintUtils.printBytes(perHostUpperBound)));
-      perHostMemCost_ = perHostUpperBound;
-    }
-  }
-
-  /**
-   * Hdfs scans use a shared pool of buffers managed by the I/O manager. Intuitively,
-   * the maximum number of I/O buffers is limited by the total disk bandwidth of a node.
-   * Therefore, this upper bound is independent of the number of concurrent scans and
-   * queries and helps to derive a tighter per-host memory estimate for queries with
-   * multiple concurrent scans.
-   */
-  public static long getPerHostMemUpperBound() {
-    // THREADS_PER_CORE each using a default of
-    // MAX_IO_BUFFERS_PER_THREAD * IO_MGR_BUFFER_SIZE bytes.
-    return (long) RuntimeEnv.INSTANCE.getNumCores() * (long) THREADS_PER_CORE *
-        MAX_IO_BUFFERS_PER_THREAD * IO_MGR_BUFFER_SIZE;
-  }
-
-  @Override
-  public boolean hasCorruptTableStats() { return hasCorruptTableStats_; }
-}
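
For reference, below is a minimal sketch of the per-host memory estimate computed
in computeCosts() above. The constants mirror those in the deleted file; the method
and parameter names are hypothetical stand-ins for the RuntimeEnv and TQueryOptions
plumbing, which is not reproduced here.

// Illustrative only: not the Impala class, just the arithmetic of the estimate.
public class ScanMemEstimateSketch {
  static final long IO_MGR_BUFFER_SIZE = 8L * 1024L * 1024L;
  static final long MAX_IO_BUFFERS_PER_THREAD = 10;
  static final int THREADS_PER_CORE = 3;

  static long perHostMemCost(long totalBytes, int numScanRanges,
      int perHostScanRanges, int numCores, int numScannerThreadsOption) {
    // Cap scanner threads by available cores and, if set, by the query option.
    int maxScannerThreads = Math.min(perHostScanRanges, numCores * THREADS_PER_CORE);
    if (numScannerThreadsOption > 0) {
      maxScannerThreads = Math.min(maxScannerThreads, numScannerThreadsOption);
    }
    long avgScanRangeBytes = (long) Math.ceil(totalBytes / (double) numScanRanges);
    // +1 buffer to read past the scan range (trailing record spanning blocks).
    long perThreadIoBuffers = Math.min(
        (long) Math.ceil(avgScanRangeBytes / (double) IO_MGR_BUFFER_SIZE),
        MAX_IO_BUFFERS_PER_THREAD) + 1;
    return maxScannerThreads * perThreadIoBuffers * IO_MGR_BUFFER_SIZE;
  }

  public static void main(String[] args) {
    // Example: 4 GB in 512 scan ranges, 40 ranges on this host, 16 cores, option unset.
    System.out.println(perHostMemCost(4L << 30, 512, 40, 16, 0));
  }
}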



[58/61] incubator-impala git commit: IMPALA-3786: Replace "cloudera" with "apache" (part 2)

Posted by kw...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/AlterTableOrViewRenameStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableOrViewRenameStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableOrViewRenameStmt.java
index 009535c..650c77c 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableOrViewRenameStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableOrViewRenameStmt.java
@@ -15,16 +15,16 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.catalog.View;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TAccessEvent;
-import com.cloudera.impala.thrift.TAlterTableOrViewRenameParams;
-import com.cloudera.impala.thrift.TAlterTableParams;
-import com.cloudera.impala.thrift.TAlterTableType;
-import com.cloudera.impala.thrift.TTableName;
+import org.apache.impala.authorization.Privilege;
+import org.apache.impala.catalog.View;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.TAccessEvent;
+import org.apache.impala.thrift.TAlterTableOrViewRenameParams;
+import org.apache.impala.thrift.TAlterTableParams;
+import org.apache.impala.thrift.TAlterTableType;
+import org.apache.impala.thrift.TTableName;
 import com.google.common.base.Preconditions;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/AlterTableRecoverPartitionsStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableRecoverPartitionsStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableRecoverPartitionsStmt.java
index c7e796c..efb8a70 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableRecoverPartitionsStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableRecoverPartitionsStmt.java
@@ -15,12 +15,12 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TAlterTableParams;
-import com.cloudera.impala.thrift.TAlterTableType;
+import org.apache.impala.catalog.HdfsTable;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.TAlterTableParams;
+import org.apache.impala.thrift.TAlterTableType;
 
 /**
  * Represents an ALTER TABLE RECOVER PARTITIONS statement.

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/AlterTableSetCachedStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetCachedStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetCachedStmt.java
index f63cf3e..56122e1 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetCachedStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetCachedStmt.java
@@ -15,15 +15,15 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.catalog.HdfsPartition;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TAlterTableParams;
-import com.cloudera.impala.thrift.TAlterTableSetCachedParams;
-import com.cloudera.impala.thrift.TAlterTableType;
+import org.apache.impala.catalog.HdfsTable;
+import org.apache.impala.catalog.HdfsPartition;
+import org.apache.impala.catalog.Table;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.TAlterTableParams;
+import org.apache.impala.thrift.TAlterTableSetCachedParams;
+import org.apache.impala.thrift.TAlterTableType;
 import com.google.common.base.Preconditions;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/AlterTableSetColumnStats.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetColumnStats.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetColumnStats.java
index e9c9a14..86c8ad8 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetColumnStats.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetColumnStats.java
@@ -15,17 +15,17 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.HashMap;
 import java.util.Map;
 
-import com.cloudera.impala.catalog.Column;
-import com.cloudera.impala.catalog.ColumnStats;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TAlterTableParams;
-import com.cloudera.impala.thrift.TAlterTableType;
-import com.cloudera.impala.thrift.TAlterTableUpdateStatsParams;
+import org.apache.impala.catalog.Column;
+import org.apache.impala.catalog.ColumnStats;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.TAlterTableParams;
+import org.apache.impala.thrift.TAlterTableType;
+import org.apache.impala.thrift.TAlterTableUpdateStatsParams;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/AlterTableSetFileFormatStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetFileFormatStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetFileFormatStmt.java
index 7e18aa6..ff8d878 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetFileFormatStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetFileFormatStmt.java
@@ -15,13 +15,13 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TAlterTableParams;
-import com.cloudera.impala.thrift.TAlterTableSetFileFormatParams;
-import com.cloudera.impala.thrift.TAlterTableType;
-import com.cloudera.impala.thrift.THdfsFileFormat;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.TAlterTableParams;
+import org.apache.impala.thrift.TAlterTableSetFileFormatParams;
+import org.apache.impala.thrift.TAlterTableType;
+import org.apache.impala.thrift.THdfsFileFormat;
 
 /**
  * Represents an ALTER TABLE [PARTITION partitionSpec] SET FILEFORMAT statement.

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/AlterTableSetLocationStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetLocationStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetLocationStmt.java
index fcc9b53..0219b8c 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetLocationStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetLocationStmt.java
@@ -15,16 +15,16 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.catalog.HdfsPartition;
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TAlterTableParams;
-import com.cloudera.impala.thrift.TAlterTableSetLocationParams;
-import com.cloudera.impala.thrift.TAlterTableType;
+import org.apache.impala.authorization.Privilege;
+import org.apache.impala.catalog.HdfsPartition;
+import org.apache.impala.catalog.HdfsTable;
+import org.apache.impala.catalog.Table;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.TAlterTableParams;
+import org.apache.impala.thrift.TAlterTableSetLocationParams;
+import org.apache.impala.thrift.TAlterTableType;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.fs.permission.FsAction;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/AlterTableSetStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetStmt.java
index 2857211..a37b949 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetStmt.java
@@ -15,12 +15,12 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.catalog.HBaseTable;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.common.AnalysisException;
+import org.apache.impala.authorization.Privilege;
+import org.apache.impala.catalog.HBaseTable;
+import org.apache.impala.catalog.Table;
+import org.apache.impala.common.AnalysisException;
 
 /**
  * Base class for all ALTER TABLE ... SET statements

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/AlterTableSetTblProperties.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetTblProperties.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetTblProperties.java
index da92267..1ff744d 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetTblProperties.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetTblProperties.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.HashMap;
 import java.util.List;
@@ -24,14 +24,14 @@ import java.util.Map;
 import org.apache.avro.SchemaParseException;
 import org.apache.hadoop.hive.serde2.avro.AvroSerdeUtils;
 
-import com.cloudera.impala.catalog.HdfsFileFormat;
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.*;
-import com.cloudera.impala.util.AvroSchemaParser;
-import com.cloudera.impala.util.AvroSchemaUtils;
-import com.cloudera.impala.util.MetaStoreUtil;
+import org.apache.impala.catalog.HdfsFileFormat;
+import org.apache.impala.catalog.HdfsTable;
+import org.apache.impala.catalog.Table;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.*;
+import org.apache.impala.util.AvroSchemaParser;
+import org.apache.impala.util.AvroSchemaUtils;
+import org.apache.impala.util.MetaStoreUtil;
 
 import com.google.common.base.Preconditions;
 import com.google.common.base.Strings;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/AlterTableStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableStmt.java
index aebd009..223abe9 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableStmt.java
@@ -15,16 +15,16 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.catalog.DataSourceTable;
-import com.cloudera.impala.catalog.KuduTable;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.catalog.View;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TAlterTableParams;
-import com.cloudera.impala.thrift.TTableName;
+import org.apache.impala.authorization.Privilege;
+import org.apache.impala.catalog.DataSourceTable;
+import org.apache.impala.catalog.KuduTable;
+import org.apache.impala.catalog.Table;
+import org.apache.impala.catalog.View;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.TAlterTableParams;
+import org.apache.impala.thrift.TTableName;
 import com.google.common.base.Preconditions;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/AlterViewStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterViewStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterViewStmt.java
index 640b3a1..e7d3f66 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterViewStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterViewStmt.java
@@ -15,13 +15,13 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.catalog.View;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.RuntimeEnv;
+import org.apache.impala.authorization.Privilege;
+import org.apache.impala.catalog.Table;
+import org.apache.impala.catalog.View;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.common.RuntimeEnv;
 
 import com.google.common.base.Preconditions;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/AnalysisContext.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AnalysisContext.java b/fe/src/main/java/org/apache/impala/analysis/AnalysisContext.java
index 006474d..24d1f45 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AnalysisContext.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AnalysisContext.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.io.StringReader;
 import java.util.List;
@@ -25,21 +25,21 @@ import java.util.Set;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.authorization.AuthorizationChecker;
-import com.cloudera.impala.authorization.AuthorizationConfig;
-import com.cloudera.impala.authorization.AuthorizeableColumn;
-import com.cloudera.impala.authorization.AuthorizeableTable;
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.authorization.PrivilegeRequest;
-import com.cloudera.impala.catalog.AuthorizationException;
-import com.cloudera.impala.catalog.Db;
-import com.cloudera.impala.catalog.ImpaladCatalog;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.common.Pair;
-import com.cloudera.impala.thrift.TAccessEvent;
-import com.cloudera.impala.thrift.TLineageGraph;
-import com.cloudera.impala.thrift.TQueryCtx;
+import org.apache.impala.authorization.AuthorizationChecker;
+import org.apache.impala.authorization.AuthorizationConfig;
+import org.apache.impala.authorization.AuthorizeableColumn;
+import org.apache.impala.authorization.AuthorizeableTable;
+import org.apache.impala.authorization.Privilege;
+import org.apache.impala.authorization.PrivilegeRequest;
+import org.apache.impala.catalog.AuthorizationException;
+import org.apache.impala.catalog.Db;
+import org.apache.impala.catalog.ImpaladCatalog;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.common.InternalException;
+import org.apache.impala.common.Pair;
+import org.apache.impala.thrift.TAccessEvent;
+import org.apache.impala.thrift.TLineageGraph;
+import org.apache.impala.thrift.TQueryCtx;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/AnalyticExpr.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AnalyticExpr.java b/fe/src/main/java/org/apache/impala/analysis/AnalyticExpr.java
index 9abd82d..7eead02 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AnalyticExpr.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AnalyticExpr.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.math.BigDecimal;
 import java.math.BigInteger;
@@ -25,19 +25,19 @@ import java.util.List;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.analysis.AnalyticWindow.Boundary;
-import com.cloudera.impala.analysis.AnalyticWindow.BoundaryType;
-import com.cloudera.impala.catalog.AggregateFunction;
-import com.cloudera.impala.catalog.Function;
-import com.cloudera.impala.catalog.ScalarType;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.common.TreeNode;
-import com.cloudera.impala.service.FeSupport;
-import com.cloudera.impala.thrift.TColumnValue;
-import com.cloudera.impala.thrift.TExprNode;
-import com.cloudera.impala.util.TColumnValueUtil;
+import org.apache.impala.analysis.AnalyticWindow.Boundary;
+import org.apache.impala.analysis.AnalyticWindow.BoundaryType;
+import org.apache.impala.catalog.AggregateFunction;
+import org.apache.impala.catalog.Function;
+import org.apache.impala.catalog.ScalarType;
+import org.apache.impala.catalog.Type;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.common.InternalException;
+import org.apache.impala.common.TreeNode;
+import org.apache.impala.service.FeSupport;
+import org.apache.impala.thrift.TColumnValue;
+import org.apache.impala.thrift.TExprNode;
+import org.apache.impala.util.TColumnValueUtil;
 import com.google.common.base.Joiner;
 import com.google.common.base.Objects;
 import com.google.common.base.Preconditions;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/AnalyticInfo.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AnalyticInfo.java b/fe/src/main/java/org/apache/impala/analysis/AnalyticInfo.java
index d0d1a85..cff6f35 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AnalyticInfo.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AnalyticInfo.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.ArrayList;
 import java.util.List;
@@ -23,7 +23,7 @@ import java.util.List;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.catalog.Type;
+import org.apache.impala.catalog.Type;
 import com.google.common.base.Objects;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/AnalyticWindow.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AnalyticWindow.java b/fe/src/main/java/org/apache/impala/analysis/AnalyticWindow.java
index 68558da..6fda1a6 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AnalyticWindow.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AnalyticWindow.java
@@ -15,19 +15,19 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.math.BigDecimal;
 
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.service.FeSupport;
-import com.cloudera.impala.thrift.TAnalyticWindow;
-import com.cloudera.impala.thrift.TAnalyticWindowBoundary;
-import com.cloudera.impala.thrift.TAnalyticWindowBoundaryType;
-import com.cloudera.impala.thrift.TAnalyticWindowType;
-import com.cloudera.impala.thrift.TColumnValue;
-import com.cloudera.impala.util.TColumnValueUtil;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.common.InternalException;
+import org.apache.impala.service.FeSupport;
+import org.apache.impala.thrift.TAnalyticWindow;
+import org.apache.impala.thrift.TAnalyticWindowBoundary;
+import org.apache.impala.thrift.TAnalyticWindowBoundaryType;
+import org.apache.impala.thrift.TAnalyticWindowType;
+import org.apache.impala.thrift.TColumnValue;
+import org.apache.impala.util.TColumnValueUtil;
 import com.google.common.base.Preconditions;
 
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/Analyzer.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/Analyzer.java b/fe/src/main/java/org/apache/impala/analysis/Analyzer.java
index a931489..3edddf2 100644
--- a/fe/src/main/java/org/apache/impala/analysis/Analyzer.java
+++ b/fe/src/main/java/org/apache/impala/analysis/Analyzer.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -33,43 +33,43 @@ import java.util.Set;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.analysis.Path.PathType;
-import com.cloudera.impala.authorization.AuthorizationConfig;
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.authorization.PrivilegeRequest;
-import com.cloudera.impala.authorization.PrivilegeRequestBuilder;
-import com.cloudera.impala.authorization.User;
-import com.cloudera.impala.catalog.CatalogException;
-import com.cloudera.impala.catalog.Column;
-import com.cloudera.impala.catalog.DataSourceTable;
-import com.cloudera.impala.catalog.DatabaseNotFoundException;
-import com.cloudera.impala.catalog.Db;
-import com.cloudera.impala.catalog.HBaseTable;
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.catalog.ImpaladCatalog;
-import com.cloudera.impala.catalog.KuduTable;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.catalog.TableLoadingException;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.catalog.View;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.Id;
-import com.cloudera.impala.common.IdGenerator;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.common.Pair;
-import com.cloudera.impala.common.PrintUtils;
-import com.cloudera.impala.planner.PlanNode;
-import com.cloudera.impala.service.FeSupport;
-import com.cloudera.impala.thrift.TAccessEvent;
-import com.cloudera.impala.thrift.TCatalogObjectType;
-import com.cloudera.impala.thrift.TLineageGraph;
-import com.cloudera.impala.thrift.TNetworkAddress;
-import com.cloudera.impala.thrift.TQueryCtx;
-import com.cloudera.impala.util.DisjointSet;
-import com.cloudera.impala.util.EventSequence;
-import com.cloudera.impala.util.ListMap;
-import com.cloudera.impala.util.TSessionStateUtil;
+import org.apache.impala.analysis.Path.PathType;
+import org.apache.impala.authorization.AuthorizationConfig;
+import org.apache.impala.authorization.Privilege;
+import org.apache.impala.authorization.PrivilegeRequest;
+import org.apache.impala.authorization.PrivilegeRequestBuilder;
+import org.apache.impala.authorization.User;
+import org.apache.impala.catalog.CatalogException;
+import org.apache.impala.catalog.Column;
+import org.apache.impala.catalog.DataSourceTable;
+import org.apache.impala.catalog.DatabaseNotFoundException;
+import org.apache.impala.catalog.Db;
+import org.apache.impala.catalog.HBaseTable;
+import org.apache.impala.catalog.HdfsTable;
+import org.apache.impala.catalog.ImpaladCatalog;
+import org.apache.impala.catalog.KuduTable;
+import org.apache.impala.catalog.Table;
+import org.apache.impala.catalog.TableLoadingException;
+import org.apache.impala.catalog.Type;
+import org.apache.impala.catalog.View;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.common.Id;
+import org.apache.impala.common.IdGenerator;
+import org.apache.impala.common.ImpalaException;
+import org.apache.impala.common.InternalException;
+import org.apache.impala.common.Pair;
+import org.apache.impala.common.PrintUtils;
+import org.apache.impala.planner.PlanNode;
+import org.apache.impala.service.FeSupport;
+import org.apache.impala.thrift.TAccessEvent;
+import org.apache.impala.thrift.TCatalogObjectType;
+import org.apache.impala.thrift.TLineageGraph;
+import org.apache.impala.thrift.TNetworkAddress;
+import org.apache.impala.thrift.TQueryCtx;
+import org.apache.impala.util.DisjointSet;
+import org.apache.impala.util.EventSequence;
+import org.apache.impala.util.ListMap;
+import org.apache.impala.util.TSessionStateUtil;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Predicates;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/ArithmeticExpr.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/ArithmeticExpr.java b/fe/src/main/java/org/apache/impala/analysis/ArithmeticExpr.java
index bf8b0ea..e41cbb8 100644
--- a/fe/src/main/java/org/apache/impala/analysis/ArithmeticExpr.java
+++ b/fe/src/main/java/org/apache/impala/analysis/ArithmeticExpr.java
@@ -15,16 +15,16 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
-import com.cloudera.impala.catalog.Db;
-import com.cloudera.impala.catalog.Function.CompareMode;
-import com.cloudera.impala.catalog.ScalarFunction;
-import com.cloudera.impala.catalog.ScalarType;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TExprNode;
-import com.cloudera.impala.thrift.TExprNodeType;
+import org.apache.impala.catalog.Db;
+import org.apache.impala.catalog.Function.CompareMode;
+import org.apache.impala.catalog.ScalarFunction;
+import org.apache.impala.catalog.ScalarType;
+import org.apache.impala.catalog.Type;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.TExprNode;
+import org.apache.impala.thrift.TExprNodeType;
 import com.google.common.base.Objects;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/AuthorizationStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AuthorizationStmt.java b/fe/src/main/java/org/apache/impala/analysis/AuthorizationStmt.java
index 4e88014..9bfe2e6 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AuthorizationStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AuthorizationStmt.java
@@ -15,10 +15,10 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
-import com.cloudera.impala.authorization.User;
-import com.cloudera.impala.common.AnalysisException;
+import org.apache.impala.authorization.User;
+import org.apache.impala.common.AnalysisException;
 import com.google.common.base.Strings;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/BaseTableRef.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/BaseTableRef.java b/fe/src/main/java/org/apache/impala/analysis/BaseTableRef.java
index 69780e0..fd1c455 100644
--- a/fe/src/main/java/org/apache/impala/analysis/BaseTableRef.java
+++ b/fe/src/main/java/org/apache/impala/analysis/BaseTableRef.java
@@ -15,11 +15,11 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.common.AnalysisException;
+import org.apache.impala.catalog.HdfsTable;
+import org.apache.impala.catalog.Table;
+import org.apache.impala.common.AnalysisException;
 import com.google.common.base.Preconditions;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/BetweenPredicate.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/BetweenPredicate.java b/fe/src/main/java/org/apache/impala/analysis/BetweenPredicate.java
index d76a4c6..5e412f7 100644
--- a/fe/src/main/java/org/apache/impala/analysis/BetweenPredicate.java
+++ b/fe/src/main/java/org/apache/impala/analysis/BetweenPredicate.java
@@ -15,13 +15,13 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.ArrayList;
 import java.util.List;
 
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TExprNode;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.TExprNode;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/BinaryPredicate.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/BinaryPredicate.java b/fe/src/main/java/org/apache/impala/analysis/BinaryPredicate.java
index 35d03e1..ccb09de 100644
--- a/fe/src/main/java/org/apache/impala/analysis/BinaryPredicate.java
+++ b/fe/src/main/java/org/apache/impala/analysis/BinaryPredicate.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.ArrayList;
 import java.util.Collections;
@@ -24,16 +24,16 @@ import java.util.List;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.catalog.Db;
-import com.cloudera.impala.catalog.Function.CompareMode;
-import com.cloudera.impala.catalog.ScalarFunction;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.Pair;
-import com.cloudera.impala.common.Reference;
-import com.cloudera.impala.extdatasource.thrift.TComparisonOp;
-import com.cloudera.impala.thrift.TExprNode;
-import com.cloudera.impala.thrift.TExprNodeType;
+import org.apache.impala.catalog.Db;
+import org.apache.impala.catalog.Function.CompareMode;
+import org.apache.impala.catalog.ScalarFunction;
+import org.apache.impala.catalog.Type;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.common.Pair;
+import org.apache.impala.common.Reference;
+import org.apache.impala.extdatasource.thrift.TComparisonOp;
+import org.apache.impala.thrift.TExprNode;
+import org.apache.impala.thrift.TExprNodeType;
 import com.google.common.base.Objects;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Predicates;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/BoolLiteral.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/BoolLiteral.java b/fe/src/main/java/org/apache/impala/analysis/BoolLiteral.java
index 03b2b1f..9d7b2fd 100644
--- a/fe/src/main/java/org/apache/impala/analysis/BoolLiteral.java
+++ b/fe/src/main/java/org/apache/impala/analysis/BoolLiteral.java
@@ -15,13 +15,13 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TBoolLiteral;
-import com.cloudera.impala.thrift.TExprNode;
-import com.cloudera.impala.thrift.TExprNodeType;
+import org.apache.impala.catalog.Type;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.TBoolLiteral;
+import org.apache.impala.thrift.TExprNode;
+import org.apache.impala.thrift.TExprNodeType;
 import com.google.common.base.Objects;
 
 public class BoolLiteral extends LiteralExpr {

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/CaseExpr.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/CaseExpr.java b/fe/src/main/java/org/apache/impala/analysis/CaseExpr.java
index bd3ec83..4440690 100644
--- a/fe/src/main/java/org/apache/impala/analysis/CaseExpr.java
+++ b/fe/src/main/java/org/apache/impala/analysis/CaseExpr.java
@@ -15,20 +15,20 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.List;
 
-import com.cloudera.impala.catalog.Db;
-import com.cloudera.impala.catalog.Function.CompareMode;
-import com.cloudera.impala.catalog.PrimitiveType;
-import com.cloudera.impala.catalog.ScalarFunction;
-import com.cloudera.impala.catalog.ScalarType;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TCaseExpr;
-import com.cloudera.impala.thrift.TExprNode;
-import com.cloudera.impala.thrift.TExprNodeType;
+import org.apache.impala.catalog.Db;
+import org.apache.impala.catalog.Function.CompareMode;
+import org.apache.impala.catalog.PrimitiveType;
+import org.apache.impala.catalog.ScalarFunction;
+import org.apache.impala.catalog.ScalarType;
+import org.apache.impala.catalog.Type;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.TCaseExpr;
+import org.apache.impala.thrift.TExprNode;
+import org.apache.impala.thrift.TExprNodeType;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/CaseWhenClause.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/CaseWhenClause.java b/fe/src/main/java/org/apache/impala/analysis/CaseWhenClause.java
index 8b1433e..e8dd948 100644
--- a/fe/src/main/java/org/apache/impala/analysis/CaseWhenClause.java
+++ b/fe/src/main/java/org/apache/impala/analysis/CaseWhenClause.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/CastExpr.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/CastExpr.java b/fe/src/main/java/org/apache/impala/analysis/CastExpr.java
index 2b3b271..33ae0e8 100644
--- a/fe/src/main/java/org/apache/impala/analysis/CastExpr.java
+++ b/fe/src/main/java/org/apache/impala/analysis/CastExpr.java
@@ -15,20 +15,20 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
-import com.cloudera.impala.catalog.Catalog;
-import com.cloudera.impala.catalog.Db;
-import com.cloudera.impala.catalog.Function;
-import com.cloudera.impala.catalog.Function.CompareMode;
-import com.cloudera.impala.catalog.PrimitiveType;
-import com.cloudera.impala.catalog.ScalarFunction;
-import com.cloudera.impala.catalog.ScalarType;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TExpr;
-import com.cloudera.impala.thrift.TExprNode;
-import com.cloudera.impala.thrift.TExprNodeType;
+import org.apache.impala.catalog.Catalog;
+import org.apache.impala.catalog.Db;
+import org.apache.impala.catalog.Function;
+import org.apache.impala.catalog.Function.CompareMode;
+import org.apache.impala.catalog.PrimitiveType;
+import org.apache.impala.catalog.ScalarFunction;
+import org.apache.impala.catalog.ScalarType;
+import org.apache.impala.catalog.Type;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.TExpr;
+import org.apache.impala.thrift.TExprNode;
+import org.apache.impala.thrift.TExprNodeType;
 import com.google.common.base.Objects;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/CollectionStructType.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/CollectionStructType.java b/fe/src/main/java/org/apache/impala/analysis/CollectionStructType.java
index b45b856..d05a6b2 100644
--- a/fe/src/main/java/org/apache/impala/analysis/CollectionStructType.java
+++ b/fe/src/main/java/org/apache/impala/analysis/CollectionStructType.java
@@ -15,18 +15,18 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.ArrayList;
 
 import jline.internal.Preconditions;
 
-import com.cloudera.impala.catalog.ArrayType;
-import com.cloudera.impala.catalog.MapType;
-import com.cloudera.impala.catalog.ScalarType;
-import com.cloudera.impala.catalog.StructField;
-import com.cloudera.impala.catalog.StructType;
-import com.cloudera.impala.catalog.Type;
+import org.apache.impala.catalog.ArrayType;
+import org.apache.impala.catalog.MapType;
+import org.apache.impala.catalog.ScalarType;
+import org.apache.impala.catalog.StructField;
+import org.apache.impala.catalog.StructType;
+import org.apache.impala.catalog.Type;
 import com.google.common.collect.Lists;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/CollectionTableRef.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/CollectionTableRef.java b/fe/src/main/java/org/apache/impala/analysis/CollectionTableRef.java
index 8abed3e..e8abaa8 100644
--- a/fe/src/main/java/org/apache/impala/analysis/CollectionTableRef.java
+++ b/fe/src/main/java/org/apache/impala/analysis/CollectionTableRef.java
@@ -15,11 +15,11 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.authorization.PrivilegeRequestBuilder;
-import com.cloudera.impala.common.AnalysisException;
+import org.apache.impala.authorization.Privilege;
+import org.apache.impala.authorization.PrivilegeRequestBuilder;
+import org.apache.impala.common.AnalysisException;
 import com.google.common.base.Preconditions;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/ColumnDef.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/ColumnDef.java b/fe/src/main/java/org/apache/impala/analysis/ColumnDef.java
index e7a3170..6b2a1d2 100644
--- a/fe/src/main/java/org/apache/impala/analysis/ColumnDef.java
+++ b/fe/src/main/java/org/apache/impala/analysis/ColumnDef.java
@@ -15,17 +15,17 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.List;
 
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TColumn;
-import com.cloudera.impala.util.MetaStoreUtil;
+import org.apache.impala.catalog.Type;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.TColumn;
+import org.apache.impala.util.MetaStoreUtil;
 import com.google.common.base.Function;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/ColumnLineageGraph.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/ColumnLineageGraph.java b/fe/src/main/java/org/apache/impala/analysis/ColumnLineageGraph.java
index a00bf53..131d75a 100644
--- a/fe/src/main/java/org/apache/impala/analysis/ColumnLineageGraph.java
+++ b/fe/src/main/java/org/apache/impala/analysis/ColumnLineageGraph.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.text.SimpleDateFormat;
 import java.util.Collection;
@@ -34,14 +34,14 @@ import org.json.simple.parser.ParseException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.common.Id;
-import com.cloudera.impala.common.IdGenerator;
-import com.cloudera.impala.thrift.TEdgeType;
-import com.cloudera.impala.thrift.TQueryCtx;
-import com.cloudera.impala.thrift.TLineageGraph;
-import com.cloudera.impala.thrift.TMultiEdge;
-import com.cloudera.impala.thrift.TVertex;
+import org.apache.impala.catalog.Table;
+import org.apache.impala.common.Id;
+import org.apache.impala.common.IdGenerator;
+import org.apache.impala.thrift.TEdgeType;
+import org.apache.impala.thrift.TQueryCtx;
+import org.apache.impala.thrift.TLineageGraph;
+import org.apache.impala.thrift.TMultiEdge;
+import org.apache.impala.thrift.TVertex;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Strings;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/CompoundPredicate.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/CompoundPredicate.java b/fe/src/main/java/org/apache/impala/analysis/CompoundPredicate.java
index 4869004..d549053 100644
--- a/fe/src/main/java/org/apache/impala/analysis/CompoundPredicate.java
+++ b/fe/src/main/java/org/apache/impala/analysis/CompoundPredicate.java
@@ -15,18 +15,18 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.ArrayList;
 import java.util.List;
 
-import com.cloudera.impala.catalog.Db;
-import com.cloudera.impala.catalog.Function.CompareMode;
-import com.cloudera.impala.catalog.ScalarFunction;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TExprNode;
-import com.cloudera.impala.thrift.TExprNodeType;
+import org.apache.impala.catalog.Db;
+import org.apache.impala.catalog.Function.CompareMode;
+import org.apache.impala.catalog.ScalarFunction;
+import org.apache.impala.catalog.Type;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.TExprNode;
+import org.apache.impala.thrift.TExprNodeType;
 import com.google.common.base.Objects;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/ComputeStatsStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/ComputeStatsStmt.java b/fe/src/main/java/org/apache/impala/analysis/ComputeStatsStmt.java
index cd01713..467faca 100644
--- a/fe/src/main/java/org/apache/impala/analysis/ComputeStatsStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/ComputeStatsStmt.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.Iterator;
 import java.util.List;
@@ -23,19 +23,19 @@ import java.util.List;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.log4j.Logger;
 
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.catalog.Column;
-import com.cloudera.impala.catalog.HBaseTable;
-import com.cloudera.impala.catalog.HdfsPartition;
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.catalog.View;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.PrintUtils;
-import com.cloudera.impala.thrift.TComputeStatsParams;
-import com.cloudera.impala.thrift.TPartitionStats;
-import com.cloudera.impala.thrift.TTableName;
+import org.apache.impala.authorization.Privilege;
+import org.apache.impala.catalog.Column;
+import org.apache.impala.catalog.HBaseTable;
+import org.apache.impala.catalog.HdfsPartition;
+import org.apache.impala.catalog.HdfsTable;
+import org.apache.impala.catalog.Table;
+import org.apache.impala.catalog.Type;
+import org.apache.impala.catalog.View;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.common.PrintUtils;
+import org.apache.impala.thrift.TComputeStatsParams;
+import org.apache.impala.thrift.TPartitionStats;
+import org.apache.impala.thrift.TTableName;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/CreateDataSrcStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/CreateDataSrcStmt.java b/fe/src/main/java/org/apache/impala/analysis/CreateDataSrcStmt.java
index 1ee6fd4..30ca223 100644
--- a/fe/src/main/java/org/apache/impala/analysis/CreateDataSrcStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/CreateDataSrcStmt.java
@@ -15,16 +15,16 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.extdatasource.ApiVersion;
-import com.cloudera.impala.thrift.TCreateDataSourceParams;
-import com.cloudera.impala.thrift.TDataSource;
+import org.apache.impala.authorization.Privilege;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.extdatasource.ApiVersion;
+import org.apache.impala.thrift.TCreateDataSourceParams;
+import org.apache.impala.thrift.TDataSource;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/CreateDbStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/CreateDbStmt.java b/fe/src/main/java/org/apache/impala/analysis/CreateDbStmt.java
index 3dedd8b..a9fbf20 100644
--- a/fe/src/main/java/org/apache/impala/analysis/CreateDbStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/CreateDbStmt.java
@@ -15,15 +15,15 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.catalog.Db;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TCreateDbParams;
+import org.apache.impala.authorization.Privilege;
+import org.apache.impala.catalog.Db;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.TCreateDbParams;
 
 /**
  * Represents a CREATE DATABASE statement

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/CreateDropRoleStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/CreateDropRoleStmt.java b/fe/src/main/java/org/apache/impala/analysis/CreateDropRoleStmt.java
index ef90b8a..3704e97 100644
--- a/fe/src/main/java/org/apache/impala/analysis/CreateDropRoleStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/CreateDropRoleStmt.java
@@ -15,11 +15,11 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
-import com.cloudera.impala.catalog.Role;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TCreateDropRoleParams;
+import org.apache.impala.catalog.Role;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.TCreateDropRoleParams;
 import com.google.common.base.Preconditions;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/CreateFunctionStmtBase.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/CreateFunctionStmtBase.java b/fe/src/main/java/org/apache/impala/analysis/CreateFunctionStmtBase.java
index ebfd7b6..b9c49e5 100644
--- a/fe/src/main/java/org/apache/impala/analysis/CreateFunctionStmtBase.java
+++ b/fe/src/main/java/org/apache/impala/analysis/CreateFunctionStmtBase.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.ArrayList;
 import java.util.HashMap;
@@ -23,16 +23,16 @@ import java.util.List;
 
 import org.apache.hadoop.fs.permission.FsAction;
 
-import com.cloudera.impala.authorization.AuthorizeableFn;
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.authorization.PrivilegeRequest;
-import com.cloudera.impala.catalog.Catalog;
-import com.cloudera.impala.catalog.Db;
-import com.cloudera.impala.catalog.Function;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TCreateFunctionParams;
-import com.cloudera.impala.thrift.TFunctionBinaryType;
+import org.apache.impala.authorization.AuthorizeableFn;
+import org.apache.impala.authorization.Privilege;
+import org.apache.impala.authorization.PrivilegeRequest;
+import org.apache.impala.catalog.Catalog;
+import org.apache.impala.catalog.Db;
+import org.apache.impala.catalog.Function;
+import org.apache.impala.catalog.Type;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.TCreateFunctionParams;
+import org.apache.impala.thrift.TFunctionBinaryType;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/CreateOrAlterViewStmtBase.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/CreateOrAlterViewStmtBase.java b/fe/src/main/java/org/apache/impala/analysis/CreateOrAlterViewStmtBase.java
index cc04b04..1931930 100644
--- a/fe/src/main/java/org/apache/impala/analysis/CreateOrAlterViewStmtBase.java
+++ b/fe/src/main/java/org/apache/impala/analysis/CreateOrAlterViewStmtBase.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.ArrayList;
 import java.util.List;
@@ -24,9 +24,9 @@ import java.util.Set;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TCreateOrAlterViewParams;
-import com.cloudera.impala.thrift.TTableName;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.TCreateOrAlterViewParams;
+import org.apache.impala.thrift.TTableName;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/CreateTableAsSelectStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/CreateTableAsSelectStmt.java b/fe/src/main/java/org/apache/impala/analysis/CreateTableAsSelectStmt.java
index 7b59625..b2a95c4 100644
--- a/fe/src/main/java/org/apache/impala/analysis/CreateTableAsSelectStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/CreateTableAsSelectStmt.java
@@ -15,22 +15,22 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.List;
 import java.util.EnumSet;
 
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.catalog.Db;
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.catalog.KuduTable;
-import com.cloudera.impala.catalog.MetaStoreClientPool.MetaStoreClient;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.catalog.TableId;
-import com.cloudera.impala.catalog.TableLoadingException;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.service.CatalogOpExecutor;
-import com.cloudera.impala.thrift.THdfsFileFormat;
+import org.apache.impala.authorization.Privilege;
+import org.apache.impala.catalog.Db;
+import org.apache.impala.catalog.HdfsTable;
+import org.apache.impala.catalog.KuduTable;
+import org.apache.impala.catalog.MetaStoreClientPool.MetaStoreClient;
+import org.apache.impala.catalog.Table;
+import org.apache.impala.catalog.TableId;
+import org.apache.impala.catalog.TableLoadingException;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.service.CatalogOpExecutor;
+import org.apache.impala.thrift.THdfsFileFormat;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/CreateTableDataSrcStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/CreateTableDataSrcStmt.java b/fe/src/main/java/org/apache/impala/analysis/CreateTableDataSrcStmt.java
index 0faf881..3c54dfd 100644
--- a/fe/src/main/java/org/apache/impala/analysis/CreateTableDataSrcStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/CreateTableDataSrcStmt.java
@@ -15,23 +15,23 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
-import static com.cloudera.impala.catalog.DataSourceTable.TBL_PROP_API_VER;
-import static com.cloudera.impala.catalog.DataSourceTable.TBL_PROP_CLASS;
-import static com.cloudera.impala.catalog.DataSourceTable.TBL_PROP_DATA_SRC_NAME;
-import static com.cloudera.impala.catalog.DataSourceTable.TBL_PROP_INIT_STRING;
-import static com.cloudera.impala.catalog.DataSourceTable.TBL_PROP_LOCATION;
+import static org.apache.impala.catalog.DataSourceTable.TBL_PROP_API_VER;
+import static org.apache.impala.catalog.DataSourceTable.TBL_PROP_CLASS;
+import static org.apache.impala.catalog.DataSourceTable.TBL_PROP_DATA_SRC_NAME;
+import static org.apache.impala.catalog.DataSourceTable.TBL_PROP_INIT_STRING;
+import static org.apache.impala.catalog.DataSourceTable.TBL_PROP_LOCATION;
 
 import java.util.List;
 import java.util.Map;
 
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.catalog.DataSource;
-import com.cloudera.impala.catalog.DataSourceTable;
-import com.cloudera.impala.catalog.RowFormat;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.THdfsFileFormat;
+import org.apache.impala.authorization.Privilege;
+import org.apache.impala.catalog.DataSource;
+import org.apache.impala.catalog.DataSourceTable;
+import org.apache.impala.catalog.RowFormat;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.THdfsFileFormat;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Strings;
 import com.google.common.collect.Lists;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/CreateTableLikeFileStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/CreateTableLikeFileStmt.java b/fe/src/main/java/org/apache/impala/analysis/CreateTableLikeFileStmt.java
index 6695cac..a9a8a90 100644
--- a/fe/src/main/java/org/apache/impala/analysis/CreateTableLikeFileStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/CreateTableLikeFileStmt.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -32,19 +32,19 @@ import parquet.hadoop.metadata.ParquetMetadata;
 import parquet.schema.OriginalType;
 import parquet.schema.PrimitiveType;
 
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.catalog.ArrayType;
-import com.cloudera.impala.catalog.HdfsCompression;
-import com.cloudera.impala.catalog.HdfsFileFormat;
-import com.cloudera.impala.catalog.MapType;
-import com.cloudera.impala.catalog.RowFormat;
-import com.cloudera.impala.catalog.ScalarType;
-import com.cloudera.impala.catalog.StructField;
-import com.cloudera.impala.catalog.StructType;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.FileSystemUtil;
-import com.cloudera.impala.thrift.THdfsFileFormat;
+import org.apache.impala.authorization.Privilege;
+import org.apache.impala.catalog.ArrayType;
+import org.apache.impala.catalog.HdfsCompression;
+import org.apache.impala.catalog.HdfsFileFormat;
+import org.apache.impala.catalog.MapType;
+import org.apache.impala.catalog.RowFormat;
+import org.apache.impala.catalog.ScalarType;
+import org.apache.impala.catalog.StructField;
+import org.apache.impala.catalog.StructType;
+import org.apache.impala.catalog.Type;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.common.FileSystemUtil;
+import org.apache.impala.thrift.THdfsFileFormat;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/CreateTableLikeStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/CreateTableLikeStmt.java b/fe/src/main/java/org/apache/impala/analysis/CreateTableLikeStmt.java
index a7e2038..72843e8 100644
--- a/fe/src/main/java/org/apache/impala/analysis/CreateTableLikeStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/CreateTableLikeStmt.java
@@ -15,17 +15,17 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import org.apache.hadoop.fs.permission.FsAction;
 
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TAccessEvent;
-import com.cloudera.impala.thrift.TCatalogObjectType;
-import com.cloudera.impala.thrift.TCreateTableLikeParams;
-import com.cloudera.impala.thrift.THdfsFileFormat;
-import com.cloudera.impala.thrift.TTableName;
+import org.apache.impala.authorization.Privilege;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.TAccessEvent;
+import org.apache.impala.thrift.TCatalogObjectType;
+import org.apache.impala.thrift.TCreateTableLikeParams;
+import org.apache.impala.thrift.THdfsFileFormat;
+import org.apache.impala.thrift.TTableName;
 import com.google.common.base.Preconditions;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/CreateTableStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/CreateTableStmt.java b/fe/src/main/java/org/apache/impala/analysis/CreateTableStmt.java
index f7b683f..3acb1a3 100644
--- a/fe/src/main/java/org/apache/impala/analysis/CreateTableStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/CreateTableStmt.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.ArrayList;
 import java.util.List;
@@ -26,22 +26,22 @@ import org.apache.avro.Schema;
 import org.apache.avro.SchemaParseException;
 import org.apache.hadoop.fs.permission.FsAction;
 
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.catalog.HdfsStorageDescriptor;
-import com.cloudera.impala.catalog.KuduTable;
-import com.cloudera.impala.catalog.RowFormat;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.FileSystemUtil;
-import com.cloudera.impala.thrift.TAccessEvent;
-import com.cloudera.impala.thrift.TCatalogObjectType;
-import com.cloudera.impala.thrift.TCreateTableParams;
-import com.cloudera.impala.thrift.THdfsFileFormat;
-import com.cloudera.impala.thrift.TTableName;
-import com.cloudera.impala.util.AvroSchemaConverter;
-import com.cloudera.impala.util.AvroSchemaParser;
-import com.cloudera.impala.util.AvroSchemaUtils;
-import com.cloudera.impala.util.KuduUtil;
-import com.cloudera.impala.util.MetaStoreUtil;
+import org.apache.impala.authorization.Privilege;
+import org.apache.impala.catalog.HdfsStorageDescriptor;
+import org.apache.impala.catalog.KuduTable;
+import org.apache.impala.catalog.RowFormat;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.common.FileSystemUtil;
+import org.apache.impala.thrift.TAccessEvent;
+import org.apache.impala.thrift.TCatalogObjectType;
+import org.apache.impala.thrift.TCreateTableParams;
+import org.apache.impala.thrift.THdfsFileFormat;
+import org.apache.impala.thrift.TTableName;
+import org.apache.impala.util.AvroSchemaConverter;
+import org.apache.impala.util.AvroSchemaParser;
+import org.apache.impala.util.AvroSchemaUtils;
+import org.apache.impala.util.KuduUtil;
+import org.apache.impala.util.MetaStoreUtil;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Strings;
 import com.google.common.collect.Lists;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/CreateUdaStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/CreateUdaStmt.java b/fe/src/main/java/org/apache/impala/analysis/CreateUdaStmt.java
index 46b0003..40d66d2 100644
--- a/fe/src/main/java/org/apache/impala/analysis/CreateUdaStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/CreateUdaStmt.java
@@ -15,18 +15,18 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.ArrayList;
 import java.util.HashMap;
 
-import com.cloudera.impala.catalog.AggregateFunction;
-import com.cloudera.impala.catalog.Function;
-import com.cloudera.impala.catalog.PrimitiveType;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TFunctionBinaryType;
-import com.cloudera.impala.thrift.TSymbolType;
+import org.apache.impala.catalog.AggregateFunction;
+import org.apache.impala.catalog.Function;
+import org.apache.impala.catalog.PrimitiveType;
+import org.apache.impala.catalog.Type;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.TFunctionBinaryType;
+import org.apache.impala.thrift.TSymbolType;
 import com.google.common.base.Preconditions;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/CreateUdfStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/CreateUdfStmt.java b/fe/src/main/java/org/apache/impala/analysis/CreateUdfStmt.java
index 550d26f..4d5f3ed 100644
--- a/fe/src/main/java/org/apache/impala/analysis/CreateUdfStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/CreateUdfStmt.java
@@ -15,23 +15,23 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
 
-import com.cloudera.impala.catalog.Db;
-import com.cloudera.impala.catalog.Function;
-import com.cloudera.impala.catalog.PrimitiveType;
-import com.cloudera.impala.catalog.ScalarFunction;
-import com.cloudera.impala.catalog.ScalarType;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.hive.executor.UdfExecutor.JavaUdfDataType;
-import com.cloudera.impala.thrift.TFunctionBinaryType;
-import com.cloudera.impala.thrift.TFunctionCategory;
-import com.cloudera.impala.thrift.TSymbolType;
+import org.apache.impala.catalog.Db;
+import org.apache.impala.catalog.Function;
+import org.apache.impala.catalog.PrimitiveType;
+import org.apache.impala.catalog.ScalarFunction;
+import org.apache.impala.catalog.ScalarType;
+import org.apache.impala.catalog.Type;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.hive.executor.UdfExecutor.JavaUdfDataType;
+import org.apache.impala.thrift.TFunctionBinaryType;
+import org.apache.impala.thrift.TFunctionCategory;
+import org.apache.impala.thrift.TSymbolType;
 import com.google.common.base.Preconditions;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/CreateViewStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/CreateViewStmt.java b/fe/src/main/java/org/apache/impala/analysis/CreateViewStmt.java
index c38eef0..6e30aeb 100644
--- a/fe/src/main/java/org/apache/impala/analysis/CreateViewStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/CreateViewStmt.java
@@ -15,15 +15,15 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.ArrayList;
 
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.RuntimeEnv;
-import com.cloudera.impala.thrift.TAccessEvent;
-import com.cloudera.impala.thrift.TCatalogObjectType;
+import org.apache.impala.authorization.Privilege;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.common.RuntimeEnv;
+import org.apache.impala.thrift.TAccessEvent;
+import org.apache.impala.thrift.TCatalogObjectType;
 
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/DeleteStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/DeleteStmt.java b/fe/src/main/java/org/apache/impala/analysis/DeleteStmt.java
index efa2117..62606b8 100644
--- a/fe/src/main/java/org/apache/impala/analysis/DeleteStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/DeleteStmt.java
@@ -15,14 +15,14 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.List;
 
-import com.cloudera.impala.common.Pair;
-import com.cloudera.impala.planner.DataSink;
-import com.cloudera.impala.planner.KuduTableSink;
-import com.cloudera.impala.planner.TableSink;
+import org.apache.impala.common.Pair;
+import org.apache.impala.planner.DataSink;
+import org.apache.impala.planner.KuduTableSink;
+import org.apache.impala.planner.TableSink;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Lists;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/DescribeDbStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/DescribeDbStmt.java b/fe/src/main/java/org/apache/impala/analysis/DescribeDbStmt.java
index 0ddd6ec..879e4e1 100644
--- a/fe/src/main/java/org/apache/impala/analysis/DescribeDbStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/DescribeDbStmt.java
@@ -15,12 +15,12 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TDescribeDbParams;
-import com.cloudera.impala.thrift.TDescribeOutputStyle;
+import org.apache.impala.authorization.Privilege;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.TDescribeDbParams;
+import org.apache.impala.thrift.TDescribeOutputStyle;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Strings;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/DescribeTableStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/DescribeTableStmt.java b/fe/src/main/java/org/apache/impala/analysis/DescribeTableStmt.java
index b1f9b95..b947e8e 100644
--- a/fe/src/main/java/org/apache/impala/analysis/DescribeTableStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/DescribeTableStmt.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.ArrayList;
 
@@ -23,14 +23,14 @@ import org.apache.commons.lang3.StringUtils;
 
 import parquet.Strings;
 
-import com.cloudera.impala.analysis.Path.PathType;
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.authorization.PrivilegeRequestBuilder;
-import com.cloudera.impala.catalog.StructType;
-import com.cloudera.impala.catalog.TableLoadingException;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TDescribeOutputStyle;
-import com.cloudera.impala.thrift.TDescribeTableParams;
+import org.apache.impala.analysis.Path.PathType;
+import org.apache.impala.authorization.Privilege;
+import org.apache.impala.authorization.PrivilegeRequestBuilder;
+import org.apache.impala.catalog.StructType;
+import org.apache.impala.catalog.TableLoadingException;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.TDescribeOutputStyle;
+import org.apache.impala.thrift.TDescribeTableParams;
 import com.google.common.base.Preconditions;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/DescriptorTable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/DescriptorTable.java b/fe/src/main/java/org/apache/impala/analysis/DescriptorTable.java
index c0d7571..0f59fdb 100644
--- a/fe/src/main/java/org/apache/impala/analysis/DescriptorTable.java
+++ b/fe/src/main/java/org/apache/impala/analysis/DescriptorTable.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.Collection;
 import java.util.HashMap;
@@ -24,10 +24,10 @@ import java.util.List;
 
 import org.apache.commons.lang.StringUtils;
 
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.catalog.View;
-import com.cloudera.impala.common.IdGenerator;
-import com.cloudera.impala.thrift.TDescriptorTable;
+import org.apache.impala.catalog.Table;
+import org.apache.impala.catalog.View;
+import org.apache.impala.common.IdGenerator;
+import org.apache.impala.thrift.TDescriptorTable;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/DistributeParam.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/DistributeParam.java b/fe/src/main/java/org/apache/impala/analysis/DistributeParam.java
index e718d6b..319fe50 100644
--- a/fe/src/main/java/org/apache/impala/analysis/DistributeParam.java
+++ b/fe/src/main/java/org/apache/impala/analysis/DistributeParam.java
@@ -15,19 +15,19 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.math.BigDecimal;
 import java.util.ArrayList;
 import java.util.List;
 
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TDistributeByHashParam;
-import com.cloudera.impala.thrift.TDistributeByRangeParam;
-import com.cloudera.impala.thrift.TDistributeParam;
-import com.cloudera.impala.thrift.TDistributeType;
-import com.cloudera.impala.thrift.TRangeLiteral;
-import com.cloudera.impala.thrift.TRangeLiteralList;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.TDistributeByHashParam;
+import org.apache.impala.thrift.TDistributeByRangeParam;
+import org.apache.impala.thrift.TDistributeParam;
+import org.apache.impala.thrift.TDistributeType;
+import org.apache.impala.thrift.TRangeLiteral;
+import org.apache.impala.thrift.TRangeLiteralList;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/DropDataSrcStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/DropDataSrcStmt.java b/fe/src/main/java/org/apache/impala/analysis/DropDataSrcStmt.java
index f5642fa..e3dd9a8 100644
--- a/fe/src/main/java/org/apache/impala/analysis/DropDataSrcStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/DropDataSrcStmt.java
@@ -15,12 +15,12 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TDropDataSourceParams;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.TDropDataSourceParams;
 import com.google.common.base.Preconditions;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/DropDbStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/DropDbStmt.java b/fe/src/main/java/org/apache/impala/analysis/DropDbStmt.java
index af7fae1..e832878 100644
--- a/fe/src/main/java/org/apache/impala/analysis/DropDbStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/DropDbStmt.java
@@ -15,12 +15,12 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.catalog.Db;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TDropDbParams;
+import org.apache.impala.authorization.Privilege;
+import org.apache.impala.catalog.Db;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.TDropDbParams;
 
 /**
  * Represents a DROP [IF EXISTS] DATABASE [CASCADE | RESTRICT] statement

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/DropFunctionStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/DropFunctionStmt.java b/fe/src/main/java/org/apache/impala/analysis/DropFunctionStmt.java
index 39f5ff9..e8f6666 100644
--- a/fe/src/main/java/org/apache/impala/analysis/DropFunctionStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/DropFunctionStmt.java
@@ -15,17 +15,17 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
-import com.cloudera.impala.authorization.AuthorizeableFn;
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.authorization.PrivilegeRequest;
-import com.cloudera.impala.catalog.Db;
-import com.cloudera.impala.catalog.Function;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TDropFunctionParams;
-import com.cloudera.impala.thrift.TFunctionCategory;
+import org.apache.impala.authorization.AuthorizeableFn;
+import org.apache.impala.authorization.Privilege;
+import org.apache.impala.authorization.PrivilegeRequest;
+import org.apache.impala.catalog.Db;
+import org.apache.impala.catalog.Function;
+import org.apache.impala.catalog.Type;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.TDropFunctionParams;
+import org.apache.impala.thrift.TFunctionCategory;
 
 import java.util.ArrayList;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/DropStatsStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/DropStatsStmt.java b/fe/src/main/java/org/apache/impala/analysis/DropStatsStmt.java
index 90f9434..fe2ff84 100644
--- a/fe/src/main/java/org/apache/impala/analysis/DropStatsStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/DropStatsStmt.java
@@ -15,13 +15,13 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TDropStatsParams;
-import com.cloudera.impala.thrift.TTableName;
+import org.apache.impala.authorization.Privilege;
+import org.apache.impala.catalog.Table;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.TDropStatsParams;
+import org.apache.impala.thrift.TTableName;
 import com.google.common.base.Preconditions;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/DropTableOrViewStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/DropTableOrViewStmt.java b/fe/src/main/java/org/apache/impala/analysis/DropTableOrViewStmt.java
index 8371ace..4ed03ba 100644
--- a/fe/src/main/java/org/apache/impala/analysis/DropTableOrViewStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/DropTableOrViewStmt.java
@@ -15,14 +15,14 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.catalog.View;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TDropTableOrViewParams;
-import com.cloudera.impala.thrift.TTableName;
+import org.apache.impala.authorization.Privilege;
+import org.apache.impala.catalog.Table;
+import org.apache.impala.catalog.View;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.TDropTableOrViewParams;
+import org.apache.impala.thrift.TTableName;
 import com.google.common.base.Preconditions;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/EquivalenceClassId.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/EquivalenceClassId.java b/fe/src/main/java/org/apache/impala/analysis/EquivalenceClassId.java
index df658b9..f19079c 100644
--- a/fe/src/main/java/org/apache/impala/analysis/EquivalenceClassId.java
+++ b/fe/src/main/java/org/apache/impala/analysis/EquivalenceClassId.java
@@ -15,10 +15,10 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
-import com.cloudera.impala.common.Id;
-import com.cloudera.impala.common.IdGenerator;
+import org.apache.impala.common.Id;
+import org.apache.impala.common.IdGenerator;
 
 public class EquivalenceClassId extends Id<EquivalenceClassId> {
   // Construction only allowed via an IdGenerator.

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/ExistsPredicate.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/ExistsPredicate.java b/fe/src/main/java/org/apache/impala/analysis/ExistsPredicate.java
index da984eb..3131acf 100644
--- a/fe/src/main/java/org/apache/impala/analysis/ExistsPredicate.java
+++ b/fe/src/main/java/org/apache/impala/analysis/ExistsPredicate.java
@@ -15,13 +15,13 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TExprNode;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.TExprNode;
 import com.google.common.base.Preconditions;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/Expr.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/Expr.java b/fe/src/main/java/org/apache/impala/analysis/Expr.java
index fdc5bf1..35a87f7 100644
--- a/fe/src/main/java/org/apache/impala/analysis/Expr.java
+++ b/fe/src/main/java/org/apache/impala/analysis/Expr.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.lang.reflect.Method;
 import java.util.ArrayList;
@@ -27,16 +27,16 @@ import java.util.Set;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.catalog.Catalog;
-import com.cloudera.impala.catalog.Function;
-import com.cloudera.impala.catalog.Function.CompareMode;
-import com.cloudera.impala.catalog.PrimitiveType;
-import com.cloudera.impala.catalog.ScalarType;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.TreeNode;
-import com.cloudera.impala.thrift.TExpr;
-import com.cloudera.impala.thrift.TExprNode;
+import org.apache.impala.catalog.Catalog;
+import org.apache.impala.catalog.Function;
+import org.apache.impala.catalog.Function.CompareMode;
+import org.apache.impala.catalog.PrimitiveType;
+import org.apache.impala.catalog.ScalarType;
+import org.apache.impala.catalog.Type;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.common.TreeNode;
+import org.apache.impala.thrift.TExpr;
+import org.apache.impala.thrift.TExprNode;
 import com.google.common.base.Joiner;
 import com.google.common.base.Objects;
 import com.google.common.base.Preconditions;
@@ -249,7 +249,7 @@ abstract public class Expr extends TreeNode<Expr> implements ParseNode, Cloneabl
   /**
    * Perform semantic analysis of node and all of its children.
    * Throws exception if any errors found.
-   * @see com.cloudera.impala.parser.ParseNode#analyze(com.cloudera.impala.parser.Analyzer)
+   * @see org.apache.impala.parser.ParseNode#analyze(org.apache.impala.parser.Analyzer)
    */
   public void analyze(Analyzer analyzer) throws AnalysisException {
     // Check the expr child limit.



[56/61] incubator-impala git commit: IMPALA-3786: Replace "cloudera" with "apache" (part 2)

Posted by kw...@apache.org.
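[Editorial note, not part of the original commit message: the hunks below continue the same mechanical rename of the "com.cloudera.impala" package prefix to "org.apache.impala". Purely as a hedged illustration of what such a bulk rename involves — rewriting package/import statements and relocating each file into the matching directory tree — here is a minimal, hypothetical Java sketch. The class name, source-root default, and the assumption of Unix-style path separators are all illustrative; this is not the tooling the Impala developers used.]

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class PackageRenamer {
  private static final String OLD_PKG = "com.cloudera.impala";
  private static final String NEW_PKG = "org.apache.impala";
  private static final String OLD_DIR = "com/cloudera/impala";
  private static final String NEW_DIR = "org/apache/impala";

  public static void main(String[] args) throws IOException {
    // Source root is a hypothetical default; pass a different one as args[0].
    Path root = Paths.get(args.length > 0 ? args[0] : "fe/src/main/java");
    List<Path> sources;
    try (Stream<Path> walk = Files.walk(root)) {
      sources = walk.filter(p -> p.toString().endsWith(".java"))
                    .collect(Collectors.toList());
    }
    for (Path src : sources) {
      // Rewrite the package prefix in package declarations, imports, and javadoc.
      String text = new String(Files.readAllBytes(src), StandardCharsets.UTF_8);
      String rewritten = text.replace(OLD_PKG, NEW_PKG);
      // Mirror the rename in the directory layout (com/cloudera -> org/apache).
      Path dest = Paths.get(src.toString().replace(OLD_DIR, NEW_DIR));
      Files.createDirectories(dest.getParent());
      Files.write(dest, rewritten.getBytes(StandardCharsets.UTF_8));
      if (!dest.equals(src)) Files.delete(src);
    }
  }
}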
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/UnionStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/UnionStmt.java b/fe/src/main/java/org/apache/impala/analysis/UnionStmt.java
index 10758c7..4f65e0f 100644
--- a/fe/src/main/java/org/apache/impala/analysis/UnionStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/UnionStmt.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.ArrayList;
 import java.util.List;
@@ -23,8 +23,8 @@ import java.util.List;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.catalog.ColumnStats;
-import com.cloudera.impala.common.AnalysisException;
+import org.apache.impala.catalog.ColumnStats;
+import org.apache.impala.common.AnalysisException;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/UpdateStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/UpdateStmt.java b/fe/src/main/java/org/apache/impala/analysis/UpdateStmt.java
index 34e907b..8b0d96d 100644
--- a/fe/src/main/java/org/apache/impala/analysis/UpdateStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/UpdateStmt.java
@@ -15,14 +15,14 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.List;
 
-import com.cloudera.impala.common.Pair;
-import com.cloudera.impala.planner.DataSink;
-import com.cloudera.impala.planner.KuduTableSink;
-import com.cloudera.impala.planner.TableSink;
+import org.apache.impala.common.Pair;
+import org.apache.impala.planner.DataSink;
+import org.apache.impala.planner.KuduTableSink;
+import org.apache.impala.planner.TableSink;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Lists;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/UseStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/UseStmt.java b/fe/src/main/java/org/apache/impala/analysis/UseStmt.java
index c62c454..040f013 100644
--- a/fe/src/main/java/org/apache/impala/analysis/UseStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/UseStmt.java
@@ -15,12 +15,12 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.catalog.Catalog;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TUseDbParams;
+import org.apache.impala.authorization.Privilege;
+import org.apache.impala.catalog.Catalog;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.TUseDbParams;
 
 /**
  * Representation of a USE db statement.

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/ValuesStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/ValuesStmt.java b/fe/src/main/java/org/apache/impala/analysis/ValuesStmt.java
index ed3339d..42cce7a 100644
--- a/fe/src/main/java/org/apache/impala/analysis/ValuesStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/ValuesStmt.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.ArrayList;
 import java.util.List;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/analysis/WithClause.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/WithClause.java b/fe/src/main/java/org/apache/impala/analysis/WithClause.java
index 70e1f29..3e70764 100644
--- a/fe/src/main/java/org/apache/impala/analysis/WithClause.java
+++ b/fe/src/main/java/org/apache/impala/analysis/WithClause.java
@@ -15,14 +15,14 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.analysis;
+package org.apache.impala.analysis;
 
 import java.util.ArrayList;
 import java.util.List;
 
-import com.cloudera.impala.authorization.PrivilegeRequest;
-import com.cloudera.impala.catalog.View;
-import com.cloudera.impala.common.AnalysisException;
+import org.apache.impala.authorization.PrivilegeRequest;
+import org.apache.impala.catalog.View;
+import org.apache.impala.common.AnalysisException;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/authorization/AuthorizationChecker.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/authorization/AuthorizationChecker.java b/fe/src/main/java/org/apache/impala/authorization/AuthorizationChecker.java
index 32f60df..b9a6440 100644
--- a/fe/src/main/java/org/apache/impala/authorization/AuthorizationChecker.java
+++ b/fe/src/main/java/org/apache/impala/authorization/AuthorizationChecker.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.authorization;
+package org.apache.impala.authorization;
 
 import java.util.EnumSet;
 import java.util.List;
@@ -33,9 +33,9 @@ import org.apache.sentry.provider.common.ProviderBackendContext;
 import org.apache.sentry.provider.common.ResourceAuthorizationProvider;
 import org.apache.sentry.provider.file.SimpleFileProviderBackend;
 
-import com.cloudera.impala.catalog.AuthorizationException;
-import com.cloudera.impala.catalog.AuthorizationPolicy;
-import com.cloudera.impala.common.InternalException;
+import org.apache.impala.catalog.AuthorizationException;
+import org.apache.impala.catalog.AuthorizationPolicy;
+import org.apache.impala.common.InternalException;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/authorization/AuthorizationConfig.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/authorization/AuthorizationConfig.java b/fe/src/main/java/org/apache/impala/authorization/AuthorizationConfig.java
index 34dbada..80611d1 100644
--- a/fe/src/main/java/org/apache/impala/authorization/AuthorizationConfig.java
+++ b/fe/src/main/java/org/apache/impala/authorization/AuthorizationConfig.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.authorization;
+package org.apache.impala.authorization;
 
 
 import org.apache.sentry.provider.common.HadoopGroupResourceAuthorizationProvider;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/authorization/Authorizeable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/authorization/Authorizeable.java b/fe/src/main/java/org/apache/impala/authorization/Authorizeable.java
index 5782ed5..7e30332 100644
--- a/fe/src/main/java/org/apache/impala/authorization/Authorizeable.java
+++ b/fe/src/main/java/org/apache/impala/authorization/Authorizeable.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.authorization;
+package org.apache.impala.authorization;
 
 import java.util.List;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/authorization/AuthorizeableColumn.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/authorization/AuthorizeableColumn.java b/fe/src/main/java/org/apache/impala/authorization/AuthorizeableColumn.java
index d5c9cd0..0af3dfd 100644
--- a/fe/src/main/java/org/apache/impala/authorization/AuthorizeableColumn.java
+++ b/fe/src/main/java/org/apache/impala/authorization/AuthorizeableColumn.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.authorization;
+package org.apache.impala.authorization;
 
 import java.util.List;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/authorization/AuthorizeableDb.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/authorization/AuthorizeableDb.java b/fe/src/main/java/org/apache/impala/authorization/AuthorizeableDb.java
index e27ac52..b358528 100644
--- a/fe/src/main/java/org/apache/impala/authorization/AuthorizeableDb.java
+++ b/fe/src/main/java/org/apache/impala/authorization/AuthorizeableDb.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.authorization;
+package org.apache.impala.authorization;
 
 import java.util.List;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/authorization/AuthorizeableFn.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/authorization/AuthorizeableFn.java b/fe/src/main/java/org/apache/impala/authorization/AuthorizeableFn.java
index 0ff4ca3..e74b435 100644
--- a/fe/src/main/java/org/apache/impala/authorization/AuthorizeableFn.java
+++ b/fe/src/main/java/org/apache/impala/authorization/AuthorizeableFn.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.authorization;
+package org.apache.impala.authorization;
 
 import java.util.List;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/authorization/AuthorizeableServer.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/authorization/AuthorizeableServer.java b/fe/src/main/java/org/apache/impala/authorization/AuthorizeableServer.java
index 4ca3218..3046726 100644
--- a/fe/src/main/java/org/apache/impala/authorization/AuthorizeableServer.java
+++ b/fe/src/main/java/org/apache/impala/authorization/AuthorizeableServer.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.authorization;
+package org.apache.impala.authorization;
 
 import java.util.List;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/authorization/AuthorizeableTable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/authorization/AuthorizeableTable.java b/fe/src/main/java/org/apache/impala/authorization/AuthorizeableTable.java
index e28d5a1..9526272 100644
--- a/fe/src/main/java/org/apache/impala/authorization/AuthorizeableTable.java
+++ b/fe/src/main/java/org/apache/impala/authorization/AuthorizeableTable.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.authorization;
+package org.apache.impala.authorization;
 
 import java.util.List;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/authorization/AuthorizeableUri.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/authorization/AuthorizeableUri.java b/fe/src/main/java/org/apache/impala/authorization/AuthorizeableUri.java
index 1997457..ebef8f9 100644
--- a/fe/src/main/java/org/apache/impala/authorization/AuthorizeableUri.java
+++ b/fe/src/main/java/org/apache/impala/authorization/AuthorizeableUri.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.authorization;
+package org.apache.impala.authorization;
 
 import java.util.List;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/authorization/ImpalaInternalAdminUser.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/authorization/ImpalaInternalAdminUser.java b/fe/src/main/java/org/apache/impala/authorization/ImpalaInternalAdminUser.java
index a5c14c1..b68e8ea 100644
--- a/fe/src/main/java/org/apache/impala/authorization/ImpalaInternalAdminUser.java
+++ b/fe/src/main/java/org/apache/impala/authorization/ImpalaInternalAdminUser.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.authorization;
+package org.apache.impala.authorization;
 
 /*
  * A singleton class that represents a special user type used for internal Impala

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/authorization/Privilege.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/authorization/Privilege.java b/fe/src/main/java/org/apache/impala/authorization/Privilege.java
index 9b44517..c087b73 100644
--- a/fe/src/main/java/org/apache/impala/authorization/Privilege.java
+++ b/fe/src/main/java/org/apache/impala/authorization/Privilege.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.authorization;
+package org.apache.impala.authorization;
 
 import java.util.EnumSet;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/authorization/PrivilegeRequest.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/authorization/PrivilegeRequest.java b/fe/src/main/java/org/apache/impala/authorization/PrivilegeRequest.java
index bacc4b2..7dbc23a 100644
--- a/fe/src/main/java/org/apache/impala/authorization/PrivilegeRequest.java
+++ b/fe/src/main/java/org/apache/impala/authorization/PrivilegeRequest.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.authorization;
+package org.apache.impala.authorization;
 
 import com.google.common.base.Preconditions;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/authorization/PrivilegeRequestBuilder.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/authorization/PrivilegeRequestBuilder.java b/fe/src/main/java/org/apache/impala/authorization/PrivilegeRequestBuilder.java
index 13dc909..8cf146c 100644
--- a/fe/src/main/java/org/apache/impala/authorization/PrivilegeRequestBuilder.java
+++ b/fe/src/main/java/org/apache/impala/authorization/PrivilegeRequestBuilder.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.authorization;
+package org.apache.impala.authorization;
 
 import com.google.common.base.Preconditions;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/authorization/SentryConfig.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/authorization/SentryConfig.java b/fe/src/main/java/org/apache/impala/authorization/SentryConfig.java
index 48300f4..8ce74d1 100644
--- a/fe/src/main/java/org/apache/impala/authorization/SentryConfig.java
+++ b/fe/src/main/java/org/apache/impala/authorization/SentryConfig.java
@@ -15,14 +15,14 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.authorization;
+package org.apache.impala.authorization;
 
 import java.io.File;
 import java.net.MalformedURLException;
 
 import org.apache.hadoop.conf.Configuration;
 
-import com.cloudera.impala.common.FileSystemUtil;
+import org.apache.impala.common.FileSystemUtil;
 import com.google.common.base.Strings;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/authorization/User.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/authorization/User.java b/fe/src/main/java/org/apache/impala/authorization/User.java
index a282e54..190419b 100644
--- a/fe/src/main/java/org/apache/impala/authorization/User.java
+++ b/fe/src/main/java/org/apache/impala/authorization/User.java
@@ -15,13 +15,13 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.authorization;
+package org.apache.impala.authorization;
 
 import com.google.common.base.Preconditions;
 import com.google.common.annotations.VisibleForTesting;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.common.RuntimeEnv;
-import com.cloudera.impala.service.BackendConfig;
+import org.apache.impala.common.InternalException;
+import org.apache.impala.common.RuntimeEnv;
+import org.apache.impala.service.BackendConfig;
 
 import java.io.IOException;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/catalog/AggregateFunction.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/AggregateFunction.java b/fe/src/main/java/org/apache/impala/catalog/AggregateFunction.java
index eb968fd..ba8f972 100644
--- a/fe/src/main/java/org/apache/impala/catalog/AggregateFunction.java
+++ b/fe/src/main/java/org/apache/impala/catalog/AggregateFunction.java
@@ -15,16 +15,16 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog;
+package org.apache.impala.catalog;
 
 import java.util.ArrayList;
 import java.util.List;
 
-import com.cloudera.impala.analysis.FunctionName;
-import com.cloudera.impala.analysis.HdfsUri;
-import com.cloudera.impala.thrift.TAggregateFunction;
-import com.cloudera.impala.thrift.TFunction;
-import com.cloudera.impala.thrift.TFunctionBinaryType;
+import org.apache.impala.analysis.FunctionName;
+import org.apache.impala.analysis.HdfsUri;
+import org.apache.impala.thrift.TAggregateFunction;
+import org.apache.impala.thrift.TFunction;
+import org.apache.impala.thrift.TFunctionBinaryType;
 
 /**
  * Internal representation of an aggregate function.

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/catalog/ArrayType.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/ArrayType.java b/fe/src/main/java/org/apache/impala/catalog/ArrayType.java
index 25edde2..e635aa5 100644
--- a/fe/src/main/java/org/apache/impala/catalog/ArrayType.java
+++ b/fe/src/main/java/org/apache/impala/catalog/ArrayType.java
@@ -15,13 +15,13 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog;
+package org.apache.impala.catalog;
 
 import org.apache.commons.lang3.StringUtils;
 
-import com.cloudera.impala.thrift.TColumnType;
-import com.cloudera.impala.thrift.TTypeNode;
-import com.cloudera.impala.thrift.TTypeNodeType;
+import org.apache.impala.thrift.TColumnType;
+import org.apache.impala.thrift.TTypeNode;
+import org.apache.impala.thrift.TTypeNodeType;
 import com.google.common.base.Preconditions;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/catalog/AuthorizationException.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/AuthorizationException.java b/fe/src/main/java/org/apache/impala/catalog/AuthorizationException.java
index ab97c83..909a75b 100644
--- a/fe/src/main/java/org/apache/impala/catalog/AuthorizationException.java
+++ b/fe/src/main/java/org/apache/impala/catalog/AuthorizationException.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog;
+package org.apache.impala.catalog;
 
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/catalog/AuthorizationPolicy.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/AuthorizationPolicy.java b/fe/src/main/java/org/apache/impala/catalog/AuthorizationPolicy.java
index f37f3dc..946ca30 100644
--- a/fe/src/main/java/org/apache/impala/catalog/AuthorizationPolicy.java
+++ b/fe/src/main/java/org/apache/impala/catalog/AuthorizationPolicy.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog;
+package org.apache.impala.catalog;
 
 import java.util.List;
 import java.util.Map;
@@ -26,12 +26,12 @@ import org.apache.log4j.Logger;
 import org.apache.sentry.core.common.ActiveRoleSet;
 import org.apache.sentry.provider.cache.PrivilegeCache;
 
-import com.cloudera.impala.thrift.TColumn;
-import com.cloudera.impala.thrift.TPrivilege;
-import com.cloudera.impala.thrift.TResultRow;
-import com.cloudera.impala.thrift.TResultSet;
-import com.cloudera.impala.thrift.TResultSetMetadata;
-import com.cloudera.impala.util.TResultRowBuilder;
+import org.apache.impala.thrift.TColumn;
+import org.apache.impala.thrift.TPrivilege;
+import org.apache.impala.thrift.TResultRow;
+import org.apache.impala.thrift.TResultSet;
+import org.apache.impala.thrift.TResultSetMetadata;
+import org.apache.impala.util.TResultRowBuilder;
 import com.google.common.base.Strings;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/catalog/BuiltinsDb.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/BuiltinsDb.java b/fe/src/main/java/org/apache/impala/catalog/BuiltinsDb.java
index 972756c..fb4accc 100644
--- a/fe/src/main/java/org/apache/impala/catalog/BuiltinsDb.java
+++ b/fe/src/main/java/org/apache/impala/catalog/BuiltinsDb.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog;
+package org.apache.impala.catalog;
 
 import java.util.ArrayList;
 import java.util.Collections;
@@ -23,15 +23,15 @@ import java.util.Map;
 
 import org.apache.hadoop.hive.metastore.api.Database;
 
-import com.cloudera.impala.analysis.ArithmeticExpr;
-import com.cloudera.impala.analysis.BinaryPredicate;
-import com.cloudera.impala.analysis.CaseExpr;
-import com.cloudera.impala.analysis.CastExpr;
-import com.cloudera.impala.analysis.CompoundPredicate;
-import com.cloudera.impala.analysis.InPredicate;
-import com.cloudera.impala.analysis.IsNullPredicate;
-import com.cloudera.impala.analysis.LikePredicate;
-import com.cloudera.impala.builtins.ScalarBuiltins;
+import org.apache.impala.analysis.ArithmeticExpr;
+import org.apache.impala.analysis.BinaryPredicate;
+import org.apache.impala.analysis.CaseExpr;
+import org.apache.impala.analysis.CastExpr;
+import org.apache.impala.analysis.CompoundPredicate;
+import org.apache.impala.analysis.InPredicate;
+import org.apache.impala.analysis.IsNullPredicate;
+import org.apache.impala.analysis.LikePredicate;
+import org.apache.impala.builtins.ScalarBuiltins;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Lists;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/catalog/Catalog.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/Catalog.java b/fe/src/main/java/org/apache/impala/catalog/Catalog.java
index 4cd1c42..41573ed 100644
--- a/fe/src/main/java/org/apache/impala/catalog/Catalog.java
+++ b/fe/src/main/java/org/apache/impala/catalog/Catalog.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog;
+package org.apache.impala.catalog;
 
 import java.util.Collections;
 import java.util.Comparator;
@@ -25,13 +25,13 @@ import java.util.concurrent.atomic.AtomicReference;
 
 import org.apache.log4j.Logger;
 
-import com.cloudera.impala.analysis.FunctionName;
-import com.cloudera.impala.catalog.MetaStoreClientPool.MetaStoreClient;
-import com.cloudera.impala.thrift.TCatalogObject;
-import com.cloudera.impala.thrift.TFunction;
-import com.cloudera.impala.thrift.TPartitionKeyValue;
-import com.cloudera.impala.thrift.TTableName;
-import com.cloudera.impala.util.PatternMatcher;
+import org.apache.impala.analysis.FunctionName;
+import org.apache.impala.catalog.MetaStoreClientPool.MetaStoreClient;
+import org.apache.impala.thrift.TCatalogObject;
+import org.apache.impala.thrift.TFunction;
+import org.apache.impala.thrift.TPartitionKeyValue;
+import org.apache.impala.thrift.TTableName;
+import org.apache.impala.util.PatternMatcher;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/catalog/CatalogDeltaLog.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/CatalogDeltaLog.java b/fe/src/main/java/org/apache/impala/catalog/CatalogDeltaLog.java
index 72e4990..27839b3 100644
--- a/fe/src/main/java/org/apache/impala/catalog/CatalogDeltaLog.java
+++ b/fe/src/main/java/org/apache/impala/catalog/CatalogDeltaLog.java
@@ -15,14 +15,14 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog;
+package org.apache.impala.catalog;
 
 import java.util.Map;
 import java.util.SortedMap;
 import java.util.TreeMap;
 
-import com.cloudera.impala.thrift.TCatalogObject;
-import com.cloudera.impala.thrift.TTable;
+import org.apache.impala.thrift.TCatalogObject;
+import org.apache.impala.thrift.TTable;
 import com.google.common.base.Preconditions;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/catalog/CatalogException.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/CatalogException.java b/fe/src/main/java/org/apache/impala/catalog/CatalogException.java
index 92fc3a0..d963a97 100644
--- a/fe/src/main/java/org/apache/impala/catalog/CatalogException.java
+++ b/fe/src/main/java/org/apache/impala/catalog/CatalogException.java
@@ -15,9 +15,9 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog;
+package org.apache.impala.catalog;
 
-import com.cloudera.impala.common.ImpalaException;
+import org.apache.impala.common.ImpalaException;
 
 /**
  * Base class for exceptions related to accessing objects in the Catalog.

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/catalog/CatalogObject.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/CatalogObject.java b/fe/src/main/java/org/apache/impala/catalog/CatalogObject.java
index e98bcde..a2d8ca9 100644
--- a/fe/src/main/java/org/apache/impala/catalog/CatalogObject.java
+++ b/fe/src/main/java/org/apache/impala/catalog/CatalogObject.java
@@ -15,9 +15,9 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog;
+package org.apache.impala.catalog;
 
-import com.cloudera.impala.thrift.TCatalogObjectType;
+import org.apache.impala.thrift.TCatalogObjectType;
 
 /**
  * Interface that all catalog objects implement.

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/catalog/CatalogObjectCache.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/CatalogObjectCache.java b/fe/src/main/java/org/apache/impala/catalog/CatalogObjectCache.java
index bd9e3f0..9132905 100644
--- a/fe/src/main/java/org/apache/impala/catalog/CatalogObjectCache.java
+++ b/fe/src/main/java/org/apache/impala/catalog/CatalogObjectCache.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog;
+package org.apache.impala.catalog;
 
 import java.util.Iterator;
 import java.util.List;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/catalog/CatalogServiceCatalog.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/CatalogServiceCatalog.java b/fe/src/main/java/org/apache/impala/catalog/CatalogServiceCatalog.java
index 27d25e1..41c8d62 100644
--- a/fe/src/main/java/org/apache/impala/catalog/CatalogServiceCatalog.java
+++ b/fe/src/main/java/org/apache/impala/catalog/CatalogServiceCatalog.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog;
+package org.apache.impala.catalog;
 
 import java.io.IOException;
 import java.lang.reflect.Method;
@@ -52,28 +52,28 @@ import org.apache.log4j.Logger;
 import org.apache.thrift.protocol.TCompactProtocol;
 import org.apache.thrift.TException;
 
-import com.cloudera.impala.analysis.TableName;
-import com.cloudera.impala.authorization.SentryConfig;
-import com.cloudera.impala.catalog.MetaStoreClientPool.MetaStoreClient;
-import com.cloudera.impala.common.FileSystemUtil;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.common.ImpalaRuntimeException;
-import com.cloudera.impala.common.JniUtil;
-import com.cloudera.impala.common.Pair;
-import com.cloudera.impala.hive.executor.UdfExecutor;
-import com.cloudera.impala.thrift.TCatalog;
-import com.cloudera.impala.thrift.TCatalogObject;
-import com.cloudera.impala.thrift.TCatalogObjectType;
-import com.cloudera.impala.thrift.TFunction;
-import com.cloudera.impala.thrift.TFunctionBinaryType;
-import com.cloudera.impala.thrift.TGetAllCatalogObjectsResponse;
-import com.cloudera.impala.thrift.TPartitionKeyValue;
-import com.cloudera.impala.thrift.TPrivilege;
-import com.cloudera.impala.thrift.TTable;
-import com.cloudera.impala.thrift.TTableName;
-import com.cloudera.impala.thrift.TUniqueId;
-import com.cloudera.impala.util.PatternMatcher;
-import com.cloudera.impala.util.SentryProxy;
+import org.apache.impala.analysis.TableName;
+import org.apache.impala.authorization.SentryConfig;
+import org.apache.impala.catalog.MetaStoreClientPool.MetaStoreClient;
+import org.apache.impala.common.FileSystemUtil;
+import org.apache.impala.common.ImpalaException;
+import org.apache.impala.common.ImpalaRuntimeException;
+import org.apache.impala.common.JniUtil;
+import org.apache.impala.common.Pair;
+import org.apache.impala.hive.executor.UdfExecutor;
+import org.apache.impala.thrift.TCatalog;
+import org.apache.impala.thrift.TCatalogObject;
+import org.apache.impala.thrift.TCatalogObjectType;
+import org.apache.impala.thrift.TFunction;
+import org.apache.impala.thrift.TFunctionBinaryType;
+import org.apache.impala.thrift.TGetAllCatalogObjectsResponse;
+import org.apache.impala.thrift.TPartitionKeyValue;
+import org.apache.impala.thrift.TPrivilege;
+import org.apache.impala.thrift.TTable;
+import org.apache.impala.thrift.TTableName;
+import org.apache.impala.thrift.TUniqueId;
+import org.apache.impala.util.PatternMatcher;
+import org.apache.impala.util.SentryProxy;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/catalog/Column.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/Column.java b/fe/src/main/java/org/apache/impala/catalog/Column.java
index b2d7416..ab064e7 100644
--- a/fe/src/main/java/org/apache/impala/catalog/Column.java
+++ b/fe/src/main/java/org/apache/impala/catalog/Column.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog;
+package org.apache.impala.catalog;
 
 import java.util.List;
 
@@ -24,8 +24,8 @@ import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.thrift.TColumn;
-import com.cloudera.impala.thrift.TColumnStats;
+import org.apache.impala.thrift.TColumn;
+import org.apache.impala.thrift.TColumnStats;
 import com.google.common.base.Function;
 import com.google.common.base.Objects;
 import com.google.common.base.Preconditions;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/catalog/ColumnNotFoundException.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/ColumnNotFoundException.java b/fe/src/main/java/org/apache/impala/catalog/ColumnNotFoundException.java
index 4ea47c1..80582b6 100644
--- a/fe/src/main/java/org/apache/impala/catalog/ColumnNotFoundException.java
+++ b/fe/src/main/java/org/apache/impala/catalog/ColumnNotFoundException.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog;
+package org.apache.impala.catalog;
 
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/catalog/ColumnStats.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/ColumnStats.java b/fe/src/main/java/org/apache/impala/catalog/ColumnStats.java
index 8f8e4b3..0b161df 100644
--- a/fe/src/main/java/org/apache/impala/catalog/ColumnStats.java
+++ b/fe/src/main/java/org/apache/impala/catalog/ColumnStats.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog;
+package org.apache.impala.catalog;
 
 import java.util.Set;
 
@@ -29,9 +29,9 @@ import org.apache.hadoop.hive.metastore.api.StringColumnStatsData;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.analysis.SlotRef;
-import com.cloudera.impala.thrift.TColumnStats;
+import org.apache.impala.analysis.Expr;
+import org.apache.impala.analysis.SlotRef;
+import org.apache.impala.thrift.TColumnStats;
 import com.google.common.base.Objects;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Sets;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/catalog/DataSource.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/DataSource.java b/fe/src/main/java/org/apache/impala/catalog/DataSource.java
index ed0d9ee..e9601d7 100644
--- a/fe/src/main/java/org/apache/impala/catalog/DataSource.java
+++ b/fe/src/main/java/org/apache/impala/catalog/DataSource.java
@@ -15,12 +15,12 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog;
+package org.apache.impala.catalog;
 
 import org.apache.hadoop.fs.Path;
 
-import com.cloudera.impala.thrift.TCatalogObjectType;
-import com.cloudera.impala.thrift.TDataSource;
+import org.apache.impala.thrift.TCatalogObjectType;
+import org.apache.impala.thrift.TDataSource;
 import com.google.common.base.Objects;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/catalog/DataSourceTable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/DataSourceTable.java b/fe/src/main/java/org/apache/impala/catalog/DataSourceTable.java
index c42c804..25fe10d 100644
--- a/fe/src/main/java/org/apache/impala/catalog/DataSourceTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/DataSourceTable.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog;
+package org.apache.impala.catalog;
 
 import java.util.List;
 import java.util.Set;
@@ -25,17 +25,17 @@ import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.extdatasource.v1.ExternalDataSource;
-import com.cloudera.impala.thrift.TCatalogObjectType;
-import com.cloudera.impala.thrift.TColumn;
-import com.cloudera.impala.thrift.TDataSource;
-import com.cloudera.impala.thrift.TDataSourceTable;
-import com.cloudera.impala.thrift.TResultSet;
-import com.cloudera.impala.thrift.TResultSetMetadata;
-import com.cloudera.impala.thrift.TTable;
-import com.cloudera.impala.thrift.TTableDescriptor;
-import com.cloudera.impala.thrift.TTableType;
-import com.cloudera.impala.util.TResultRowBuilder;
+import org.apache.impala.extdatasource.v1.ExternalDataSource;
+import org.apache.impala.thrift.TCatalogObjectType;
+import org.apache.impala.thrift.TColumn;
+import org.apache.impala.thrift.TDataSource;
+import org.apache.impala.thrift.TDataSourceTable;
+import org.apache.impala.thrift.TResultSet;
+import org.apache.impala.thrift.TResultSetMetadata;
+import org.apache.impala.thrift.TTable;
+import org.apache.impala.thrift.TTableDescriptor;
+import org.apache.impala.thrift.TTableType;
+import org.apache.impala.util.TResultRowBuilder;
 import com.google.common.base.Preconditions;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/catalog/DatabaseNotFoundException.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/DatabaseNotFoundException.java b/fe/src/main/java/org/apache/impala/catalog/DatabaseNotFoundException.java
index 8affb11..9193441 100644
--- a/fe/src/main/java/org/apache/impala/catalog/DatabaseNotFoundException.java
+++ b/fe/src/main/java/org/apache/impala/catalog/DatabaseNotFoundException.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog;
+package org.apache.impala.catalog;
 
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/catalog/Db.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/Db.java b/fe/src/main/java/org/apache/impala/catalog/Db.java
index a9150fe..d6fb185 100644
--- a/fe/src/main/java/org/apache/impala/catalog/Db.java
+++ b/fe/src/main/java/org/apache/impala/catalog/Db.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog;
+package org.apache.impala.catalog;
 
 import java.util.Collections;
 import java.util.Comparator;
@@ -30,16 +30,16 @@ import org.apache.thrift.TSerializer;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.catalog.Function;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.common.ImpalaRuntimeException;
-import com.cloudera.impala.common.JniUtil;
-import com.cloudera.impala.thrift.TCatalogObjectType;
-import com.cloudera.impala.thrift.TDatabase;
-import com.cloudera.impala.thrift.TFunction;
-import com.cloudera.impala.thrift.TFunctionBinaryType;
-import com.cloudera.impala.thrift.TFunctionCategory;
-import com.cloudera.impala.util.PatternMatcher;
+import org.apache.impala.catalog.Function;
+import org.apache.impala.common.ImpalaException;
+import org.apache.impala.common.ImpalaRuntimeException;
+import org.apache.impala.common.JniUtil;
+import org.apache.impala.thrift.TCatalogObjectType;
+import org.apache.impala.thrift.TDatabase;
+import org.apache.impala.thrift.TFunction;
+import org.apache.impala.thrift.TFunctionBinaryType;
+import org.apache.impala.thrift.TFunctionCategory;
+import org.apache.impala.util.PatternMatcher;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/catalog/Function.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/Function.java b/fe/src/main/java/org/apache/impala/catalog/Function.java
index 406e958..74df8c9 100644
--- a/fe/src/main/java/org/apache/impala/catalog/Function.java
+++ b/fe/src/main/java/org/apache/impala/catalog/Function.java
@@ -15,25 +15,25 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog;
+package org.apache.impala.catalog;
 
 import java.util.List;
 
-import com.cloudera.impala.analysis.FunctionName;
-import com.cloudera.impala.analysis.HdfsUri;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.service.FeSupport;
-import com.cloudera.impala.thrift.TAggregateFunction;
-import com.cloudera.impala.thrift.TCatalogObjectType;
-import com.cloudera.impala.thrift.TColumnType;
-import com.cloudera.impala.thrift.TFunction;
-import com.cloudera.impala.thrift.TFunctionBinaryType;
-import com.cloudera.impala.thrift.TFunctionCategory;
-import com.cloudera.impala.thrift.TScalarFunction;
-import com.cloudera.impala.thrift.TSymbolLookupParams;
-import com.cloudera.impala.thrift.TSymbolLookupResult;
-import com.cloudera.impala.thrift.TSymbolType;
+import org.apache.impala.analysis.FunctionName;
+import org.apache.impala.analysis.HdfsUri;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.common.InternalException;
+import org.apache.impala.service.FeSupport;
+import org.apache.impala.thrift.TAggregateFunction;
+import org.apache.impala.thrift.TCatalogObjectType;
+import org.apache.impala.thrift.TColumnType;
+import org.apache.impala.thrift.TFunction;
+import org.apache.impala.thrift.TFunctionBinaryType;
+import org.apache.impala.thrift.TFunctionCategory;
+import org.apache.impala.thrift.TScalarFunction;
+import org.apache.impala.thrift.TSymbolLookupParams;
+import org.apache.impala.thrift.TSymbolLookupResult;
+import org.apache.impala.thrift.TSymbolType;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/catalog/HBaseColumn.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/HBaseColumn.java b/fe/src/main/java/org/apache/impala/catalog/HBaseColumn.java
index 37fa853..dae03fb 100644
--- a/fe/src/main/java/org/apache/impala/catalog/HBaseColumn.java
+++ b/fe/src/main/java/org/apache/impala/catalog/HBaseColumn.java
@@ -15,9 +15,9 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog;
+package org.apache.impala.catalog;
 
-import com.cloudera.impala.thrift.TColumn;
+import org.apache.impala.thrift.TColumn;
 
 // Describes an HBase column mapped to a Hive column (as described in the metastore).
 // this.name describes the column name in Hive.

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/catalog/HBaseTable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/HBaseTable.java b/fe/src/main/java/org/apache/impala/catalog/HBaseTable.java
index d96314e..3930610 100644
--- a/fe/src/main/java/org/apache/impala/catalog/HBaseTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/HBaseTable.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog;
+package org.apache.impala.catalog;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -56,17 +56,17 @@ import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.serde2.SerDeException;
 import org.apache.log4j.Logger;
 
-import com.cloudera.impala.common.Pair;
-import com.cloudera.impala.thrift.TCatalogObjectType;
-import com.cloudera.impala.thrift.TColumn;
-import com.cloudera.impala.thrift.THBaseTable;
-import com.cloudera.impala.thrift.TResultSet;
-import com.cloudera.impala.thrift.TResultSetMetadata;
-import com.cloudera.impala.thrift.TTable;
-import com.cloudera.impala.thrift.TTableDescriptor;
-import com.cloudera.impala.thrift.TTableType;
-import com.cloudera.impala.util.StatsHelper;
-import com.cloudera.impala.util.TResultRowBuilder;
+import org.apache.impala.common.Pair;
+import org.apache.impala.thrift.TCatalogObjectType;
+import org.apache.impala.thrift.TColumn;
+import org.apache.impala.thrift.THBaseTable;
+import org.apache.impala.thrift.TResultSet;
+import org.apache.impala.thrift.TResultSetMetadata;
+import org.apache.impala.thrift.TTable;
+import org.apache.impala.thrift.TTableDescriptor;
+import org.apache.impala.thrift.TTableType;
+import org.apache.impala.util.StatsHelper;
+import org.apache.impala.util.TResultRowBuilder;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/catalog/HdfsCachePool.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/HdfsCachePool.java b/fe/src/main/java/org/apache/impala/catalog/HdfsCachePool.java
index b8ff102..398bc87 100644
--- a/fe/src/main/java/org/apache/impala/catalog/HdfsCachePool.java
+++ b/fe/src/main/java/org/apache/impala/catalog/HdfsCachePool.java
@@ -15,12 +15,12 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog;
+package org.apache.impala.catalog;
 
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 
-import com.cloudera.impala.thrift.TCatalogObjectType;
-import com.cloudera.impala.thrift.THdfsCachePool;
+import org.apache.impala.thrift.TCatalogObjectType;
+import org.apache.impala.thrift.THdfsCachePool;
 import com.google.common.base.Preconditions;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/catalog/HdfsCompression.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/HdfsCompression.java b/fe/src/main/java/org/apache/impala/catalog/HdfsCompression.java
index 302ec99..36a1e9e 100644
--- a/fe/src/main/java/org/apache/impala/catalog/HdfsCompression.java
+++ b/fe/src/main/java/org/apache/impala/catalog/HdfsCompression.java
@@ -15,9 +15,9 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog;
+package org.apache.impala.catalog;
 
-import com.cloudera.impala.thrift.THdfsCompression;
+import org.apache.impala.thrift.THdfsCompression;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableMap;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/catalog/HdfsFileFormat.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/HdfsFileFormat.java b/fe/src/main/java/org/apache/impala/catalog/HdfsFileFormat.java
index 475a8d7..86a65bd 100644
--- a/fe/src/main/java/org/apache/impala/catalog/HdfsFileFormat.java
+++ b/fe/src/main/java/org/apache/impala/catalog/HdfsFileFormat.java
@@ -15,12 +15,12 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog;
+package org.apache.impala.catalog;
 
 import java.util.List;
 import java.util.Map;
 
-import com.cloudera.impala.thrift.THdfsFileFormat;
+import org.apache.impala.thrift.THdfsFileFormat;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Lists;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/catalog/HdfsPartition.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/HdfsPartition.java b/fe/src/main/java/org/apache/impala/catalog/HdfsPartition.java
index f408468..8718419 100644
--- a/fe/src/main/java/org/apache/impala/catalog/HdfsPartition.java
+++ b/fe/src/main/java/org/apache/impala/catalog/HdfsPartition.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog;
+package org.apache.impala.catalog;
 
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -29,25 +29,25 @@ import org.apache.hadoop.fs.Path;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.analysis.LiteralExpr;
-import com.cloudera.impala.analysis.NullLiteral;
-import com.cloudera.impala.analysis.PartitionKeyValue;
-import com.cloudera.impala.analysis.ToSqlUtils;
-import com.cloudera.impala.common.FileSystemUtil;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.thrift.ImpalaInternalServiceConstants;
-import com.cloudera.impala.thrift.TAccessLevel;
-import com.cloudera.impala.thrift.TExpr;
-import com.cloudera.impala.thrift.TExprNode;
-import com.cloudera.impala.thrift.THdfsCompression;
-import com.cloudera.impala.thrift.THdfsFileBlock;
-import com.cloudera.impala.thrift.THdfsFileDesc;
-import com.cloudera.impala.thrift.THdfsPartition;
-import com.cloudera.impala.thrift.TNetworkAddress;
-import com.cloudera.impala.thrift.TPartitionStats;
-import com.cloudera.impala.thrift.TTableStats;
-import com.cloudera.impala.util.HdfsCachingUtil;
+import org.apache.impala.analysis.Expr;
+import org.apache.impala.analysis.LiteralExpr;
+import org.apache.impala.analysis.NullLiteral;
+import org.apache.impala.analysis.PartitionKeyValue;
+import org.apache.impala.analysis.ToSqlUtils;
+import org.apache.impala.common.FileSystemUtil;
+import org.apache.impala.common.ImpalaException;
+import org.apache.impala.thrift.ImpalaInternalServiceConstants;
+import org.apache.impala.thrift.TAccessLevel;
+import org.apache.impala.thrift.TExpr;
+import org.apache.impala.thrift.TExprNode;
+import org.apache.impala.thrift.THdfsCompression;
+import org.apache.impala.thrift.THdfsFileBlock;
+import org.apache.impala.thrift.THdfsFileDesc;
+import org.apache.impala.thrift.THdfsPartition;
+import org.apache.impala.thrift.TNetworkAddress;
+import org.apache.impala.thrift.TPartitionStats;
+import org.apache.impala.thrift.TTableStats;
+import org.apache.impala.util.HdfsCachingUtil;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Joiner;
 import com.google.common.base.Objects;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/catalog/HdfsPartitionLocationCompressor.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/HdfsPartitionLocationCompressor.java b/fe/src/main/java/org/apache/impala/catalog/HdfsPartitionLocationCompressor.java
index b72b846..ce53c2d 100644
--- a/fe/src/main/java/org/apache/impala/catalog/HdfsPartitionLocationCompressor.java
+++ b/fe/src/main/java/org/apache/impala/catalog/HdfsPartitionLocationCompressor.java
@@ -15,15 +15,15 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog;
+package org.apache.impala.catalog;
 
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
 
-import com.cloudera.impala.common.Pair;
-import com.cloudera.impala.thrift.THdfsPartitionLocation;
-import com.cloudera.impala.util.ListMap;
+import org.apache.impala.common.Pair;
+import org.apache.impala.thrift.THdfsPartitionLocation;
+import org.apache.impala.util.ListMap;
 import com.google.common.base.Preconditions;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/catalog/HdfsStorageDescriptor.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/HdfsStorageDescriptor.java b/fe/src/main/java/org/apache/impala/catalog/HdfsStorageDescriptor.java
index f018ce3..93121c0 100644
--- a/fe/src/main/java/org/apache/impala/catalog/HdfsStorageDescriptor.java
+++ b/fe/src/main/java/org/apache/impala/catalog/HdfsStorageDescriptor.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog;
+package org.apache.impala.catalog;
 
 import java.util.List;
 import java.util.Map;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java b/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java
index 2464376..e664f6f 100644
--- a/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog;
+package org.apache.impala.catalog;
 
 import java.io.IOException;
 import java.net.URI;
@@ -50,44 +50,44 @@ import org.apache.hadoop.util.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.analysis.ColumnDef;
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.analysis.LiteralExpr;
-import com.cloudera.impala.analysis.NullLiteral;
-import com.cloudera.impala.analysis.NumericLiteral;
-import com.cloudera.impala.analysis.PartitionKeyValue;
-import com.cloudera.impala.catalog.HdfsPartition.BlockReplica;
-import com.cloudera.impala.catalog.HdfsPartition.FileBlock;
-import com.cloudera.impala.catalog.HdfsPartition.FileDescriptor;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.FileSystemUtil;
-import com.cloudera.impala.common.Pair;
-import com.cloudera.impala.common.PrintUtils;
-import com.cloudera.impala.thrift.ImpalaInternalServiceConstants;
-import com.cloudera.impala.thrift.TAccessLevel;
-import com.cloudera.impala.thrift.TCatalogObjectType;
-import com.cloudera.impala.thrift.TColumn;
-import com.cloudera.impala.thrift.THdfsFileBlock;
-import com.cloudera.impala.thrift.THdfsPartition;
-import com.cloudera.impala.thrift.THdfsPartitionLocation;
-import com.cloudera.impala.thrift.THdfsTable;
-import com.cloudera.impala.thrift.TNetworkAddress;
-import com.cloudera.impala.thrift.TPartitionKeyValue;
-import com.cloudera.impala.thrift.TResultRow;
-import com.cloudera.impala.thrift.TResultSet;
-import com.cloudera.impala.thrift.TResultSetMetadata;
-import com.cloudera.impala.thrift.TTable;
-import com.cloudera.impala.thrift.TTableDescriptor;
-import com.cloudera.impala.thrift.TTableType;
-import com.cloudera.impala.util.AvroSchemaConverter;
-import com.cloudera.impala.util.AvroSchemaParser;
-import com.cloudera.impala.util.AvroSchemaUtils;
-import com.cloudera.impala.util.FsPermissionChecker;
-import com.cloudera.impala.util.HdfsCachingUtil;
-import com.cloudera.impala.util.ListMap;
-import com.cloudera.impala.util.MetaStoreUtil;
-import com.cloudera.impala.util.TAccessLevelUtil;
-import com.cloudera.impala.util.TResultRowBuilder;
+import org.apache.impala.analysis.ColumnDef;
+import org.apache.impala.analysis.Expr;
+import org.apache.impala.analysis.LiteralExpr;
+import org.apache.impala.analysis.NullLiteral;
+import org.apache.impala.analysis.NumericLiteral;
+import org.apache.impala.analysis.PartitionKeyValue;
+import org.apache.impala.catalog.HdfsPartition.BlockReplica;
+import org.apache.impala.catalog.HdfsPartition.FileBlock;
+import org.apache.impala.catalog.HdfsPartition.FileDescriptor;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.common.FileSystemUtil;
+import org.apache.impala.common.Pair;
+import org.apache.impala.common.PrintUtils;
+import org.apache.impala.thrift.ImpalaInternalServiceConstants;
+import org.apache.impala.thrift.TAccessLevel;
+import org.apache.impala.thrift.TCatalogObjectType;
+import org.apache.impala.thrift.TColumn;
+import org.apache.impala.thrift.THdfsFileBlock;
+import org.apache.impala.thrift.THdfsPartition;
+import org.apache.impala.thrift.THdfsPartitionLocation;
+import org.apache.impala.thrift.THdfsTable;
+import org.apache.impala.thrift.TNetworkAddress;
+import org.apache.impala.thrift.TPartitionKeyValue;
+import org.apache.impala.thrift.TResultRow;
+import org.apache.impala.thrift.TResultSet;
+import org.apache.impala.thrift.TResultSetMetadata;
+import org.apache.impala.thrift.TTable;
+import org.apache.impala.thrift.TTableDescriptor;
+import org.apache.impala.thrift.TTableType;
+import org.apache.impala.util.AvroSchemaConverter;
+import org.apache.impala.util.AvroSchemaParser;
+import org.apache.impala.util.AvroSchemaUtils;
+import org.apache.impala.util.FsPermissionChecker;
+import org.apache.impala.util.HdfsCachingUtil;
+import org.apache.impala.util.ListMap;
+import org.apache.impala.util.MetaStoreUtil;
+import org.apache.impala.util.TAccessLevelUtil;
+import org.apache.impala.util.TResultRowBuilder;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Strings;
 import com.google.common.collect.Lists;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/catalog/HiveStorageDescriptorFactory.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/HiveStorageDescriptorFactory.java b/fe/src/main/java/org/apache/impala/catalog/HiveStorageDescriptorFactory.java
index 9644bbb..342e8d7 100644
--- a/fe/src/main/java/org/apache/impala/catalog/HiveStorageDescriptorFactory.java
+++ b/fe/src/main/java/org/apache/impala/catalog/HiveStorageDescriptorFactory.java
@@ -15,13 +15,13 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog;
+package org.apache.impala.catalog;
 
 import java.util.HashMap;
 
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 
-import com.cloudera.impala.thrift.THdfsFileFormat;
+import org.apache.impala.thrift.THdfsFileFormat;
 import com.google.common.base.Preconditions;
 
 public class HiveStorageDescriptorFactory {

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/catalog/ImpaladCatalog.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/ImpaladCatalog.java b/fe/src/main/java/org/apache/impala/catalog/ImpaladCatalog.java
index 02c9747..c416bee 100644
--- a/fe/src/main/java/org/apache/impala/catalog/ImpaladCatalog.java
+++ b/fe/src/main/java/org/apache/impala/catalog/ImpaladCatalog.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog;
+package org.apache.impala.catalog;
 
 import java.util.concurrent.atomic.AtomicBoolean;
 
@@ -25,19 +25,19 @@ import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.log4j.Logger;
 import org.apache.thrift.TException;
 
-import com.cloudera.impala.catalog.MetaStoreClientPool.MetaStoreClient;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.thrift.TCatalogObject;
-import com.cloudera.impala.thrift.TCatalogObjectType;
-import com.cloudera.impala.thrift.TDataSource;
-import com.cloudera.impala.thrift.TDatabase;
-import com.cloudera.impala.thrift.TFunction;
-import com.cloudera.impala.thrift.TPrivilege;
-import com.cloudera.impala.thrift.TRole;
-import com.cloudera.impala.thrift.TTable;
-import com.cloudera.impala.thrift.TUniqueId;
-import com.cloudera.impala.thrift.TUpdateCatalogCacheRequest;
-import com.cloudera.impala.thrift.TUpdateCatalogCacheResponse;
+import org.apache.impala.catalog.MetaStoreClientPool.MetaStoreClient;
+import org.apache.impala.common.ImpalaException;
+import org.apache.impala.thrift.TCatalogObject;
+import org.apache.impala.thrift.TCatalogObjectType;
+import org.apache.impala.thrift.TDataSource;
+import org.apache.impala.thrift.TDatabase;
+import org.apache.impala.thrift.TFunction;
+import org.apache.impala.thrift.TPrivilege;
+import org.apache.impala.thrift.TRole;
+import org.apache.impala.thrift.TTable;
+import org.apache.impala.thrift.TUniqueId;
+import org.apache.impala.thrift.TUpdateCatalogCacheRequest;
+import org.apache.impala.thrift.TUpdateCatalogCacheResponse;
 
 /**
  * Thread safe Catalog for an Impalad.  The Impalad catalog can be updated either via

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/catalog/IncompleteTable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/IncompleteTable.java b/fe/src/main/java/org/apache/impala/catalog/IncompleteTable.java
index 88bab5e..ec42036 100644
--- a/fe/src/main/java/org/apache/impala/catalog/IncompleteTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/IncompleteTable.java
@@ -15,20 +15,20 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog;
+package org.apache.impala.catalog;
 
 import java.util.List;
 import java.util.Set;
 
 import org.apache.hadoop.hive.metastore.IMetaStoreClient;
 
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.common.JniUtil;
-import com.cloudera.impala.thrift.TCatalogObjectType;
-import com.cloudera.impala.thrift.TErrorCode;
-import com.cloudera.impala.thrift.TStatus;
-import com.cloudera.impala.thrift.TTable;
-import com.cloudera.impala.thrift.TTableDescriptor;
+import org.apache.impala.common.ImpalaException;
+import org.apache.impala.common.JniUtil;
+import org.apache.impala.thrift.TCatalogObjectType;
+import org.apache.impala.thrift.TErrorCode;
+import org.apache.impala.thrift.TStatus;
+import org.apache.impala.thrift.TTable;
+import org.apache.impala.thrift.TTableDescriptor;
 import com.google.common.base.Joiner;
 import com.google.common.collect.Lists;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/catalog/KuduColumn.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/KuduColumn.java b/fe/src/main/java/org/apache/impala/catalog/KuduColumn.java
index 5e272d8..404dbf5 100644
--- a/fe/src/main/java/org/apache/impala/catalog/KuduColumn.java
+++ b/fe/src/main/java/org/apache/impala/catalog/KuduColumn.java
@@ -15,9 +15,9 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog;
+package org.apache.impala.catalog;
 
-import com.cloudera.impala.thrift.TColumn;
+import org.apache.impala.thrift.TColumn;
 
 /**
  *  Describes a Kudu column mapped to a Hive column (as described in the metastore).

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/catalog/KuduTable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/KuduTable.java b/fe/src/main/java/org/apache/impala/catalog/KuduTable.java
index 71d897d..d55f8da 100644
--- a/fe/src/main/java/org/apache/impala/catalog/KuduTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/KuduTable.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog;
+package org.apache.impala.catalog;
 
 import java.util.ArrayList;
 import java.util.List;
@@ -31,17 +31,17 @@ import org.apache.log4j.Logger;
 import org.apache.kudu.client.KuduClient;
 import org.apache.kudu.client.LocatedTablet;
 
-import com.cloudera.impala.common.ImpalaRuntimeException;
-import com.cloudera.impala.thrift.TCatalogObjectType;
-import com.cloudera.impala.thrift.TColumn;
-import com.cloudera.impala.thrift.TKuduTable;
-import com.cloudera.impala.thrift.TResultSet;
-import com.cloudera.impala.thrift.TResultSetMetadata;
-import com.cloudera.impala.thrift.TTable;
-import com.cloudera.impala.thrift.TTableDescriptor;
-import com.cloudera.impala.thrift.TTableType;
-import com.cloudera.impala.util.KuduUtil;
-import com.cloudera.impala.util.TResultRowBuilder;
+import org.apache.impala.common.ImpalaRuntimeException;
+import org.apache.impala.thrift.TCatalogObjectType;
+import org.apache.impala.thrift.TColumn;
+import org.apache.impala.thrift.TKuduTable;
+import org.apache.impala.thrift.TResultSet;
+import org.apache.impala.thrift.TResultSetMetadata;
+import org.apache.impala.thrift.TTable;
+import org.apache.impala.thrift.TTableDescriptor;
+import org.apache.impala.thrift.TTableType;
+import org.apache.impala.util.KuduUtil;
+import org.apache.impala.util.TResultRowBuilder;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableList;
@@ -146,7 +146,7 @@ public class KuduTable extends Table {
     Set<String> columnNames = Sets.newHashSet();
     int pos = 0;
     for (FieldSchema field: schema) {
-      com.cloudera.impala.catalog.Type type = parseColumnType(field);
+      org.apache.impala.catalog.Type type = parseColumnType(field);
       // TODO(kudu-merge): Check for decimal types?
       boolean isKey = keyColumns.contains(field.getName());
       KuduColumn col = new KuduColumn(field.getName(), isKey, !isKey, type,

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/catalog/MapType.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/MapType.java b/fe/src/main/java/org/apache/impala/catalog/MapType.java
index 8bc7b05..4235181 100644
--- a/fe/src/main/java/org/apache/impala/catalog/MapType.java
+++ b/fe/src/main/java/org/apache/impala/catalog/MapType.java
@@ -15,13 +15,13 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog;
+package org.apache.impala.catalog;
 
 import org.apache.commons.lang3.StringUtils;
 
-import com.cloudera.impala.thrift.TColumnType;
-import com.cloudera.impala.thrift.TTypeNode;
-import com.cloudera.impala.thrift.TTypeNodeType;
+import org.apache.impala.thrift.TColumnType;
+import org.apache.impala.thrift.TTypeNode;
+import org.apache.impala.thrift.TTypeNodeType;
 import com.google.common.base.Preconditions;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/catalog/MetaStoreClientPool.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/MetaStoreClientPool.java b/fe/src/main/java/org/apache/impala/catalog/MetaStoreClientPool.java
index 40eb4cf..372fe37 100644
--- a/fe/src/main/java/org/apache/impala/catalog/MetaStoreClientPool.java
+++ b/fe/src/main/java/org/apache/impala/catalog/MetaStoreClientPool.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog;
+package org.apache.impala.catalog;
 
 import java.util.concurrent.ConcurrentLinkedQueue;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/catalog/PartitionNotFoundException.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/PartitionNotFoundException.java b/fe/src/main/java/org/apache/impala/catalog/PartitionNotFoundException.java
index 9964a3c..60707ef 100644
--- a/fe/src/main/java/org/apache/impala/catalog/PartitionNotFoundException.java
+++ b/fe/src/main/java/org/apache/impala/catalog/PartitionNotFoundException.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog;
+package org.apache.impala.catalog;
 
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/catalog/PartitionStatsUtil.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/PartitionStatsUtil.java b/fe/src/main/java/org/apache/impala/catalog/PartitionStatsUtil.java
index 2f2022f..222f252 100644
--- a/fe/src/main/java/org/apache/impala/catalog/PartitionStatsUtil.java
+++ b/fe/src/main/java/org/apache/impala/catalog/PartitionStatsUtil.java
@@ -15,13 +15,13 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog;
+package org.apache.impala.catalog;
 
-import com.cloudera.impala.thrift.TPartitionStats;
-import com.cloudera.impala.common.JniUtil;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.common.ImpalaRuntimeException;
-import com.cloudera.impala.util.MetaStoreUtil;
+import org.apache.impala.thrift.TPartitionStats;
+import org.apache.impala.common.JniUtil;
+import org.apache.impala.common.ImpalaException;
+import org.apache.impala.common.ImpalaRuntimeException;
+import org.apache.impala.util.MetaStoreUtil;
 
 import java.util.Iterator;
 import java.util.List;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/catalog/PrimitiveType.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/PrimitiveType.java b/fe/src/main/java/org/apache/impala/catalog/PrimitiveType.java
index 4344b61..fa53c6c 100644
--- a/fe/src/main/java/org/apache/impala/catalog/PrimitiveType.java
+++ b/fe/src/main/java/org/apache/impala/catalog/PrimitiveType.java
@@ -15,11 +15,11 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog;
+package org.apache.impala.catalog;
 
 import java.util.List;
 
-import com.cloudera.impala.thrift.TPrimitiveType;
+import org.apache.impala.thrift.TPrimitiveType;
 import com.google.common.collect.Lists;
 
 public enum PrimitiveType {

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/catalog/Role.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/Role.java b/fe/src/main/java/org/apache/impala/catalog/Role.java
index 7ba1bac..0b89866 100644
--- a/fe/src/main/java/org/apache/impala/catalog/Role.java
+++ b/fe/src/main/java/org/apache/impala/catalog/Role.java
@@ -15,14 +15,14 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog;
+package org.apache.impala.catalog;
 
 import java.util.List;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicInteger;
 
-import com.cloudera.impala.thrift.TCatalogObjectType;
-import com.cloudera.impala.thrift.TRole;
+import org.apache.impala.thrift.TCatalogObjectType;
+import org.apache.impala.thrift.TRole;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/catalog/RolePrivilege.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/RolePrivilege.java b/fe/src/main/java/org/apache/impala/catalog/RolePrivilege.java
index 37d91d8..58bb906 100644
--- a/fe/src/main/java/org/apache/impala/catalog/RolePrivilege.java
+++ b/fe/src/main/java/org/apache/impala/catalog/RolePrivilege.java
@@ -15,16 +15,16 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog;
+package org.apache.impala.catalog;
 
 import java.util.List;
 
 import org.apache.log4j.Logger;
 
-import com.cloudera.impala.thrift.TCatalogObjectType;
-import com.cloudera.impala.thrift.TPrivilege;
-import com.cloudera.impala.thrift.TPrivilegeLevel;
-import com.cloudera.impala.thrift.TPrivilegeScope;
+import org.apache.impala.thrift.TCatalogObjectType;
+import org.apache.impala.thrift.TPrivilege;
+import org.apache.impala.thrift.TPrivilegeLevel;
+import org.apache.impala.thrift.TPrivilegeScope;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/catalog/RowFormat.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/RowFormat.java b/fe/src/main/java/org/apache/impala/catalog/RowFormat.java
index 76dcacb..832d115 100644
--- a/fe/src/main/java/org/apache/impala/catalog/RowFormat.java
+++ b/fe/src/main/java/org/apache/impala/catalog/RowFormat.java
@@ -15,14 +15,14 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog;
+package org.apache.impala.catalog;
 
 import java.util.Map;
 
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 
-import com.cloudera.impala.analysis.StringLiteral;
-import com.cloudera.impala.thrift.TTableRowFormat;
+import org.apache.impala.analysis.StringLiteral;
+import org.apache.impala.thrift.TTableRowFormat;
 import com.google.common.base.Preconditions;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/catalog/ScalarFunction.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/ScalarFunction.java b/fe/src/main/java/org/apache/impala/catalog/ScalarFunction.java
index b921015..766111e 100644
--- a/fe/src/main/java/org/apache/impala/catalog/ScalarFunction.java
+++ b/fe/src/main/java/org/apache/impala/catalog/ScalarFunction.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog;
+package org.apache.impala.catalog;
 
 import java.util.ArrayList;
 import java.util.List;
@@ -25,14 +25,14 @@ import org.apache.hadoop.hive.metastore.api.PrincipalType;
 import org.apache.hadoop.hive.metastore.api.ResourceType;
 import org.apache.hadoop.hive.metastore.api.ResourceUri;
 
-import com.cloudera.impala.analysis.FunctionName;
-import com.cloudera.impala.analysis.HdfsUri;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.hive.executor.UdfExecutor.JavaUdfDataType;
-import com.cloudera.impala.thrift.TFunction;
-import com.cloudera.impala.thrift.TFunctionBinaryType;
-import com.cloudera.impala.thrift.TScalarFunction;
-import com.cloudera.impala.thrift.TSymbolType;
+import org.apache.impala.analysis.FunctionName;
+import org.apache.impala.analysis.HdfsUri;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.hive.executor.UdfExecutor.JavaUdfDataType;
+import org.apache.impala.thrift.TFunction;
+import org.apache.impala.thrift.TFunctionBinaryType;
+import org.apache.impala.thrift.TScalarFunction;
+import org.apache.impala.thrift.TSymbolType;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/catalog/ScalarType.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/ScalarType.java b/fe/src/main/java/org/apache/impala/catalog/ScalarType.java
index ae955f8..5f98b7c 100644
--- a/fe/src/main/java/org/apache/impala/catalog/ScalarType.java
+++ b/fe/src/main/java/org/apache/impala/catalog/ScalarType.java
@@ -15,15 +15,15 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog;
+package org.apache.impala.catalog;
 
 import org.apache.commons.lang3.StringUtils;
 
-import com.cloudera.impala.analysis.TypesUtil;
-import com.cloudera.impala.thrift.TColumnType;
-import com.cloudera.impala.thrift.TScalarType;
-import com.cloudera.impala.thrift.TTypeNode;
-import com.cloudera.impala.thrift.TTypeNodeType;
+import org.apache.impala.analysis.TypesUtil;
+import org.apache.impala.thrift.TColumnType;
+import org.apache.impala.thrift.TScalarType;
+import org.apache.impala.thrift.TTypeNode;
+import org.apache.impala.thrift.TTypeNodeType;
 import com.google.common.base.Preconditions;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/catalog/StructField.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/StructField.java b/fe/src/main/java/org/apache/impala/catalog/StructField.java
index 4b9cb80..6e44714 100644
--- a/fe/src/main/java/org/apache/impala/catalog/StructField.java
+++ b/fe/src/main/java/org/apache/impala/catalog/StructField.java
@@ -15,13 +15,13 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog;
+package org.apache.impala.catalog;
 
 import org.apache.commons.lang3.StringUtils;
 
-import com.cloudera.impala.thrift.TColumnType;
-import com.cloudera.impala.thrift.TStructField;
-import com.cloudera.impala.thrift.TTypeNode;
+import org.apache.impala.thrift.TColumnType;
+import org.apache.impala.thrift.TStructField;
+import org.apache.impala.thrift.TTypeNode;
 
 /**
  * TODO: Support comments for struct fields. The Metastore does not properly store

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/catalog/StructType.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/StructType.java b/fe/src/main/java/org/apache/impala/catalog/StructType.java
index 3600b6b..421e74e 100644
--- a/fe/src/main/java/org/apache/impala/catalog/StructType.java
+++ b/fe/src/main/java/org/apache/impala/catalog/StructType.java
@@ -15,17 +15,17 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog;
+package org.apache.impala.catalog;
 
 import java.util.ArrayList;
 import java.util.HashMap;
 
 import org.apache.commons.lang3.StringUtils;
 
-import com.cloudera.impala.thrift.TColumnType;
-import com.cloudera.impala.thrift.TStructField;
-import com.cloudera.impala.thrift.TTypeNode;
-import com.cloudera.impala.thrift.TTypeNodeType;
+import org.apache.impala.thrift.TColumnType;
+import org.apache.impala.thrift.TStructField;
+import org.apache.impala.thrift.TTypeNode;
+import org.apache.impala.thrift.TTypeNodeType;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/catalog/Table.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/Table.java b/fe/src/main/java/org/apache/impala/catalog/Table.java
index f794d7e..6145cc5 100644
--- a/fe/src/main/java/org/apache/impala/catalog/Table.java
+++ b/fe/src/main/java/org/apache/impala/catalog/Table.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog;
+package org.apache.impala.catalog;
 
 import java.util.ArrayList;
 import java.util.EnumSet;
@@ -30,19 +30,19 @@ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.log4j.Logger;
 
-import com.cloudera.impala.analysis.TableName;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.ImpalaRuntimeException;
-import com.cloudera.impala.common.Pair;
-import com.cloudera.impala.thrift.TAccessLevel;
-import com.cloudera.impala.thrift.TCatalogObject;
-import com.cloudera.impala.thrift.TCatalogObjectType;
-import com.cloudera.impala.thrift.TColumn;
-import com.cloudera.impala.thrift.TColumnDescriptor;
-import com.cloudera.impala.thrift.TTable;
-import com.cloudera.impala.thrift.TTableDescriptor;
-import com.cloudera.impala.thrift.TTableStats;
-import com.cloudera.impala.util.HdfsCachingUtil;
+import org.apache.impala.analysis.TableName;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.common.ImpalaRuntimeException;
+import org.apache.impala.common.Pair;
+import org.apache.impala.thrift.TAccessLevel;
+import org.apache.impala.thrift.TCatalogObject;
+import org.apache.impala.thrift.TCatalogObjectType;
+import org.apache.impala.thrift.TColumn;
+import org.apache.impala.thrift.TColumnDescriptor;
+import org.apache.impala.thrift.TTable;
+import org.apache.impala.thrift.TTableDescriptor;
+import org.apache.impala.thrift.TTableStats;
+import org.apache.impala.util.HdfsCachingUtil;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/catalog/TableId.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/TableId.java b/fe/src/main/java/org/apache/impala/catalog/TableId.java
index 1918029..2b1dabc 100644
--- a/fe/src/main/java/org/apache/impala/catalog/TableId.java
+++ b/fe/src/main/java/org/apache/impala/catalog/TableId.java
@@ -15,10 +15,10 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog;
+package org.apache.impala.catalog;
 
-import com.cloudera.impala.common.Id;
-import com.cloudera.impala.common.IdGenerator;
+import org.apache.impala.common.Id;
+import org.apache.impala.common.IdGenerator;
 
 public class TableId extends Id<TableId> {
   // Construction only allowed via an IdGenerator.

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/catalog/TableLoader.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/TableLoader.java b/fe/src/main/java/org/apache/impala/catalog/TableLoader.java
index 78b58f6..764abe0 100644
--- a/fe/src/main/java/org/apache/impala/catalog/TableLoader.java
+++ b/fe/src/main/java/org/apache/impala/catalog/TableLoader.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog;
+package org.apache.impala.catalog;
 
 import java.util.EnumSet;
 import java.util.Set;
@@ -24,7 +24,7 @@ import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.log4j.Logger;
 
-import com.cloudera.impala.catalog.MetaStoreClientPool.MetaStoreClient;
+import org.apache.impala.catalog.MetaStoreClientPool.MetaStoreClient;
 
 /**
  * Class that implements the logic for how a table's metadata should be loaded from

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/catalog/TableLoadingException.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/TableLoadingException.java b/fe/src/main/java/org/apache/impala/catalog/TableLoadingException.java
index ab55675..89b405b 100644
--- a/fe/src/main/java/org/apache/impala/catalog/TableLoadingException.java
+++ b/fe/src/main/java/org/apache/impala/catalog/TableLoadingException.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog;
+package org.apache.impala.catalog;
 
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/catalog/TableLoadingMgr.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/TableLoadingMgr.java b/fe/src/main/java/org/apache/impala/catalog/TableLoadingMgr.java
index 57cc513..5f64b6d 100644
--- a/fe/src/main/java/org/apache/impala/catalog/TableLoadingMgr.java
+++ b/fe/src/main/java/org/apache/impala/catalog/TableLoadingMgr.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog;
+package org.apache.impala.catalog;
 
 import java.util.Collections;
 import java.util.HashSet;
@@ -33,8 +33,8 @@ import java.util.concurrent.LinkedBlockingQueue;
 
 import org.apache.log4j.Logger;
 
-import com.cloudera.impala.thrift.TTableName;
-import com.cloudera.impala.util.HdfsCachingUtil;
+import org.apache.impala.thrift.TTableName;
+import org.apache.impala.util.HdfsCachingUtil;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Maps;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/catalog/TableNotFoundException.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/TableNotFoundException.java b/fe/src/main/java/org/apache/impala/catalog/TableNotFoundException.java
index 26686d1..7b93a20 100644
--- a/fe/src/main/java/org/apache/impala/catalog/TableNotFoundException.java
+++ b/fe/src/main/java/org/apache/impala/catalog/TableNotFoundException.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog;
+package org.apache.impala.catalog;
 
 
 /**



[43/61] [partial] incubator-impala git commit: IMPALA-3786: Replace "cloudera" with "apache" (part 1)

Posted by kw...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/DescribeTableStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/DescribeTableStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/DescribeTableStmt.java
deleted file mode 100644
index b1f9b95..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/DescribeTableStmt.java
+++ /dev/null
@@ -1,152 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.ArrayList;
-
-import org.apache.commons.lang3.StringUtils;
-
-import parquet.Strings;
-
-import com.cloudera.impala.analysis.Path.PathType;
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.authorization.PrivilegeRequestBuilder;
-import com.cloudera.impala.catalog.StructType;
-import com.cloudera.impala.catalog.TableLoadingException;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TDescribeOutputStyle;
-import com.cloudera.impala.thrift.TDescribeTableParams;
-import com.google.common.base.Preconditions;
-
-/**
- * Representation of a DESCRIBE table statement which returns metadata on
- * a specified table:
- * Syntax: DESCRIBE <path>
- *         DESCRIBE FORMATTED|EXTENDED <table>
- *
- * If FORMATTED|EXTENDED is not specified and the path refers to a table, the statement
- * only returns info on the given table's column definition (column name, data type, and
- * comment). If the path refers to a complex typed field within a column, the statement
- * returns the field names, types, and comments.
- * If FORMATTED|EXTENDED is specified, extended metadata on the table is returned
- * (in addition to the column definitions). This metadata includes info about the table
- * properties, SerDe properties, StorageDescriptor properties, and more.
- */
-public class DescribeTableStmt extends StatementBase {
-  private final TDescribeOutputStyle outputStyle_;
-
-  /// "."-separated path from the describe statement.
-  private ArrayList<String> rawPath_;
-
-  /// The resolved path to describe, set after analysis.
-  private Path path_;
-
-  /// The fully qualified name of the root table, set after analysis.
-  private TableName tableName_;
-
-  /// Struct type with the fields to display for the described path.
-  private StructType resultStruct_;
-
-  public DescribeTableStmt(ArrayList<String> rawPath, TDescribeOutputStyle outputStyle) {
-    Preconditions.checkNotNull(rawPath);
-    Preconditions.checkArgument(!rawPath.isEmpty());
-    rawPath_ = rawPath;
-    outputStyle_ = outputStyle;
-    path_ = null;
-    tableName_ = null;
-    resultStruct_ = null;
-  }
-
-  @Override
-  public String toSql() {
-    StringBuilder sb = new StringBuilder("DESCRIBE ");
-    if (outputStyle_ != TDescribeOutputStyle.MINIMAL) {
-      sb.append(outputStyle_.toString() + " ");
-    }
-    return sb.toString() + StringUtils.join(rawPath_, ".");
-  }
-
-  public TableName getTableName() { return tableName_; }
-  public TDescribeOutputStyle getOutputStyle() { return outputStyle_; }
-
-
-  /**
-   * Get the privilege requirement, which depends on the output style.
-   */
-  private Privilege getPrivilegeRequirement() {
-    switch (outputStyle_) {
-      case MINIMAL: return Privilege.ANY;
-      case FORMATTED:
-      case EXTENDED:
-        return Privilege.VIEW_METADATA;
-      default:
-        Preconditions.checkArgument(false);
-        return null;
-    }
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    try {
-      path_ = analyzer.resolvePath(rawPath_, PathType.ANY);
-    } catch (AnalysisException ae) {
-      // Register privilege requests to prefer reporting an authorization error over
-      // an analysis error. We should not accidentally reveal the non-existence of a
-      // table/database if the user is not authorized.
-      if (analyzer.hasMissingTbls()) throw ae;
-      if (rawPath_.size() > 1) {
-        analyzer.registerPrivReq(new PrivilegeRequestBuilder()
-            .onTable(rawPath_.get(0), rawPath_.get(1))
-            .allOf(getPrivilegeRequirement()).toRequest());
-      }
-      analyzer.registerPrivReq(new PrivilegeRequestBuilder()
-          .onTable(analyzer.getDefaultDb(), rawPath_.get(0))
-          .allOf(getPrivilegeRequirement()).toRequest());
-      throw ae;
-    } catch (TableLoadingException tle) {
-      throw new AnalysisException(tle.getMessage(), tle);
-    }
-
-    tableName_ = analyzer.getFqTableName(path_.getRootTable().getTableName());
-    analyzer.getTable(tableName_, getPrivilegeRequirement());
-
-    if (path_.destTable() != null) {
-      resultStruct_ = path_.getRootTable().getHiveColumnsAsStruct();
-    } else if (path_.destType().isComplexType()) {
-      if (outputStyle_ == TDescribeOutputStyle.FORMATTED ||
-          outputStyle_ == TDescribeOutputStyle.EXTENDED) {
-        throw new AnalysisException("DESCRIBE FORMATTED|EXTENDED must refer to a table");
-      }
-      Preconditions.checkState(outputStyle_ == TDescribeOutputStyle.MINIMAL);
-      resultStruct_ = Path.getTypeAsStruct(path_.destType());
-    } else {
-      throw new AnalysisException("Cannot describe path '" +
-          Strings.join(rawPath_, ".") + "' targeting scalar type: " +
-          path_.destType().toSql());
-    }
-  }
-
-  public TDescribeTableParams toThrift() {
-    TDescribeTableParams params = new TDescribeTableParams();
-    params.setTable_name(getTableName().getTbl());
-    params.setDb(getTableName().getDb());
-    params.setOutput_style(outputStyle_);
-    params.setResult_struct(resultStruct_.toThrift());
-    return params;
-  }
-}
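
For orientation, a minimal sketch of driving the class above outside of analysis, using only the constructor and toSql() shown in this hunk; "functional.alltypes" is an illustrative table path and the imports use the pre-rename com.cloudera.impala packages:

    // Illustrative sketch: renders the plain and FORMATTED variants of DESCRIBE.
    import java.util.ArrayList;

    import com.cloudera.impala.analysis.DescribeTableStmt;
    import com.cloudera.impala.thrift.TDescribeOutputStyle;
    import com.google.common.collect.Lists;

    public class DescribeToSqlDemo {
      public static void main(String[] args) {
        ArrayList<String> path = Lists.newArrayList("functional", "alltypes");
        DescribeTableStmt plain = new DescribeTableStmt(path, TDescribeOutputStyle.MINIMAL);
        DescribeTableStmt formatted = new DescribeTableStmt(path, TDescribeOutputStyle.FORMATTED);
        System.out.println(plain.toSql());      // DESCRIBE functional.alltypes
        System.out.println(formatted.toSql());  // DESCRIBE FORMATTED functional.alltypes
        // Path resolution, privilege checks and the result struct are filled in by
        // analyze(Analyzer), which needs a running frontend and is omitted here.
      }
    }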

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/DescriptorTable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/DescriptorTable.java b/fe/src/main/java/com/cloudera/impala/analysis/DescriptorTable.java
deleted file mode 100644
index c0d7571..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/DescriptorTable.java
+++ /dev/null
@@ -1,198 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-
-import org.apache.commons.lang.StringUtils;
-
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.catalog.View;
-import com.cloudera.impala.common.IdGenerator;
-import com.cloudera.impala.thrift.TDescriptorTable;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import com.google.common.collect.Sets;
-
-/**
- * Repository for tuple (and slot) descriptors.
- * Descriptors should only be created through this class, which assigns
- * them unique ids.
- */
-public class DescriptorTable {
-  private final HashMap<TupleId, TupleDescriptor> tupleDescs_ = Maps.newHashMap();
-  private final HashMap<SlotId, SlotDescriptor> slotDescs_ = Maps.newHashMap();
-  private final IdGenerator<TupleId> tupleIdGenerator_ = TupleId.createGenerator();
-  private final IdGenerator<SlotId> slotIdGenerator_ = SlotId.createGenerator();
-  // List of referenced tables with no associated TupleDescriptor to ship to the BE.
-  // For example, the output table of an insert query.
-  private final List<Table> referencedTables_ = Lists.newArrayList();
-  // For each table, the set of partitions that are referenced by at least one scan range.
-  private final HashMap<Table, HashSet<Long>> referencedPartitionsPerTable_ =
-      Maps.newHashMap();
-
-  public TupleDescriptor createTupleDescriptor(String debugName) {
-    TupleDescriptor d = new TupleDescriptor(tupleIdGenerator_.getNextId(), debugName);
-    tupleDescs_.put(d.getId(), d);
-    return d;
-  }
-
-  /**
-   * Create copy of src with new id. The returned descriptor has its mem layout
-   * computed.
-   */
-  public TupleDescriptor copyTupleDescriptor(TupleId srcId, String debugName) {
-    TupleDescriptor d = new TupleDescriptor(tupleIdGenerator_.getNextId(), debugName);
-    tupleDescs_.put(d.getId(), d);
-    // create copies of slots
-    TupleDescriptor src = tupleDescs_.get(srcId);
-    for (SlotDescriptor slot: src.getSlots()) {
-      copySlotDescriptor(d, slot);
-    }
-    d.computeMemLayout();
-    Preconditions.checkState(d.getByteSize() == src.getByteSize());
-    return d;
-  }
-
-  public SlotDescriptor addSlotDescriptor(TupleDescriptor d) {
-    SlotDescriptor result = new SlotDescriptor(slotIdGenerator_.getNextId(), d);
-    d.addSlot(result);
-    slotDescs_.put(result.getId(), result);
-    return result;
-  }
-
-  /**
-   * Append copy of src to dest.
-   */
-  public SlotDescriptor copySlotDescriptor(TupleDescriptor dest, SlotDescriptor src) {
-    SlotDescriptor result = new SlotDescriptor(slotIdGenerator_.getNextId(), dest, src);
-    dest.addSlot(result);
-    slotDescs_.put(result.getId(), result);
-    return result;
-  }
-
-  public TupleDescriptor getTupleDesc(TupleId id) { return tupleDescs_.get(id); }
-  public SlotDescriptor getSlotDesc(SlotId id) { return slotDescs_.get(id); }
-  public Collection<TupleDescriptor> getTupleDescs() { return tupleDescs_.values(); }
-  public Collection<SlotDescriptor> getSlotDescs() { return slotDescs_.values(); }
-  public TupleId getMaxTupleId() { return tupleIdGenerator_.getMaxId(); }
-  public SlotId getMaxSlotId() { return slotIdGenerator_.getMaxId(); }
-
-  public void addReferencedTable(Table table) {
-    referencedTables_.add(table);
-  }
-
-  /**
-   * Find the set of referenced partitions for the given table.  Allocates a set if
-   * none has been allocated for the table yet.
-   */
-  private HashSet<Long> getReferencedPartitions(Table table) {
-    HashSet<Long> refPartitions = referencedPartitionsPerTable_.get(table);
-    if (refPartitions == null) {
-      refPartitions = new HashSet<Long>();
-      referencedPartitionsPerTable_.put(table, refPartitions);
-    }
-    return refPartitions;
-  }
-
-  /**
-   * Add the partition with ID partitionId to the set of referenced partitions for the
-   * given table.
-   */
-  public void addReferencedPartition(Table table, long partitionId) {
-    getReferencedPartitions(table).add(partitionId);
-  }
-
-  /**
-   * Marks all slots in list as materialized.
-   */
-  public void markSlotsMaterialized(List<SlotId> ids) {
-    for (SlotId id: ids) {
-      getSlotDesc(id).setIsMaterialized(true);
-    }
-  }
-
-  /**
-   * Return all ids in slotIds that belong to tupleId.
-   */
-  public List<SlotId> getTupleSlotIds(List<SlotId> slotIds, TupleId tupleId) {
-    List<SlotId> result = Lists.newArrayList();
-    for (SlotId id: slotIds) {
-      if (getSlotDesc(id).getParent().getId().equals(tupleId)) result.add(id);
-    }
-    return result;
-  }
-
-  // Computes physical layout parameters of all descriptors.
-  // Call this only after the last descriptor was added.
-  // Test-only.
-  public void computeMemLayout() {
-    for (TupleDescriptor d: tupleDescs_.values()) {
-      d.computeMemLayout();
-    }
-  }
-
-  public TDescriptorTable toThrift() {
-    TDescriptorTable result = new TDescriptorTable();
-    HashSet<Table> referencedTbls = Sets.newHashSet();
-    HashSet<Table> allPartitionsTbls = Sets.newHashSet();
-    for (TupleDescriptor tupleDesc: tupleDescs_.values()) {
-      // inline view of a non-constant select has a non-materialized tuple descriptor
-      // in the descriptor table just for type checking, which we need to skip
-      if (tupleDesc.isMaterialized()) {
-        // TODO: Ideally, we should call tupleDesc.checkIsExecutable() here, but there
-        // currently are several situations in which we send materialized tuples without
-        // a mem layout to the BE, e.g., when unnesting unions or when replacing plan
-        // trees with an EmptySetNode.
-        result.addToTupleDescriptors(tupleDesc.toThrift());
-        Table table = tupleDesc.getTable();
-        if (table != null && !(table instanceof View)) referencedTbls.add(table);
-        // Only serialize materialized slots
-        for (SlotDescriptor slotD: tupleDesc.getMaterializedSlots()) {
-          result.addToSlotDescriptors(slotD.toThrift());
-        }
-      }
-    }
-    for (Table table: referencedTables_) {
-      referencedTbls.add(table);
-      // We don't know which partitions are needed for INSERT, so include them all.
-      allPartitionsTbls.add(table);
-    }
-    for (Table tbl: referencedTbls) {
-      HashSet<Long> referencedPartitions = null; // null means include all partitions.
-      if (!allPartitionsTbls.contains(tbl)) {
-        referencedPartitions = getReferencedPartitions(tbl);
-      }
-      result.addToTableDescriptors(tbl.toThriftDescriptor(referencedPartitions));
-    }
-    return result;
-  }
-
-  public String debugString() {
-    StringBuilder out = new StringBuilder();
-    out.append("tuples:\n");
-    for (TupleDescriptor desc: tupleDescs_.values()) {
-      out.append(desc.debugString() + "\n");
-    }
-    return out.toString();
-  }
-}
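
A small sketch of the repository pattern from the class comment: descriptors are created only through the DescriptorTable, which hands out unique ids. Names are illustrative; computeMemLayout() and toThrift() are left as comments because they require slot types to be resolved first:

    // Illustrative sketch of creating descriptors through the central table.
    import com.cloudera.impala.analysis.DescriptorTable;
    import com.cloudera.impala.analysis.SlotDescriptor;
    import com.cloudera.impala.analysis.TupleDescriptor;

    public class DescriptorTableDemo {
      public static void main(String[] args) {
        DescriptorTable descTbl = new DescriptorTable();
        // Each call pulls a fresh id from the table's internal IdGenerators.
        TupleDescriptor scanTuple = descTbl.createTupleDescriptor("demo-scan-tuple");
        SlotDescriptor scanSlot = descTbl.addSlotDescriptor(scanTuple);
        // Planner or test code would later mark slots as materialized and compute
        // the memory layout, e.g. descTbl.markSlotsMaterialized(...) and
        // descTbl.computeMemLayout(), before calling toThrift().
      }
    }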

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/DistributeParam.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/DistributeParam.java b/fe/src/main/java/com/cloudera/impala/analysis/DistributeParam.java
deleted file mode 100644
index e718d6b..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/DistributeParam.java
+++ /dev/null
@@ -1,199 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.math.BigDecimal;
-import java.util.ArrayList;
-import java.util.List;
-
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TDistributeByHashParam;
-import com.cloudera.impala.thrift.TDistributeByRangeParam;
-import com.cloudera.impala.thrift.TDistributeParam;
-import com.cloudera.impala.thrift.TDistributeType;
-import com.cloudera.impala.thrift.TRangeLiteral;
-import com.cloudera.impala.thrift.TRangeLiteralList;
-import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-/**
- * Represents the information of
- *
- * DISTRIBUTE BY HASH[(col_def_list)] INTO n BUCKETS
- * DISTRIBUTE BY RANGE[(col_def_list)] SPLIT ROWS ( (v1,v2,v3), ...)
- *
- * clauses in CREATE TABLE statements, where available, e.g. Kudu.
- *
- * A table can be hash or range partitioned, or combinations of both. A distribute
- * clause represents one particular distribution rule. For both HASH and RANGE types,
- * some of the error checking is done during the analysis, but most of it is deferred
- * until the table is actually created.
-  */
-public class DistributeParam implements ParseNode {
-
-  /**
-   * Creates a DistributeParam partitioned by hash.
-   */
-  public static DistributeParam createHashParam(List<String> cols, BigDecimal buckets) {
-    return new DistributeParam(Type.HASH, cols, buckets);
-  }
-
-  /**
-   * Creates a DistributeParam partitioned by range.
-   */
-  public static DistributeParam createRangeParam(List<String> cols,
-      ArrayList<ArrayList<LiteralExpr>> splitRows) {
-    return new DistributeParam(Type.RANGE, cols, splitRows);
-  }
-
-  private static final int NO_BUCKETS = -1;
-
-  /**
-   * The type of the distribution rule.
-   */
-  public enum Type {
-    HASH, RANGE
-  };
-
-  private List<String> columns_;
-
-  private final Type type_;
-
-  // Only relevant for hash partitioning, -1 otherwise
-  private final int num_buckets_;
-
-  // Only relevant for range partitioning, null otherwise
-  private final ArrayList<ArrayList<LiteralExpr>> splitRows_;
-
-  // Set in analyze()
-  private TDistributeByRangeParam rangeParam_;
-
-  private DistributeParam(Type t, List<String> cols, BigDecimal buckets) {
-    type_ = t;
-    columns_ = cols;
-    num_buckets_ = buckets.intValue();
-    splitRows_ = null;
-  }
-
-  private DistributeParam(Type t, List<String> cols,
-      ArrayList<ArrayList<LiteralExpr>> splitRows) {
-    type_ = t;
-    columns_ = cols;
-    splitRows_ = splitRows;
-    num_buckets_ = NO_BUCKETS;
-  }
-
-  /**
-   * TODO Refactor the logic below to analyze 'columns_'. This analysis should output
-   * a vector of column types that would then be used during the analysis of the split
-   * rows.
-   */
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    if (type_ == Type.HASH && num_buckets_ <= 1) {
-      throw new AnalysisException(String.format(
-          "Number of buckets in DISTRIBUTE BY clause '%s' must be larger than 1.",
-          toSql()));
-    } else if (type_ == Type.RANGE) {
-      // Creating the thrift structure simultaneously checks for semantic errors
-      rangeParam_ = new TDistributeByRangeParam();
-      rangeParam_.setColumns(columns_);
-
-      for (ArrayList<LiteralExpr> splitRow : splitRows_) {
-        TRangeLiteralList list = new TRangeLiteralList();
-        if (splitRow.size() != columns_.size()) {
-          throw new AnalysisException(String.format(
-              "SPLIT ROWS has different size than number of projected key columns: %d. "
-                  + "Split row: %s", columns_.size(), splitRowToString(splitRow)));
-        }
-        for (LiteralExpr expr : splitRow) {
-          expr.analyze(analyzer);
-          TRangeLiteral literal = new TRangeLiteral();
-          if (expr instanceof NumericLiteral) {
-            NumericLiteral num = (NumericLiteral) expr;
-            if (num.getType().isDecimal() || num.getType().isFloatingPointType()) {
-              throw new AnalysisException("Only integral and string values allowed for" +
-                  " split rows.");
-            } else {
-              literal.setInt_literal(num.getIntValue());
-            }
-          } else if (expr instanceof StringLiteral) {
-            StringLiteral string = (StringLiteral) expr;
-            literal.setString_literal(string.getStringValue());
-          } else if (expr instanceof BoolLiteral) {
-            BoolLiteral bool = (BoolLiteral) expr;
-            literal.setBool_literal(bool.getValue());
-          } else {
-            throw new AnalysisException(String.format("Split row value is not supported: "
-                + "%s (Type: %s).", expr.getStringValue(), expr.getType().toSql()));
-          }
-          list.addToValues(literal);
-        }
-        rangeParam_.addToSplit_rows(list);
-      }
-    }
-  }
-
-  @Override
-  public String toSql() {
-    if (num_buckets_ == NO_BUCKETS) {
-      List<String> splitRowStrings = Lists.newArrayList();
-      for (ArrayList<LiteralExpr> splitRow : splitRows_) {
-        splitRowStrings.add(splitRowToString(splitRow));
-      }
-      return String.format("RANGE(%s) INTO RANGES(%s)", Joiner.on(", ").join(columns_),
-          Joiner.on(", ").join(splitRowStrings));
-    } else {
-      return String.format("HASH(%s) INTO %d BUCKETS", Joiner.on(", ").join(columns_),
-          num_buckets_);
-    }
-  }
-
-  private String splitRowToString(ArrayList<LiteralExpr> splitRow) {
-    StringBuilder builder = new StringBuilder();
-    builder.append("(");
-    List<String> rangeElementStrings = Lists.newArrayList();
-    for (LiteralExpr rangeElement : splitRow) {
-      rangeElementStrings.add(rangeElement.toSql());
-    }
-    builder.append(Joiner.on(", ").join(rangeElementStrings));
-    builder.append(")");
-    return builder.toString();
-  }
-
-  TDistributeParam toThrift() {
-    TDistributeParam result = new TDistributeParam();
-    if (type_ == Type.HASH) {
-      TDistributeByHashParam hash = new TDistributeByHashParam();
-      hash.setNum_buckets(num_buckets_);
-      hash.setColumns(columns_);
-      result.setBy_hash_param(hash);
-    } else {
-      Preconditions.checkState(type_ == Type.RANGE);
-
-      result.setBy_range_param(rangeParam_);
-    }
-    return result;
-  }
-
-  public List<String> getColumns() { return columns_; }
-  public void setColumns(List<String> cols) { columns_ = cols; }
-  public Type getType_() { return type_; }
-  public int getNumBuckets() { return num_buckets_; }
-}
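
The HASH variant can be exercised without an Analyzer because toSql() only formats the column list and bucket count; the RANGE variant defers most of its checking to analyze(), as the class comment notes. The column name and bucket count below are illustrative:

    import java.math.BigDecimal;

    import com.cloudera.impala.analysis.DistributeParam;
    import com.google.common.collect.Lists;

    public class DistributeParamDemo {
      public static void main(String[] args) {
        // DISTRIBUTE BY HASH(id) INTO 16 BUCKETS, round-tripped through toSql():
        DistributeParam hash =
            DistributeParam.createHashParam(Lists.newArrayList("id"), new BigDecimal(16));
        System.out.println(hash.toSql());  // HASH(id) INTO 16 BUCKETS
      }
    }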

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/DropDataSrcStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/DropDataSrcStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/DropDataSrcStmt.java
deleted file mode 100644
index f5642fa..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/DropDataSrcStmt.java
+++ /dev/null
@@ -1,61 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
-
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TDropDataSourceParams;
-import com.google.common.base.Preconditions;
-
-/**
- * Represents a DROP DATA SOURCE statement.
- */
-public class DropDataSrcStmt extends StatementBase {
-
-  private final String dataSrcName_;
-  private final boolean ifExists_;
-
-  public DropDataSrcStmt(String dataSrcName, boolean ifExists) {
-    Preconditions.checkNotNull(dataSrcName);
-    this.dataSrcName_ = dataSrcName.toLowerCase();
-    this.ifExists_ = ifExists;
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    if (!MetaStoreUtils.validateName(dataSrcName_) ||
-        (!ifExists_ && analyzer.getCatalog().getDataSource(dataSrcName_) == null)) {
-      throw new AnalysisException(Analyzer.DATA_SRC_DOES_NOT_EXIST_ERROR_MSG +
-          dataSrcName_);
-    }
-  }
-
-  @Override
-  public String toSql() {
-    StringBuilder sb = new StringBuilder();
-    sb.append("DROP DATA SOURCE ");
-    if (ifExists_) sb.append("IF EXISTS ");
-    sb.append(dataSrcName_);
-    return sb.toString();
-  }
-
-  public TDropDataSourceParams toThrift() {
-    return new TDropDataSourceParams(dataSrcName_).setIf_exists(ifExists_);
-  }
-}
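
A one-line sketch of the statement above; the data source name is illustrative and analyze(), which validates the name against the catalog, is skipped:

    import com.cloudera.impala.analysis.DropDataSrcStmt;

    public class DropDataSrcDemo {
      public static void main(String[] args) {
        DropDataSrcStmt stmt = new DropDataSrcStmt("my_ext_source", /*ifExists*/ true);
        System.out.println(stmt.toSql());  // DROP DATA SOURCE IF EXISTS my_ext_source
        stmt.toThrift();                   // name (lower-cased) plus the if_exists flag
      }
    }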

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/DropDbStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/DropDbStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/DropDbStmt.java
deleted file mode 100644
index af7fae1..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/DropDbStmt.java
+++ /dev/null
@@ -1,79 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.catalog.Db;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TDropDbParams;
-
-/**
- * Represents a DROP [IF EXISTS] DATABASE [CASCADE | RESTRICT] statement
- */
-public class DropDbStmt extends StatementBase {
-  private final String dbName_;
-  private final boolean ifExists_;
-  private final boolean cascade_;
-
-  /**
-   * Constructor for building the drop statement. If ifExists is true, an error will not
-   * be thrown if the database does not exist. If cascade is true, all the tables in the
-   * database will be dropped.
-   */
-  public DropDbStmt(String dbName, boolean ifExists, boolean cascade) {
-    this.dbName_ = dbName;
-    this.ifExists_ = ifExists;
-    this.cascade_ = cascade;
-  }
-
-  public String getDb() { return dbName_; }
-  public boolean getIfExists() { return ifExists_; }
-  public boolean getCascade() { return cascade_; }
-
-  @Override
-  public String toSql() {
-    StringBuilder sb = new StringBuilder("DROP DATABASE");
-    if (ifExists_) sb.append(" IF EXISTS ");
-    sb.append(getDb());
-    if (cascade_) sb.append(" CASCADE");
-    return sb.toString();
-  }
-
-  public TDropDbParams toThrift() {
-    TDropDbParams params = new TDropDbParams();
-    params.setDb(getDb());
-    params.setIf_exists(getIfExists());
-    params.setCascade(getCascade());
-    return params;
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    Db db = analyzer.getDb(dbName_, Privilege.DROP, false);
-    if (db == null && !ifExists_) {
-      throw new AnalysisException(Analyzer.DB_DOES_NOT_EXIST_ERROR_MSG + dbName_);
-    }
-
-    if (analyzer.getDefaultDb().toLowerCase().equals(dbName_.toLowerCase())) {
-      throw new AnalysisException("Cannot drop current default database: " + dbName_);
-    }
-    if (db != null && db.numFunctions() > 0 && !cascade_) {
-      throw new AnalysisException("Cannot drop non-empty database: " + dbName_);
-    }
-  }
-}
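
Construction and SQL rendering need no Analyzer; only analyze() consults the catalog and enforces the default-database and non-empty checks above. The database name is illustrative:

    import com.cloudera.impala.analysis.DropDbStmt;

    public class DropDbDemo {
      public static void main(String[] args) {
        DropDbStmt stmt = new DropDbStmt("tpch_kudu", /*ifExists*/ true, /*cascade*/ false);
        System.out.println(stmt.toSql());  // DROP DATABASE IF EXISTS tpch_kudu
        stmt.toThrift();                   // carries db, if_exists and cascade to the catalog op
      }
    }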

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/DropFunctionStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/DropFunctionStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/DropFunctionStmt.java
deleted file mode 100644
index 39f5ff9..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/DropFunctionStmt.java
+++ /dev/null
@@ -1,113 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import com.cloudera.impala.authorization.AuthorizeableFn;
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.authorization.PrivilegeRequest;
-import com.cloudera.impala.catalog.Db;
-import com.cloudera.impala.catalog.Function;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TDropFunctionParams;
-import com.cloudera.impala.thrift.TFunctionCategory;
-
-import java.util.ArrayList;
-
-/**
- * Represents a DROP [IF EXISTS] FUNCTION statement
- * TODO: try to consolidate this with the other Drop*Stmt class, perhaps
- * by adding a DropStatementBase class.
- */
-public class DropFunctionStmt extends StatementBase {
-  private final FunctionName fnName_;
-  private final FunctionArgs fnArgs_;
-  private final boolean ifExists_;
-
-  // Set in analyze().
-  private Function desc_;
-
-  /**
-   * Constructor for building the drop statement. If ifExists is true, an error will not
-   * be thrown if the function does not exist.
-   */
-  public DropFunctionStmt(FunctionName fnName, FunctionArgs fnArgs, boolean ifExists) {
-    fnName_ = fnName;
-    fnArgs_ = fnArgs;
-    ifExists_ = ifExists;
-  }
-
-  public FunctionName getFunction() { return desc_.getFunctionName(); }
-  public boolean getIfExists() { return ifExists_; }
-  private boolean hasSignature() { return fnArgs_ != null; }
-
-  @Override
-  public String toSql() {
-    StringBuilder sb = new StringBuilder("DROP FUNCTION");
-    if (ifExists_) sb.append(" IF EXISTS ");
-    sb.append(desc_.signatureString());
-    sb.append(")");
-    return sb.toString();
-  }
-
-  public TDropFunctionParams toThrift() {
-    TDropFunctionParams params = new TDropFunctionParams();
-    params.setFn_name(desc_.getFunctionName().toThrift());
-    params.setArg_types(Type.toThrift(desc_.getArgs()));
-    params.setIf_exists(getIfExists());
-    if (hasSignature()) params.setSignature(desc_.signatureString());
-    return params;
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    fnName_.analyze(analyzer);
-
-    if (hasSignature()) {
-      fnArgs_.analyze(analyzer);
-      desc_ = new Function(fnName_, fnArgs_.getArgTypes(), Type.INVALID,
-          fnArgs_.hasVarArgs());
-    } else {
-      desc_ = new Function(fnName_, new ArrayList<Type>(), Type.INVALID,
-          false);
-    }
-
-    // For now, if authorization is enabled, the user needs ALL on the server
-    // to drop functions.
-    // TODO: this is not the right granularity but acceptable for now.
-    analyzer.registerPrivReq(new PrivilegeRequest(
-        new AuthorizeableFn(desc_.signatureString()), Privilege.ALL));
-
-    Db db =  analyzer.getDb(desc_.dbName(), Privilege.DROP, false);
-    if (db == null && !ifExists_) {
-      throw new AnalysisException(Analyzer.DB_DOES_NOT_EXIST_ERROR_MSG + desc_.dbName());
-    }
-
-    if (!hasSignature() && db != null && db.getFunctions(
-        desc_.functionName()).isEmpty() && !ifExists_) {
-      throw new AnalysisException(
-          Analyzer.FN_DOES_NOT_EXIST_ERROR_MSG + desc_.functionName());
-    }
-
-    if (hasSignature() && analyzer.getCatalog().getFunction(
-        desc_, Function.CompareMode.IS_IDENTICAL) == null && !ifExists_) {
-      throw new AnalysisException(
-          Analyzer.FN_DOES_NOT_EXIST_ERROR_MSG + desc_.signatureString());
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/DropStatsStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/DropStatsStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/DropStatsStmt.java
deleted file mode 100644
index 90f9434..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/DropStatsStmt.java
+++ /dev/null
@@ -1,105 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TDropStatsParams;
-import com.cloudera.impala.thrift.TTableName;
-import com.google.common.base.Preconditions;
-
-/**
- * Represents both a DROP STATS statement, and the DROP INCREMENTAL STATS <tbl> PARTITION
- * <part_spec> variant.
- */
-public class DropStatsStmt extends StatementBase {
-  protected final TableName tableName_;
-
-  // If non-null, only drop the statistics for a given partition
-  PartitionSpec partitionSpec_ = null;
-
-  // Set during analysis
-  protected String dbName_;
-
-  /**
-   * Constructor for building the DROP TABLE/VIEW statement
-   */
-  public DropStatsStmt(TableName tableName) {
-    this.tableName_ = tableName;
-  }
-
-  public DropStatsStmt(TableName tableName, PartitionSpec partSpec) {
-    this.tableName_ = tableName;
-    this.partitionSpec_ = partSpec;
-  }
-
-  @Override
-  public String toSql() {
-    StringBuilder sb = new StringBuilder("DROP ");
-    if (partitionSpec_ == null) {
-      sb.append(" STATS ");
-      if (tableName_.getDb() != null) sb.append(tableName_.getDb() + ".");
-      sb.append(tableName_.toSql());
-    } else {
-      sb.append(" INCREMENTAL STATS ");
-      if (tableName_.getDb() != null) sb.append(tableName_.getDb() + ".");
-      sb.append(tableName_.toSql());
-      sb.append(partitionSpec_.toSql());
-    }
-    return sb.toString();
-  }
-
-  public TDropStatsParams toThrift() {
-    TDropStatsParams params = new TDropStatsParams();
-    params.setTable_name(new TTableName(getDb(), getTbl()));
-
-    if (partitionSpec_ != null) {
-      params.setPartition_spec(partitionSpec_.toThrift());
-    }
-    return params;
-  }
-
-  /**
-   * Checks that the given table exists and the user has privileges
-   * to drop stats on this table.
-   */
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    dbName_ = analyzer.getTargetDbName(tableName_);
-    Table table = analyzer.getTable(tableName_, Privilege.ALTER);
-    Preconditions.checkNotNull(table);
-    if (partitionSpec_ != null) {
-      partitionSpec_.setTableName(tableName_);
-      partitionSpec_.setPrivilegeRequirement(Privilege.ALTER);
-      partitionSpec_.setPartitionShouldExist();
-      partitionSpec_.analyze(analyzer);
-    }
-  }
-
-  /**
-   * Can only be called after analysis. Returns the name of the database that
-   * the target drop table resides in.
-   */
-  public String getDb() {
-    Preconditions.checkNotNull(dbName_);
-    return dbName_;
-  }
-
-  public String getTbl() { return tableName_.getTbl(); }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/DropTableOrViewStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/DropTableOrViewStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/DropTableOrViewStmt.java
deleted file mode 100644
index 8371ace..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/DropTableOrViewStmt.java
+++ /dev/null
@@ -1,115 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.catalog.View;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TDropTableOrViewParams;
-import com.cloudera.impala.thrift.TTableName;
-import com.google.common.base.Preconditions;
-
-/**
- * Represents a DROP TABLE/VIEW [IF EXISTS] statement
- */
-public class DropTableOrViewStmt extends StatementBase {
-  protected final TableName tableName_;
-  protected final boolean ifExists_;
-
-  // True if we are dropping a table. False if we are dropping a view.
-  protected final boolean dropTable_;
-
-  // Setting this value causes dropped tables to be permanently
-  // deleted. For example, for hdfs tables it skips the trash directory
-  protected final boolean purgeTable_;
-
-  // Set during analysis
-  protected String dbName_;
-
-  /**
-   * Constructor for building the DROP TABLE/VIEW statement
-   */
-  public DropTableOrViewStmt(TableName tableName, boolean ifExists,
-      boolean dropTable, boolean purgeTable) {
-    tableName_ = tableName;
-    ifExists_ = ifExists;
-    dropTable_ = dropTable;
-    purgeTable_ = purgeTable;
-    // PURGE with a view is not allowed.
-    Preconditions.checkState(!(!dropTable_ && purgeTable_));
-  }
-
-  @Override
-  public String toSql() {
-    StringBuilder sb = new StringBuilder("DROP " + ((dropTable_) ? "TABLE " : "VIEW "));
-    if (ifExists_) sb.append("IF EXISTS ");
-    if (tableName_.getDb() != null) sb.append(tableName_.getDb() + ".");
-    sb.append(tableName_.getTbl());
-    if (purgeTable_) sb.append(" PURGE");
-    return sb.toString();
-  }
-
-  public TDropTableOrViewParams toThrift() {
-    TDropTableOrViewParams params = new TDropTableOrViewParams();
-    params.setTable_name(new TTableName(getDb(), getTbl()));
-    params.setIf_exists(ifExists_);
-    params.setPurge(purgeTable_);
-    params.setIs_table(dropTable_);
-    return params;
-  }
-
-  /**
-   * 1. Checks that the user has privileges to DROP the given table/view
-   * 2. Checks that the database and table exists
-   * 3. Checks that the table type (TABLE/VIEW) matches the DROP TABLE/VIEW statement
-   * Note: Do not analyze tableName because we prefer to report an error indicating
-   * that the table/view does not exist even if the table/view name is invalid.
-   */
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    dbName_ = analyzer.getTargetDbName(tableName_);
-    try {
-      Table table = analyzer.getTable(tableName_, Privilege.DROP);
-      Preconditions.checkNotNull(table);
-      if (table instanceof View && dropTable_) {
-        throw new AnalysisException(String.format(
-            "DROP TABLE not allowed on a view: %s.%s", dbName_, getTbl()));
-      }
-      if (!(table instanceof View) && !dropTable_) {
-        throw new AnalysisException(String.format(
-            "DROP VIEW not allowed on a table: %s.%s", dbName_, getTbl()));
-      }
-    } catch (AnalysisException e) {
-      if (ifExists_ && analyzer.getMissingTbls().isEmpty()) return;
-      throw e;
-    }
-  }
-
-  /**
-   * Can only be called after analysis. Returns the name of the database that
-   * the target drop table resides in.
-   */
-  public String getDb() {
-    Preconditions.checkNotNull(dbName_);
-    return dbName_;
-  }
-
-  public String getTbl() { return tableName_.getTbl(); }
-  public boolean isDropTable() { return dropTable_; }
-}
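
A sketch of the PURGE form. It assumes TableName's (db, table) constructor, which is not part of this hunk, and uses made-up names; per the constructor precondition, PURGE is only legal when dropping a table, not a view:

    import com.cloudera.impala.analysis.DropTableOrViewStmt;
    import com.cloudera.impala.analysis.TableName;

    public class DropTableDemo {
      public static void main(String[] args) {
        DropTableOrViewStmt stmt = new DropTableOrViewStmt(
            new TableName("functional", "alltypes"),  // assumed (db, table) constructor
            /*ifExists*/ true, /*dropTable*/ true, /*purgeTable*/ true);
        System.out.println(stmt.toSql());  // DROP TABLE IF EXISTS functional.alltypes PURGE
      }
    }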

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/EquivalenceClassId.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/EquivalenceClassId.java b/fe/src/main/java/com/cloudera/impala/analysis/EquivalenceClassId.java
deleted file mode 100644
index df658b9..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/EquivalenceClassId.java
+++ /dev/null
@@ -1,37 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import com.cloudera.impala.common.Id;
-import com.cloudera.impala.common.IdGenerator;
-
-public class EquivalenceClassId extends Id<EquivalenceClassId> {
-  // Construction only allowed via an IdGenerator.
-  protected EquivalenceClassId(int id) {
-    super(id);
-  }
-
-  public static IdGenerator<EquivalenceClassId> createGenerator() {
-    return new IdGenerator<EquivalenceClassId>() {
-      @Override
-      public EquivalenceClassId getNextId() { return new EquivalenceClassId(nextId_++); }
-      @Override
-      public EquivalenceClassId getMaxId() { return new EquivalenceClassId(nextId_ - 1); }
-    };
-  }
-}
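
The planner's Id subclasses all follow this pattern: construction is hidden behind an IdGenerator so that ids handed out during one analysis are unique and dense. A minimal sketch:

    import com.cloudera.impala.analysis.EquivalenceClassId;
    import com.cloudera.impala.common.IdGenerator;

    public class EquivalenceClassIdDemo {
      public static void main(String[] args) {
        IdGenerator<EquivalenceClassId> gen = EquivalenceClassId.createGenerator();
        EquivalenceClassId first = gen.getNextId();   // a fresh id
        EquivalenceClassId second = gen.getNextId();  // the next id in sequence
        // getMaxId() reports the highest id handed out so far, here equal to 'second'.
      }
    }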

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/ExistsPredicate.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/ExistsPredicate.java b/fe/src/main/java/com/cloudera/impala/analysis/ExistsPredicate.java
deleted file mode 100644
index da984eb..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/ExistsPredicate.java
+++ /dev/null
@@ -1,82 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TExprNode;
-import com.google.common.base.Preconditions;
-
-/**
- * Class representing a [NOT] EXISTS predicate.
- */
-public class ExistsPredicate extends Predicate {
-  private final static Logger LOG = LoggerFactory.getLogger(
-      ExistsPredicate.class);
-  private boolean notExists_ = false;
-
-  public boolean isNotExists() { return notExists_; }
-
-  /**
-   * C'tor that initializes an ExistsPredicate from a Subquery.
-   */
-  public ExistsPredicate(Subquery subquery, boolean notExists) {
-    Preconditions.checkNotNull(subquery);
-    children_.add(subquery);
-    notExists_ = notExists;
-  }
-
-  @Override
-  public Expr negate() {
-    return new ExistsPredicate((Subquery)getChild(0), !notExists_);
-  }
-
-  /**
-   * Copy c'tor used in clone.
-   */
-  public ExistsPredicate(ExistsPredicate other) {
-    super(other);
-    notExists_ = other.notExists_;
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    if (isAnalyzed_) return;
-    super.analyze(analyzer);
-  }
-
-  @Override
-  protected void toThrift(TExprNode msg) {
-    // Cannot serialize a nested predicate
-    Preconditions.checkState(false);
-  }
-
-  @Override
-  public Expr clone() { return new ExistsPredicate(this); }
-
-  @Override
-  public String toSqlImpl() {
-    StringBuilder strBuilder = new StringBuilder();
-    if (notExists_) strBuilder.append("NOT ");
-    strBuilder.append("EXISTS ");
-    strBuilder.append(getChild(0).toSql());
-    return strBuilder.toString();
-  }
-}



[06/61] [partial] incubator-impala git commit: IMPALA-3786: Replace "cloudera" with "apache" (part 1)

Posted by kw...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/ArithmeticExpr.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/ArithmeticExpr.java b/fe/src/main/java/org/apache/impala/analysis/ArithmeticExpr.java
new file mode 100644
index 0000000..bf8b0ea
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/ArithmeticExpr.java
@@ -0,0 +1,268 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import com.cloudera.impala.catalog.Db;
+import com.cloudera.impala.catalog.Function.CompareMode;
+import com.cloudera.impala.catalog.ScalarFunction;
+import com.cloudera.impala.catalog.ScalarType;
+import com.cloudera.impala.catalog.Type;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.thrift.TExprNode;
+import com.cloudera.impala.thrift.TExprNodeType;
+import com.google.common.base.Objects;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+
+public class ArithmeticExpr extends Expr {
+  enum OperatorPosition {
+    BINARY_INFIX,
+    UNARY_PREFIX,
+    UNARY_POSTFIX,
+  }
+
+  enum Operator {
+    MULTIPLY("*", "multiply", OperatorPosition.BINARY_INFIX),
+    DIVIDE("/", "divide", OperatorPosition.BINARY_INFIX),
+    MOD("%", "mod", OperatorPosition.BINARY_INFIX),
+    INT_DIVIDE("DIV", "int_divide", OperatorPosition.BINARY_INFIX),
+    ADD("+", "add", OperatorPosition.BINARY_INFIX),
+    SUBTRACT("-", "subtract", OperatorPosition.BINARY_INFIX),
+    BITAND("&", "bitand", OperatorPosition.BINARY_INFIX),
+    BITOR("|", "bitor", OperatorPosition.BINARY_INFIX),
+    BITXOR("^", "bitxor", OperatorPosition.BINARY_INFIX),
+    BITNOT("~", "bitnot", OperatorPosition.UNARY_PREFIX),
+    FACTORIAL("!", "factorial", OperatorPosition.UNARY_POSTFIX);
+
+    private final String description_;
+    private final String name_;
+    private final OperatorPosition pos_;
+
+    private Operator(String description, String name, OperatorPosition pos) {
+      this.description_ = description;
+      this.name_ = name;
+      this.pos_ = pos;
+    }
+
+    @Override
+    public String toString() { return description_; }
+    public String getName() { return name_; }
+    public OperatorPosition getPos() { return pos_; }
+
+    public boolean isUnary() {
+      return pos_ == OperatorPosition.UNARY_PREFIX ||
+             pos_ == OperatorPosition.UNARY_POSTFIX;
+    }
+
+    public boolean isBinary() {
+      return pos_ == OperatorPosition.BINARY_INFIX;
+    }
+  }
+
+  private final Operator op_;
+
+  public Operator getOp() { return op_; }
+
+  public ArithmeticExpr(Operator op, Expr e1, Expr e2) {
+    super();
+    this.op_ = op;
+    Preconditions.checkNotNull(e1);
+    children_.add(e1);
+    Preconditions.checkArgument((op.isUnary() && e2 == null) ||
+        (op.isBinary() && e2 != null));
+    if (e2 != null) children_.add(e2);
+  }
+
+  /**
+   * Copy c'tor used in clone().
+   */
+  protected ArithmeticExpr(ArithmeticExpr other) {
+    super(other);
+    op_ = other.op_;
+  }
+
+  public static void initBuiltins(Db db) {
+    for (Type t: Type.getNumericTypes()) {
+      db.addBuiltin(ScalarFunction.createBuiltinOperator(
+          Operator.MULTIPLY.getName(), Lists.newArrayList(t, t), t));
+      db.addBuiltin(ScalarFunction.createBuiltinOperator(
+          Operator.ADD.getName(), Lists.newArrayList(t, t), t));
+      db.addBuiltin(ScalarFunction.createBuiltinOperator(
+          Operator.SUBTRACT.getName(), Lists.newArrayList(t, t), t));
+    }
+    db.addBuiltin(ScalarFunction.createBuiltinOperator(
+        Operator.DIVIDE.getName(),
+        Lists.<Type>newArrayList(Type.DOUBLE, Type.DOUBLE),
+        Type.DOUBLE));
+    db.addBuiltin(ScalarFunction.createBuiltinOperator(
+        Operator.DIVIDE.getName(),
+        Lists.<Type>newArrayList(Type.DECIMAL, Type.DECIMAL),
+        Type.DECIMAL));
+
+    /*
+     * MOD(), FACTORIAL(), BITAND(), BITOR(), BITXOR(), and BITNOT() are registered as
+     * builtins, see impala_functions.py
+     */
+    for (Type t: Type.getIntegerTypes()) {
+      db.addBuiltin(ScalarFunction.createBuiltinOperator(
+          Operator.INT_DIVIDE.getName(), Lists.newArrayList(t, t), t));
+    }
+  }
+
+  @Override
+  public String debugString() {
+    return Objects.toStringHelper(this)
+        .add("op", op_)
+        .addValue(super.debugString())
+        .toString();
+  }
+
+  @Override
+  public String toSqlImpl() {
+    if (children_.size() == 1) {
+      if (op_.getPos() == OperatorPosition.UNARY_PREFIX) {
+        return op_.toString() + getChild(0).toSql();
+      } else {
+        assert(op_.getPos() == OperatorPosition.UNARY_POSTFIX);
+        return getChild(0).toSql() + op_.toString();
+      }
+    } else {
+      Preconditions.checkState(children_.size() == 2);
+      return getChild(0).toSql() + " " + op_.toString() + " " + getChild(1).toSql();
+    }
+  }
+
+  @Override
+  protected void toThrift(TExprNode msg) {
+    msg.node_type = TExprNodeType.FUNCTION_CALL;
+  }
+
+  /**
+   * Inserts a cast from child[childIdx] to targetType if one is necessary.
+   * Note this is different from Expr.castChild() since arithmetic for decimals
+   * the cast is handled as part of the operator and in general, the return type
+   * does not match the input types.
+   */
+  void castChild(int childIdx, Type targetType) throws AnalysisException {
+    Type t = getChild(childIdx).getType();
+    if (t.matchesType(targetType)) return;
+    if (targetType.isDecimal() && !t.isNull()) {
+      Preconditions.checkState(t.isScalarType());
+      targetType = ((ScalarType) t).getMinResolutionDecimal();
+    }
+    castChild(targetType, childIdx);
+  }
+
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    if (isAnalyzed_) return;
+    super.analyze(analyzer);
+    for (Expr child: children_) {
+      Expr operand = (Expr) child;
+      if (!operand.type_.isNumericType() && !operand.type_.isNull()) {
+        String errMsg = "Arithmetic operation requires numeric operands: " + toSql();
+        if (operand instanceof Subquery && !operand.type_.isScalarType()) {
+          errMsg = "Subquery must return a single row: " + operand.toSql();
+        }
+        throw new AnalysisException(errMsg);
+      }
+    }
+
+    convertNumericLiteralsFromDecimal(analyzer);
+    Type t0 = getChild(0).getType();
+    Type t1 = null;
+    if (op_.isUnary()) {
+      Preconditions.checkState(children_.size() == 1);
+    } else if (op_.isBinary()) {
+      Preconditions.checkState(children_.size() == 2);
+      t1 = getChild(1).getType();
+    }
+    if (hasChildCosts()) evalCost_ = getChildCosts() + ARITHMETIC_OP_COST;
+
+    String fnName = op_.getName();
+    switch (op_) {
+      case ADD:
+      case SUBTRACT:
+      case DIVIDE:
+      case MULTIPLY:
+      case MOD:
+        type_ = TypesUtil.getArithmeticResultType(t0, t1, op_);
+        // If both of the children are null, we'll default to the DOUBLE version of the
+        // operator. This prevents the BE from seeing NULL_TYPE.
+        if (type_.isNull()) type_ = Type.DOUBLE;
+        break;
+
+      case INT_DIVIDE:
+      case BITAND:
+      case BITOR:
+      case BITXOR:
+        if ((!t0.isNull() & !t0.isIntegerType()) ||
+            (!t1.isNull() && !t1.isIntegerType())) {
+          throw new AnalysisException("Invalid non-integer argument to operation '" +
+              op_.toString() + "': " + this.toSql());
+        }
+        type_ = Type.getAssignmentCompatibleType(t0, t1, false);
+        // If both of the children are null, we'll default to the INT version of the
+        // operator. This prevents the BE from seeing NULL_TYPE.
+        if (type_.isNull()) type_ = Type.INT;
+        Preconditions.checkState(type_.isIntegerType());
+        break;
+      case BITNOT:
+      case FACTORIAL:
+        if (!t0.isNull() && !t0.isIntegerType()) {
+          throw new AnalysisException("'" + op_.toString() + "'" +
+              " operation only allowed on integer types: " + toSql());
+        }
+        // Special-case NULL to resolve to the appropriate type.
+        if (op_ == Operator.BITNOT) {
+          if (t0.isNull()) castChild(0, Type.INT);
+        } else {
+          assert(op_ == Operator.FACTORIAL);
+          if (t0.isNull()) castChild(0, Type.BIGINT);
+        }
+        fn_ = getBuiltinFunction(analyzer, op_.getName(), collectChildReturnTypes(),
+            CompareMode.IS_SUPERTYPE_OF);
+        Preconditions.checkNotNull(fn_);
+        castForFunctionCall(false);
+        type_ = fn_.getReturnType();
+        return;
+      default:
+        // the programmer forgot to deal with a case
+        Preconditions.checkState(false,
+            "Unknown arithmetic operation " + op_.toString() + " in: " + this.toSql());
+        break;
+    }
+
+    // Don't cast from decimal to decimal. The BE function can just handle this.
+    if (!(type_.isDecimal() && t0.isDecimal())) castChild(0, type_);
+    if (!(type_.isDecimal() && t1.isDecimal())) castChild(1, type_);
+    t0 = getChild(0).getType();
+    t1 = getChild(1).getType();
+
+    fn_ = getBuiltinFunction(analyzer, fnName, collectChildReturnTypes(),
+        CompareMode.IS_IDENTICAL);
+    if (fn_ == null) {
+      Preconditions.checkState(false, String.format("No match " +
+          "for '%s' with operand types %s and %s", toSql(), t0, t1));
+    }
+    Preconditions.checkState(type_.matchesType(fn_.getReturnType()));
+  }
+
+  @Override
+  public Expr clone() { return new ArithmeticExpr(this); }
+}
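
The nested Operator and OperatorPosition enums are package-private, so direct use has to live in the same analysis package (org.apache.impala.analysis after this rename). A hypothetical same-package fragment showing how the position drives SQL printing:

    package com.cloudera.impala.analysis;  // same package: the nested enums are not public

    public class ArithmeticOperatorDemo {
      public static void main(String[] args) {
        ArithmeticExpr.Operator fact = ArithmeticExpr.Operator.FACTORIAL;
        ArithmeticExpr.Operator div = ArithmeticExpr.Operator.INT_DIVIDE;
        System.out.println(fact.isUnary());   // true  -> printed postfix by toSqlImpl(), e.g. "col!"
        System.out.println(div.isBinary());   // true  -> printed infix, e.g. "a DIV b"
        System.out.println(div.getName());    // "int_divide", the name looked up among the builtins
      }
    }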

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/AuthorizationStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AuthorizationStmt.java b/fe/src/main/java/org/apache/impala/analysis/AuthorizationStmt.java
new file mode 100644
index 0000000..4e88014
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/AuthorizationStmt.java
@@ -0,0 +1,49 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import com.cloudera.impala.authorization.User;
+import com.cloudera.impala.common.AnalysisException;
+import com.google.common.base.Strings;
+
+/**
+ * Base class for all authorization statements - CREATE/DROP/SHOW ROLE, GRANT/REVOKE
+ * ROLE/privilege, etc.
+ */
+public class AuthorizationStmt extends StatementBase {
+  // Set during analysis
+  protected User requestingUser_;
+
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    if (!analyzer.getAuthzConfig().isEnabled()) {
+      throw new AnalysisException("Authorization is not enabled. To enable " +
+          "authorization restart Impala with the --server_name=<name> flag.");
+    }
+    if (analyzer.getAuthzConfig().isFileBasedPolicy()) {
+      throw new AnalysisException("Cannot execute authorization statement using a file" +
+          " based policy. To disable file based policies, restart Impala without the " +
+          "-authorization_policy_file flag set.");
+    }
+    if (Strings.isNullOrEmpty(analyzer.getUser().getName())) {
+      throw new AnalysisException("Cannot execute authorization statement with an " +
+          "empty username.");
+    }
+    requestingUser_ = analyzer.getUser();
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/BaseTableRef.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/BaseTableRef.java b/fe/src/main/java/org/apache/impala/analysis/BaseTableRef.java
new file mode 100644
index 0000000..69780e0
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/BaseTableRef.java
@@ -0,0 +1,98 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import com.cloudera.impala.catalog.HdfsTable;
+import com.cloudera.impala.catalog.Table;
+import com.cloudera.impala.common.AnalysisException;
+import com.google.common.base.Preconditions;
+
+/**
+ * Represents a reference to an actual table, such as an Hdfs or HBase table.
+ * BaseTableRefs are instantiated as a result of table resolution during analysis
+ * of a SelectStmt.
+ */
+public class BaseTableRef extends TableRef {
+
+  /**
+   * Create a BaseTableRef from the original unresolved table ref as well as
+   * its resolved path. Sets table aliases and join-related attributes.
+   */
+  public BaseTableRef(TableRef tableRef, Path resolvedPath) {
+    super(tableRef);
+    Preconditions.checkState(resolvedPath.isResolved());
+    Preconditions.checkState(resolvedPath.isRootedAtTable());
+    resolvedPath_ = resolvedPath;
+    // Set implicit aliases if no explicit one was given.
+    if (hasExplicitAlias()) return;
+    aliases_ = new String[] {
+        getTable().getTableName().toString().toLowerCase(),
+        getTable().getName().toLowerCase() };
+  }
+
+  /**
+   * C'tor for cloning.
+   */
+  private BaseTableRef(BaseTableRef other) {
+    super(other);
+  }
+
+  /**
+   * Register this table ref and then analyze the Join clause.
+   */
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    if (isAnalyzed_) return;
+    analyzer.registerAuthAndAuditEvent(resolvedPath_.getRootTable(), analyzer);
+    desc_ = analyzer.registerTableRef(this);
+    isAnalyzed_ = true;
+    analyzeHints(analyzer);
+    analyzeJoin(analyzer);
+    analyzeSkipHeaderLineCount();
+  }
+
+  @Override
+  protected String tableRefToSql() {
+    // Enclose the alias in quotes if Hive cannot parse it without quotes.
+    // This is needed for view compatibility between Impala and Hive.
+    String aliasSql = null;
+    String alias = getExplicitAlias();
+    if (alias != null) aliasSql = ToSqlUtils.getIdentSql(alias);
+    String tableHintsSql = ToSqlUtils.getPlanHintsSql(tableHints_);
+    return getTable().getTableName().toSql() +
+        ((aliasSql != null) ? " " + aliasSql : "") +
+        (tableHintsSql != "" ? " " + tableHintsSql : "");
+  }
+
+  public String debugString() { return tableRefToSql(); }
+  @Override
+  protected TableRef clone() { return new BaseTableRef(this); }
+
+  /**
+   * Analyze the 'skip.header.line.count' property.
+   */
+  private void analyzeSkipHeaderLineCount() throws AnalysisException {
+    Table table = getTable();
+    if (!(table instanceof HdfsTable)) return;
+    HdfsTable hdfsTable = (HdfsTable)table;
+
+    StringBuilder error = new StringBuilder();
+    hdfsTable.parseSkipHeaderLineCount(error);
+    if (error.length() > 0) throw new AnalysisException(error.toString());
+  }
+}
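As a quick illustration of the implicit-alias rule in the constructor above, here is a small standalone sketch (a hypothetical helper, not Impala's TableRef): when no explicit alias is given, both the lowercased fully qualified name and the lowercased unqualified table name are registered as aliases.

  public class ImplicitAliasSketch {
    // Mirrors BaseTableRef: qualified and unqualified names, lowercased.
    static String[] implicitAliases(String dbName, String tableName) {
      String qualified = (dbName + "." + tableName).toLowerCase();
      return new String[] { qualified, tableName.toLowerCase() };
    }

    public static void main(String[] args) {
      for (String a : implicitAliases("functional", "AllTypes")) {
        System.out.println(a);  // prints functional.alltypes, then alltypes
      }
    }
  }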

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/BetweenPredicate.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/BetweenPredicate.java b/fe/src/main/java/org/apache/impala/analysis/BetweenPredicate.java
new file mode 100644
index 0000000..d76a4c6
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/BetweenPredicate.java
@@ -0,0 +1,158 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.thrift.TExprNode;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+
+/**
+ * Class describing between predicates. After successful analysis, we rewrite
+ * the between predicate to a conjunctive/disjunctive compound predicate
+ * to be handed to the backend.
+ */
+public class BetweenPredicate extends Predicate {
+
+  private final boolean isNotBetween_;
+
+  // After successful analysis, we rewrite this between predicate
+  // into a conjunctive/disjunctive compound predicate.
+  private CompoundPredicate rewrittenPredicate_;
+
+  // Children of the BetweenPredicate, since this.children should hold the children
+  // of the rewritten predicate to make sure toThrift() picks up the right ones.
+  private ArrayList<Expr> originalChildren_ = Lists.newArrayList();
+
+  // First child is the comparison expr which should be in [lowerBound, upperBound].
+  public BetweenPredicate(Expr compareExpr, Expr lowerBound, Expr upperBound,
+      boolean isNotBetween) {
+    originalChildren_.add(compareExpr);
+    originalChildren_.add(lowerBound);
+    originalChildren_.add(upperBound);
+    this.isNotBetween_ = isNotBetween;
+  }
+
+  /**
+   * Copy c'tor used in clone().
+   */
+  protected BetweenPredicate(BetweenPredicate other) {
+    super(other);
+    isNotBetween_ = other.isNotBetween_;
+    originalChildren_ = Expr.cloneList(other.originalChildren_);
+    if (other.rewrittenPredicate_ != null) {
+      rewrittenPredicate_ = (CompoundPredicate) other.rewrittenPredicate_.clone();
+    }
+  }
+
+  public CompoundPredicate getRewrittenPredicate() {
+    Preconditions.checkState(isAnalyzed_);
+    return rewrittenPredicate_;
+  }
+  public ArrayList<Expr> getOriginalChildren() { return originalChildren_; }
+
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    if (isAnalyzed_) return;
+    super.analyze(analyzer);
+    if (originalChildren_.get(0) instanceof Subquery &&
+        (originalChildren_.get(1) instanceof Subquery ||
+         originalChildren_.get(2) instanceof Subquery)) {
+      throw new AnalysisException("Comparison between subqueries is not " +
+          "supported in a between predicate: " + toSqlImpl());
+    }
+    analyzer.castAllToCompatibleType(originalChildren_);
+
+    // Rewrite between predicate into a conjunctive/disjunctive compound predicate.
+    if (isNotBetween_) {
+      // Rewrite into disjunction.
+      Predicate lower = new BinaryPredicate(BinaryPredicate.Operator.LT,
+          originalChildren_.get(0), originalChildren_.get(1));
+      Predicate upper = new BinaryPredicate(BinaryPredicate.Operator.GT,
+          originalChildren_.get(0), originalChildren_.get(2));
+      rewrittenPredicate_ =
+          new CompoundPredicate(CompoundPredicate.Operator.OR, lower, upper);
+    } else {
+      // Rewrite into conjunction.
+      Predicate lower = new BinaryPredicate(BinaryPredicate.Operator.GE,
+          originalChildren_.get(0), originalChildren_.get(1));
+      Predicate upper = new BinaryPredicate(BinaryPredicate.Operator.LE,
+          originalChildren_.get(0), originalChildren_.get(2));
+      rewrittenPredicate_ =
+          new CompoundPredicate(CompoundPredicate.Operator.AND, lower, upper);
+    }
+
+    try {
+      rewrittenPredicate_.analyze(analyzer);
+      fn_ = rewrittenPredicate_.fn_;
+    } catch (AnalysisException e) {
+      // We should have already guaranteed that analysis will succeed.
+      Preconditions.checkState(false, "Analysis failed in rewritten between predicate");
+    }
+
+    // Make sure toThrift() picks up the children of the rewritten predicate.
+    children_ = rewrittenPredicate_.getChildren();
+    // Since the only child is a CompoundPredicate expressing the comparison,
+    // the cost of the comparison is fully captured by the children's cost.
+    evalCost_ = getChildCosts();
+    isAnalyzed_ = true;
+  }
+
+  @Override
+  public List<Expr> getConjuncts() {
+    return rewrittenPredicate_.getConjuncts();
+  }
+
+  @Override
+  protected void toThrift(TExprNode msg) {
+    rewrittenPredicate_.toThrift(msg);
+  }
+
+  @Override
+  public String toSqlImpl() {
+    String notStr = (isNotBetween_) ? "NOT " : "";
+    return originalChildren_.get(0).toSql() + " " + notStr + "BETWEEN " +
+        originalChildren_.get(1).toSql() + " AND " + originalChildren_.get(2).toSql();
+  }
+
+  /**
+   * Also substitute the exprs in originalChildren when cloning.
+   */
+  @Override
+  protected Expr substituteImpl(ExprSubstitutionMap smap, Analyzer analyzer)
+      throws AnalysisException {
+    BetweenPredicate clone = (BetweenPredicate) super.substituteImpl(smap, analyzer);
+    Preconditions.checkNotNull(clone);
+    clone.originalChildren_ =
+        Expr.substituteList(originalChildren_, smap, analyzer, false);
+    return clone;
+  }
+
+  @Override
+  public Expr clone() { return new BetweenPredicate(this); }
+
+  @Override
+  public Expr reset() {
+    super.reset();
+    originalChildren_ = Expr.resetList(originalChildren_);
+    return this;
+  }
+}
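For reference, a minimal standalone sketch (plain strings rather than Expr trees) of the rewrite performed in analyze() above: BETWEEN becomes a conjunction of >= and <=, and NOT BETWEEN becomes a disjunction of < and >.

  public class BetweenRewriteSketch {
    // x BETWEEN lo AND hi      ->  x >= lo AND x <= hi
    // x NOT BETWEEN lo AND hi  ->  x < lo OR x > hi
    static String rewrite(String x, String lo, String hi, boolean isNotBetween) {
      if (isNotBetween) {
        return x + " < " + lo + " OR " + x + " > " + hi;
      }
      return x + " >= " + lo + " AND " + x + " <= " + hi;
    }

    public static void main(String[] args) {
      System.out.println(rewrite("col", "1", "10", false));  // col >= 1 AND col <= 10
      System.out.println(rewrite("col", "1", "10", true));   // col < 1 OR col > 10
    }
  }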

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/BinaryPredicate.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/BinaryPredicate.java b/fe/src/main/java/org/apache/impala/analysis/BinaryPredicate.java
new file mode 100644
index 0000000..35d03e1
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/BinaryPredicate.java
@@ -0,0 +1,388 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.cloudera.impala.catalog.Db;
+import com.cloudera.impala.catalog.Function.CompareMode;
+import com.cloudera.impala.catalog.ScalarFunction;
+import com.cloudera.impala.catalog.Type;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.common.Pair;
+import com.cloudera.impala.common.Reference;
+import com.cloudera.impala.extdatasource.thrift.TComparisonOp;
+import com.cloudera.impala.thrift.TExprNode;
+import com.cloudera.impala.thrift.TExprNodeType;
+import com.google.common.base.Objects;
+import com.google.common.base.Preconditions;
+import com.google.common.base.Predicates;
+import com.google.common.collect.Lists;
+
+/**
+ * Predicates with two operands, e.g. comparisons (=, !=, <, <=, >, >=) and
+ * IS [NOT] DISTINCT FROM.
+ *
+ */
+public class BinaryPredicate extends Predicate {
+  private final static Logger LOG = LoggerFactory.getLogger(BinaryPredicate.class);
+
+  // true if this BinaryPredicate is inferred from slot equivalences, false otherwise.
+  private boolean isInferred_ = false;
+
+  public enum Operator {
+    EQ("=", "eq", TComparisonOp.EQ),
+    NE("!=", "ne", TComparisonOp.NE),
+    LE("<=", "le", TComparisonOp.LE),
+    GE(">=", "ge", TComparisonOp.GE),
+    LT("<", "lt", TComparisonOp.LT),
+    GT(">", "gt", TComparisonOp.GT),
+    DISTINCT_FROM("IS DISTINCT FROM", "distinctfrom", TComparisonOp.DISTINCT_FROM),
+    NOT_DISTINCT("IS NOT DISTINCT FROM", "notdistinct", TComparisonOp.NOT_DISTINCT),
+    // Same as EQ, except it returns True if the rhs is NULL. There is no backend
+    // function for this. The functionality is embedded in the hash-join
+    // implementation.
+    NULL_MATCHING_EQ("=", "null_matching_eq", TComparisonOp.EQ);
+
+    private final String description_;
+    private final String name_;
+    private final TComparisonOp thriftOp_;
+
+    private Operator(String description, String name, TComparisonOp thriftOp) {
+      this.description_ = description;
+      this.name_ = name;
+      this.thriftOp_ = thriftOp;
+    }
+
+    @Override
+    public String toString() { return description_; }
+    public String getName() { return name_; }
+    public TComparisonOp getThriftOp() { return thriftOp_; }
+    public boolean isEquivalence() { return this == EQ || this == NOT_DISTINCT; }
+
+    public Operator converse() {
+      switch (this) {
+        case EQ: return EQ;
+        case NE: return NE;
+        case LE: return GE;
+        case GE: return LE;
+        case LT: return GT;
+        case GT: return LT;
+        case DISTINCT_FROM: return DISTINCT_FROM;
+        case NOT_DISTINCT: return NOT_DISTINCT;
+        case NULL_MATCHING_EQ:
+          throw new IllegalStateException("Not implemented");
+        default: throw new IllegalStateException("Invalid operator");
+      }
+    }
+  }
+
+  public static void initBuiltins(Db db) {
+    for (Type t: Type.getSupportedTypes()) {
+      if (t.isNull()) continue; // NULL is handled through type promotion.
+      db.addBuiltin(ScalarFunction.createBuiltinOperator(
+          Operator.EQ.getName(), Lists.newArrayList(t, t), Type.BOOLEAN));
+      db.addBuiltin(ScalarFunction.createBuiltinOperator(
+          Operator.NE.getName(), Lists.newArrayList(t, t), Type.BOOLEAN));
+      db.addBuiltin(ScalarFunction.createBuiltinOperator(
+          Operator.LE.getName(), Lists.newArrayList(t, t), Type.BOOLEAN));
+      db.addBuiltin(ScalarFunction.createBuiltinOperator(
+          Operator.GE.getName(), Lists.newArrayList(t, t), Type.BOOLEAN));
+      db.addBuiltin(ScalarFunction.createBuiltinOperator(
+          Operator.LT.getName(), Lists.newArrayList(t, t), Type.BOOLEAN));
+      db.addBuiltin(ScalarFunction.createBuiltinOperator(
+          Operator.GT.getName(), Lists.newArrayList(t, t), Type.BOOLEAN));
+    }
+  }
+
+  /**
+   * Normalizes a 'predicate' consisting of an uncast SlotRef and a constant Expr into
+   * the following form: <SlotRef> <Op> <LiteralExpr>
+   * If 'predicate' cannot be expressed in this way, null is returned.
+   */
+  public static BinaryPredicate normalizeSlotRefComparison(BinaryPredicate predicate,
+      Analyzer analyzer) {
+    SlotRef ref = null;
+    if (predicate.getChild(0) instanceof SlotRef) {
+      ref = (SlotRef) predicate.getChild(0);
+    } else if (predicate.getChild(1) instanceof SlotRef) {
+      ref = (SlotRef) predicate.getChild(1);
+    }
+
+    if (ref == null) return null;
+    if (ref != predicate.getChild(0)) {
+      Preconditions.checkState(ref == predicate.getChild(1));
+      predicate = new BinaryPredicate(predicate.getOp().converse(), ref,
+          predicate.getChild(0));
+      predicate.analyzeNoThrow(analyzer);
+    }
+
+    try {
+      predicate.foldConstantChildren(analyzer);
+    } catch (AnalysisException ex) {
+      // Throws if the expression cannot be evaluated by the BE.
+      return null;
+    }
+    predicate.analyzeNoThrow(analyzer);
+    if (!(predicate.getChild(1) instanceof LiteralExpr)) return null;
+    return predicate;
+  }
+
+  private Operator op_;
+
+  public Operator getOp() { return op_; }
+  public void setOp(Operator op) { op_ = op; }
+
+  public BinaryPredicate(Operator op, Expr e1, Expr e2) {
+    super();
+    this.op_ = op;
+    Preconditions.checkNotNull(e1);
+    children_.add(e1);
+    Preconditions.checkNotNull(e2);
+    children_.add(e2);
+  }
+
+  protected BinaryPredicate(BinaryPredicate other) {
+    super(other);
+    op_ = other.op_;
+    isInferred_ = other.isInferred_;
+  }
+
+  public boolean isNullMatchingEq() { return op_ == Operator.NULL_MATCHING_EQ; }
+
+  public boolean isInferred() { return isInferred_; }
+  public void setIsInferred() { isInferred_ = true; }
+
+  @Override
+  public String toSqlImpl() {
+    return getChild(0).toSql() + " " + op_.toString() + " " + getChild(1).toSql();
+  }
+
+  @Override
+  protected void toThrift(TExprNode msg) {
+    Preconditions.checkState(children_.size() == 2);
+    // Cannot serialize a nested predicate.
+    Preconditions.checkState(!contains(Subquery.class));
+    // This check is important because we often clone and/or evaluate predicates,
+    // and it's easy to get the casting logic wrong, e.g., cloned predicates
+    // with expr substitutions need to be re-analyzed with reanalyze().
+    Preconditions.checkState(getChild(0).getType().getPrimitiveType() ==
+                             getChild(1).getType().getPrimitiveType(),
+        "child 0 type: " + getChild(0).getType() +
+        " child 1 type: " + getChild(1).getType());
+    msg.node_type = TExprNodeType.FUNCTION_CALL;
+  }
+
+  @Override
+  public String debugString() {
+    return Objects.toStringHelper(this)
+        .add("op", op_)
+        .addValue(super.debugString())
+        .toString();
+  }
+
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    if (isAnalyzed_) return;
+    super.analyze(analyzer);
+
+    convertNumericLiteralsFromDecimal(analyzer);
+    String opName = op_.getName().equals("null_matching_eq") ? "eq" : op_.getName();
+    fn_ = getBuiltinFunction(analyzer, opName, collectChildReturnTypes(),
+        CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
+    if (fn_ == null) {
+      // Construct an appropriate error message and throw an AnalysisException.
+      String errMsg = "operands of type " + getChild(0).getType().toSql() + " and " +
+            getChild(1).getType().toSql()  + " are not comparable: " + toSql();
+
+      // Check if any of the children is a Subquery that does not return a
+      // scalar.
+      for (Expr expr: children_) {
+        if (expr instanceof Subquery && !expr.getType().isScalarType()) {
+          errMsg = "Subquery must return a single row: " + expr.toSql();
+          break;
+        }
+      }
+
+      throw new AnalysisException(errMsg);
+    }
+    Preconditions.checkState(fn_.getReturnType().isBoolean());
+
+    ArrayList<Expr> subqueries = Lists.newArrayList();
+    collectAll(Predicates.instanceOf(Subquery.class), subqueries);
+    if (subqueries.size() > 1) {
+      // TODO Remove that restriction when we add support for independent subquery
+      // evaluation.
+      throw new AnalysisException("Multiple subqueries are not supported in binary " +
+          "predicates: " + toSql());
+    }
+    if (contains(ExistsPredicate.class)) {
+      throw new AnalysisException("EXISTS subquery predicates are not " +
+          "supported in binary predicates: " + toSql());
+    }
+
+    List<InPredicate> inPredicates = Lists.newArrayList();
+    collect(InPredicate.class, inPredicates);
+    for (InPredicate inPredicate: inPredicates) {
+      if (inPredicate.contains(Subquery.class)) {
+        throw new AnalysisException("IN subquery predicates are not supported in " +
+            "binary predicates: " + toSql());
+      }
+    }
+
+    // Don't perform any casting for predicates with subqueries here. Any casting
+    // required will be performed when the subquery is unnested.
+    if (!contains(Subquery.class)) castForFunctionCall(true);
+
+    // Determine selectivity
+    // TODO: Compute selectivity for nested predicates.
+    // TODO: Improve estimation using histograms.
+    Reference<SlotRef> slotRefRef = new Reference<SlotRef>();
+    if ((op_ == Operator.EQ || op_ == Operator.NOT_DISTINCT)
+        && isSingleColumnPredicate(slotRefRef, null)) {
+      long distinctValues = slotRefRef.getRef().getNumDistinctValues();
+      if (distinctValues > 0) {
+        selectivity_ = 1.0 / distinctValues;
+        selectivity_ = Math.max(0, Math.min(1, selectivity_));
+      }
+    }
+
+    // Compute cost.
+    if (hasChildCosts()) {
+      if (getChild(0).getType().isFixedLengthType()) {
+        evalCost_ = getChildCosts() + BINARY_PREDICATE_COST;
+      } else if (getChild(0).getType().isStringType()) {
+        evalCost_ = getChildCosts() +
+            (float) ((getAvgStringLength(getChild(0)) + getAvgStringLength(getChild(1))) *
+            BINARY_PREDICATE_COST);
+      } else {
+        //TODO(tmarshall): Handle other var length types here.
+        evalCost_ = getChildCosts() + VAR_LEN_BINARY_PREDICATE_COST;
+      }
+    }
+  }
+
+  /**
+   * If predicate is of the form "<slotref> <op> <expr>", returns expr,
+   * otherwise returns null. Slotref may be wrapped in a CastExpr.
+   * TODO: revisit CAST handling at the caller
+   */
+  public Expr getSlotBinding(SlotId id) {
+    // check left operand
+    SlotRef slotRef = getChild(0).unwrapSlotRef(false);
+    if (slotRef != null && slotRef.getSlotId() == id) return getChild(1);
+    // check right operand
+    slotRef = getChild(1).unwrapSlotRef(false);
+    if (slotRef != null && slotRef.getSlotId() == id) return getChild(0);
+    return null;
+  }
+
+  /**
+   * If e is an equality predicate between two slots that only require implicit
+   * casts, returns those two slots; otherwise returns null.
+   */
+  public static Pair<SlotId, SlotId> getEqSlots(Expr e) {
+    if (!(e instanceof BinaryPredicate)) return null;
+    return ((BinaryPredicate) e).getEqSlots();
+  }
+
+  /**
+   * If this is an equality predicate between two slots that only require implicit
+   * casts, returns those two slots; otherwise returns null.
+   */
+  @Override
+  public Pair<SlotId, SlotId> getEqSlots() {
+    if (op_ != Operator.EQ) return null;
+    SlotRef lhs = getChild(0).unwrapSlotRef(true);
+    if (lhs == null) return null;
+    SlotRef rhs = getChild(1).unwrapSlotRef(true);
+    if (rhs == null) return null;
+    return new Pair<SlotId, SlotId>(lhs.getSlotId(), rhs.getSlotId());
+  }
+
+  /**
+   * If predicate is of the form "<SlotRef> op <Expr>" or "<Expr> op <SlotRef>",
+   * returns the SlotRef, otherwise returns null.
+   */
+  @Override
+  public SlotRef getBoundSlot() {
+    SlotRef slotRef = getChild(0).unwrapSlotRef(true);
+    if (slotRef != null) return slotRef;
+    return getChild(1).unwrapSlotRef(true);
+  }
+
+  /**
+   * Negates a BinaryPredicate.
+   */
+  @Override
+  public Expr negate() {
+    Operator newOp = null;
+    switch (op_) {
+      case EQ:
+        newOp = Operator.NE;
+        break;
+      case NE:
+        newOp = Operator.EQ;
+        break;
+      case LT:
+        newOp = Operator.GE;
+        break;
+      case LE:
+        newOp = Operator.GT;
+        break;
+      case GE:
+        newOp = Operator.LT;
+        break;
+      case GT:
+        newOp = Operator.LE;
+        break;
+      case DISTINCT_FROM:
+        newOp = Operator.NOT_DISTINCT;
+        break;
+      case NOT_DISTINCT:
+        newOp = Operator.DISTINCT_FROM;
+        break;
+      case NULL_MATCHING_EQ:
+        throw new IllegalStateException("Not implemented");
+    }
+    return new BinaryPredicate(newOp, getChild(0), getChild(1));
+  }
+
+  /**
+   * Swaps the first with the second child in-place. Only valid to call for
+   * equivalence and not equal predicates.
+   */
+  public void reverse() {
+    Preconditions.checkState(op_.isEquivalence() || op_ == Operator.NE);
+    Collections.swap(children_, 0, 1);
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (!super.equals(obj)) return false;
+    BinaryPredicate other = (BinaryPredicate) obj;
+    return op_.equals(other.op_);
+  }
+
+  @Override
+  public Expr clone() { return new BinaryPredicate(this); }
+}
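A standalone sketch of two details from the class above, using a simplified enum rather than the Operator defined here: the negate()/converse() operator mappings, and the equality selectivity estimate of 1/NDV clamped to [0, 1].

  public class BinaryOpSketch {
    enum Op { EQ, NE, LT, LE, GT, GE }

    // a op b  is equivalent to  NOT (a negate(op) b)
    static Op negate(Op op) {
      switch (op) {
        case EQ: return Op.NE;
        case NE: return Op.EQ;
        case LT: return Op.GE;
        case LE: return Op.GT;
        case GT: return Op.LE;
        default: return Op.LT;  // GE
      }
    }

    // a op b  is equivalent to  b converse(op) a
    static Op converse(Op op) {
      switch (op) {
        case LT: return Op.GT;
        case LE: return Op.GE;
        case GT: return Op.LT;
        case GE: return Op.LE;
        default: return op;  // EQ and NE are symmetric
      }
    }

    // Selectivity of an equality predicate on a column with 'ndv' distinct values.
    static double eqSelectivity(long ndv) {
      if (ndv <= 0) return -1;  // unknown
      return Math.max(0.0, Math.min(1.0, 1.0 / ndv));
    }

    public static void main(String[] args) {
      System.out.println(negate(Op.LE));        // GT
      System.out.println(converse(Op.LE));      // GE
      System.out.println(eqSelectivity(1000));  // 0.001
    }
  }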

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/BoolLiteral.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/BoolLiteral.java b/fe/src/main/java/org/apache/impala/analysis/BoolLiteral.java
new file mode 100644
index 0000000..03b2b1f
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/BoolLiteral.java
@@ -0,0 +1,113 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import com.cloudera.impala.catalog.Type;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.thrift.TBoolLiteral;
+import com.cloudera.impala.thrift.TExprNode;
+import com.cloudera.impala.thrift.TExprNodeType;
+import com.google.common.base.Objects;
+
+public class BoolLiteral extends LiteralExpr {
+  private final boolean value_;
+
+  public BoolLiteral(boolean value) {
+    this.value_ = value;
+    type_ = Type.BOOLEAN;
+    evalCost_ = LITERAL_COST;
+  }
+
+  public BoolLiteral(String value) throws AnalysisException {
+    type_ = Type.BOOLEAN;
+    evalCost_ = LITERAL_COST;
+    if (value.toLowerCase().equals("true")) {
+      this.value_ = true;
+    } else if (value.toLowerCase().equals("false")) {
+      this.value_ = false;
+    } else {
+      throw new AnalysisException("invalid BOOLEAN literal: " + value);
+    }
+  }
+
+  /**
+   * Copy c'tor used in clone.
+   */
+  protected BoolLiteral(BoolLiteral other) {
+    super(other);
+    value_ = other.value_;
+  }
+
+  @Override
+  public String debugString() {
+    return Objects.toStringHelper(this)
+        .add("value", value_)
+        .toString();
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (!super.equals(obj)) {
+      return false;
+    }
+    return ((BoolLiteral) obj).value_ == value_;
+  }
+
+  @Override
+  public int hashCode() { return value_ ? 1 : 0; }
+
+  public boolean getValue() { return value_; }
+
+  @Override
+  public String toSqlImpl() {
+    return getStringValue();
+  }
+
+  @Override
+  public String getStringValue() {
+    return value_ ? "TRUE" : "FALSE";
+  }
+
+  @Override
+  protected void toThrift(TExprNode msg) {
+    msg.node_type = TExprNodeType.BOOL_LITERAL;
+    msg.bool_literal = new TBoolLiteral(value_);
+  }
+
+  @Override
+  protected Expr uncheckedCastTo(Type targetType) throws AnalysisException {
+    if (targetType.equals(this.type_)) {
+      return this;
+    } else {
+      return new CastExpr(targetType, this);
+    }
+  }
+
+  @Override
+  public int compareTo(LiteralExpr o) {
+    int ret = super.compareTo(o);
+    if (ret != 0) return ret;
+    BoolLiteral other = (BoolLiteral) o;
+    if (value_ && !other.getValue()) return 1;
+    if (!value_ && other.getValue()) return -1;
+    return 0;
+  }
+
+  @Override
+  public Expr clone() { return new BoolLiteral(this); }
+}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/CaseExpr.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/CaseExpr.java b/fe/src/main/java/org/apache/impala/analysis/CaseExpr.java
new file mode 100644
index 0000000..bd3ec83
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/CaseExpr.java
@@ -0,0 +1,379 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import java.util.List;
+
+import com.cloudera.impala.catalog.Db;
+import com.cloudera.impala.catalog.Function.CompareMode;
+import com.cloudera.impala.catalog.PrimitiveType;
+import com.cloudera.impala.catalog.ScalarFunction;
+import com.cloudera.impala.catalog.ScalarType;
+import com.cloudera.impala.catalog.Type;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.thrift.TCaseExpr;
+import com.cloudera.impala.thrift.TExprNode;
+import com.cloudera.impala.thrift.TExprNodeType;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+
+/**
+ * CASE and DECODE are represented using this class. The backend implementation is
+ * always the "case" function.
+ *
+ * The internal representation of
+ *   CASE [expr] WHEN expr THEN expr [WHEN expr THEN expr ...] [ELSE expr] END
+ * Each When/Then is stored as two consecutive children (whenExpr, thenExpr). If a case
+ * expr is given then it is the first child. If an else expr is given then it is the
+ * last child.
+ *
+ * The internal representation of
+ *   DECODE(expr, key_expr, val_expr [, key_expr, val_expr ...] [, default_val_expr])
+ * has a pair of children for each pair of key/val_expr and an additional child if the
+ * default_val_expr was given. The first child represents the comparison of expr to
+ * key_expr. Decode has three forms:
+ *   1) DECODE(expr, null_literal, val_expr) -
+ *       child[0] = IsNull(expr)
+ *   2) DECODE(expr, non_null_literal, val_expr) -
+ *       child[0] = Eq(expr, literal)
+ *   3) DECODE(expr1, expr2, val_expr) -
+ *       child[0] = Or(And(IsNull(expr1), IsNull(expr2)),  Eq(expr1, expr2))
+ * The children representing val_expr (child[1]) and default_val_expr (child[2]) are
+ * simply the exprs themselves.
+ *
+ * Example of equivalent CASE for DECODE(foo, 'bar', 1, col, 2, NULL, 3, 4):
+ *   CASE
+ *     WHEN foo = 'bar' THEN 1   -- no need for IS NULL check
+ *     WHEN foo IS NULL AND col IS NULL OR foo = col THEN 2
+ *     WHEN foo IS NULL THEN 3  -- no need for equality check
+ *     ELSE 4
+ *   END
+ */
+public class CaseExpr extends Expr {
+
+  // Set if constructed from a DECODE, null otherwise.
+  private FunctionCallExpr decodeExpr_;
+
+  private boolean hasCaseExpr_;
+  private boolean hasElseExpr_;
+
+  public CaseExpr(Expr caseExpr, List<CaseWhenClause> whenClauses, Expr elseExpr) {
+    super();
+    if (caseExpr != null) {
+      children_.add(caseExpr);
+      hasCaseExpr_ = true;
+    }
+    for (CaseWhenClause whenClause: whenClauses) {
+      Preconditions.checkNotNull(whenClause.getWhenExpr());
+      children_.add(whenClause.getWhenExpr());
+      Preconditions.checkNotNull(whenClause.getThenExpr());
+      children_.add(whenClause.getThenExpr());
+    }
+    if (elseExpr != null) {
+      children_.add(elseExpr);
+      hasElseExpr_ = true;
+    }
+  }
+
+  /**
+   * Constructs an equivalent CaseExpr representation.
+   *
+   * The DECODE behavior is basically the same as the hasCaseExpr_ version of CASE,
+   * with one difference: NULLs are considered equal when comparing the argument to
+   * be decoded with the candidates. This difference is for compatibility with
+   * Oracle. http://docs.oracle.com/cd/B19306_01/server.102/b14200/functions040.htm.
+   * To account for the difference, the CASE representation will use the non-hasCaseExpr_
+   * version.
+   *
+   * The return type of DECODE differs from that of Oracle when the third argument is
+   * the NULL literal. In Oracle the return type is STRING. In Impala the return type is
+   * determined by the implicit casting rules (i.e. it's not necessarily a STRING). This
+   * is done so seemingly normal usages such as DECODE(int_col, tinyint_col, NULL,
+   * bigint_col) will avoid type check errors (STRING incompatible with BIGINT).
+   */
+  public CaseExpr(FunctionCallExpr decodeExpr) {
+    super();
+    decodeExpr_ = decodeExpr;
+    hasCaseExpr_ = false;
+
+    int childIdx = 0;
+    Expr encoded = null;
+    Expr encodedIsNull = null;
+    if (!decodeExpr.getChildren().isEmpty()) {
+      encoded = decodeExpr.getChild(childIdx++);
+      encodedIsNull = new IsNullPredicate(encoded, false);
+    }
+
+    // Add the key_expr/val_expr pairs
+    while (childIdx + 2 <= decodeExpr.getChildren().size()) {
+      Expr candidate = decodeExpr.getChild(childIdx++);
+      if (candidate.isLiteral()) {
+        if (candidate.isNullLiteral()) {
+          // An example case is DECODE(foo, NULL, bar), since NULLs are considered
+          // equal, this becomes CASE WHEN foo IS NULL THEN bar END.
+          children_.add(encodedIsNull);
+        } else {
+          children_.add(new BinaryPredicate(
+              BinaryPredicate.Operator.EQ, encoded, candidate));
+        }
+      } else {
+        children_.add(new CompoundPredicate(CompoundPredicate.Operator.OR,
+            new CompoundPredicate(CompoundPredicate.Operator.AND,
+                encodedIsNull, new IsNullPredicate(candidate, false)),
+            new BinaryPredicate(BinaryPredicate.Operator.EQ, encoded, candidate)));
+      }
+
+      // Add the value
+      children_.add(decodeExpr.getChild(childIdx++));
+    }
+
+    // Add the default value
+    if (childIdx < decodeExpr.getChildren().size()) {
+      hasElseExpr_ = true;
+      children_.add(decodeExpr.getChild(childIdx));
+    }
+  }
+
+  /**
+   * Copy c'tor used in clone().
+   */
+  protected CaseExpr(CaseExpr other) {
+    super(other);
+    decodeExpr_ = other.decodeExpr_;
+    hasCaseExpr_ = other.hasCaseExpr_;
+    hasElseExpr_ = other.hasElseExpr_;
+  }
+
+  public static void initBuiltins(Db db) {
+    for (Type t: Type.getSupportedTypes()) {
+      if (t.isNull()) continue;
+      if (t.isScalarType(PrimitiveType.CHAR)) continue;
+      // TODO: case is special and the signature cannot be represented.
+      // It is alternating varargs
+      // e.g. case(bool, type, bool type, bool type, etc).
+      // Instead we just add a version for each of the return types
+      // e.g. case(BOOLEAN), case(INT), etc
+      db.addBuiltin(ScalarFunction.createBuiltinOperator(
+          "case", "", Lists.newArrayList(t), t));
+      // Same for DECODE
+      db.addBuiltin(ScalarFunction.createBuiltinOperator(
+          "decode", "", Lists.newArrayList(t), t));
+    }
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (!super.equals(obj)) return false;
+    CaseExpr expr = (CaseExpr) obj;
+    return hasCaseExpr_ == expr.hasCaseExpr_
+        && hasElseExpr_ == expr.hasElseExpr_
+        && isDecode() == expr.isDecode();
+  }
+
+  @Override
+  public String toSqlImpl() {
+    return (decodeExpr_ == null) ? toCaseSql() : decodeExpr_.toSqlImpl();
+  }
+
+  @VisibleForTesting
+  String toCaseSql() {
+    StringBuilder output = new StringBuilder("CASE");
+    int childIdx = 0;
+    if (hasCaseExpr_) {
+      output.append(" " + children_.get(childIdx++).toSql());
+    }
+    while (childIdx + 2 <= children_.size()) {
+      output.append(" WHEN " + children_.get(childIdx++).toSql());
+      output.append(" THEN " + children_.get(childIdx++).toSql());
+    }
+    if (hasElseExpr_) {
+      output.append(" ELSE " + children_.get(children_.size() - 1).toSql());
+    }
+    output.append(" END");
+    return output.toString();
+  }
+
+  @Override
+  protected void toThrift(TExprNode msg) {
+    msg.node_type = TExprNodeType.CASE_EXPR;
+    msg.case_expr = new TCaseExpr(hasCaseExpr_, hasElseExpr_);
+  }
+
+  private void castCharToString(int childIndex) throws AnalysisException {
+    if (children_.get(childIndex).getType().isScalarType(PrimitiveType.CHAR)) {
+      children_.set(childIndex, children_.get(childIndex).castTo(ScalarType.STRING));
+    }
+  }
+
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    if (isAnalyzed_) return;
+    super.analyze(analyzer);
+
+    if (isDecode()) {
+      Preconditions.checkState(!hasCaseExpr_);
+      // decodeExpr_.analyze() would fail validating function existence. The complex
+      // vararg signature is currently unsupported.
+      FunctionCallExpr.validateScalarFnParams(decodeExpr_.getParams());
+      if (decodeExpr_.getChildren().size() < 3) {
+        throw new AnalysisException("DECODE in '" + toSql() + "' requires at least 3 "
+            + "arguments.");
+      }
+    }
+
+    // Since we have no BE implementation of a CaseExpr with CHAR types,
+    // we cast the CHAR-typed whenExprs and caseExprs to STRING.
+    // TODO: This casting is not always correct and needs to be fixed, see IMPALA-1652.
+
+    // Keep track of maximum compatible type of case expr and all when exprs.
+    Type whenType = null;
+    // Keep track of maximum compatible type of else expr and all then exprs.
+    Type returnType = null;
+    // Remember last of these exprs for error reporting.
+    Expr lastCompatibleThenExpr = null;
+    Expr lastCompatibleWhenExpr = null;
+    int loopEnd = children_.size();
+    if (hasElseExpr_) {
+      --loopEnd;
+    }
+    int loopStart;
+    Expr caseExpr = null;
+    // Set loop start, and initialize whenType as the type of the case expr.
+    if (hasCaseExpr_) {
+      loopStart = 1;
+      castCharToString(0);
+      caseExpr = children_.get(0);
+      caseExpr.analyze(analyzer);
+      whenType = caseExpr.getType();
+      lastCompatibleWhenExpr = children_.get(0);
+    } else {
+      whenType = Type.BOOLEAN;
+      loopStart = 0;
+    }
+
+    // Go through when/then exprs and determine compatible types.
+    for (int i = loopStart; i < loopEnd; i += 2) {
+      castCharToString(i);
+      Expr whenExpr = children_.get(i);
+      if (hasCaseExpr_) {
+        // Determine maximum compatible type of the case expr,
+        // and all when exprs seen so far. We will add casts to them at the very end.
+        whenType = analyzer.getCompatibleType(whenType,
+            lastCompatibleWhenExpr, whenExpr);
+        lastCompatibleWhenExpr = whenExpr;
+      } else {
+        // If no case expr was given, then the when exprs should always return
+        // boolean or be castable to boolean.
+        if (!Type.isImplicitlyCastable(whenExpr.getType(), Type.BOOLEAN, false)) {
+          Preconditions.checkState(isCase());
+          throw new AnalysisException("When expr '" + whenExpr.toSql() + "'" +
+              " is not of type boolean and not castable to type boolean.");
+        }
+        // Add a cast if necessary.
+        if (!whenExpr.getType().isBoolean()) castChild(Type.BOOLEAN, i);
+      }
+      // Determine maximum compatible type of the then exprs seen so far.
+      // We will add casts to them at the very end.
+      Expr thenExpr = children_.get(i + 1);
+      returnType = analyzer.getCompatibleType(returnType,
+          lastCompatibleThenExpr, thenExpr);
+      lastCompatibleThenExpr = thenExpr;
+    }
+    if (hasElseExpr_) {
+      Expr elseExpr = children_.get(children_.size() - 1);
+      returnType = analyzer.getCompatibleType(returnType,
+          lastCompatibleThenExpr, elseExpr);
+    }
+
+    // Make sure BE doesn't see TYPE_NULL by picking an arbitrary type
+    if (whenType.isNull()) whenType = ScalarType.BOOLEAN;
+    if (returnType.isNull()) returnType = ScalarType.BOOLEAN;
+
+    // Add casts to case expr to compatible type.
+    if (hasCaseExpr_) {
+      // Cast case expr.
+      if (!children_.get(0).type_.equals(whenType)) {
+        castChild(whenType, 0);
+      }
+      // Add casts to when exprs to compatible type.
+      for (int i = loopStart; i < loopEnd; i += 2) {
+        if (!children_.get(i).type_.equals(whenType)) {
+          castChild(whenType, i);
+        }
+      }
+    }
+    // Cast then exprs to compatible type.
+    for (int i = loopStart + 1; i < children_.size(); i += 2) {
+      if (!children_.get(i).type_.equals(returnType)) {
+        castChild(returnType, i);
+      }
+    }
+    // Cast else expr to compatible type.
+    if (hasElseExpr_) {
+      if (!children_.get(children_.size() - 1).type_.equals(returnType)) {
+        castChild(returnType, children_.size() - 1);
+      }
+    }
+
+    // Do the function lookup just based on the whenType.
+    Type[] args = new Type[1];
+    args[0] = whenType;
+    fn_ = getBuiltinFunction(analyzer, "case", args,
+        CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
+    Preconditions.checkNotNull(fn_);
+    type_ = returnType;
+
+    // Compute cost as the sum of evaluating all of the WHEN exprs, plus
+    // the max of the THEN/ELSE exprs.
+    float maxThenCost = 0;
+    float whenCosts = 0;
+    boolean hasChildCosts = true;
+    for (int i = 0; i < children_.size(); ++i) {
+      if (!getChild(i).hasCost()) {
+        hasChildCosts = false;
+        break;
+      }
+
+      if (hasCaseExpr_ && i % 2 == 1) {
+        // This child is a WHEN expr. BINARY_PREDICATE_COST accounts for the cost of
+        // comparing the CASE expr to the WHEN expr.
+        whenCosts += getChild(0).getCost() + getChild(i).getCost() +
+          BINARY_PREDICATE_COST;
+      } else if (!hasCaseExpr_ && i % 2 == 0) {
+        // This child is a WHEN expr.
+        whenCosts += getChild(i).getCost();
+      } else if (i != 0) {
+        // This child is a THEN or ELSE expr.
+        float thenCost = getChild(i).getCost();
+        if (thenCost > maxThenCost) maxThenCost = thenCost;
+      }
+    }
+    if (hasChildCosts) {
+      evalCost_ =  whenCosts + maxThenCost;
+    }
+  }
+
+  private boolean isCase() { return !isDecode(); }
+  private boolean isDecode() { return decodeExpr_ != null; }
+  public boolean hasCaseExpr() { return hasCaseExpr_; }
+  public boolean hasElseExpr() { return hasElseExpr_; }
+
+  @Override
+  public Expr clone() { return new CaseExpr(this); }
+}
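The class comment above spells out the CASE equivalent of a DECODE call; the following standalone sketch (string building only, no Expr classes) shows the same pairing of key/value arguments, turning NULL keys into IS NULL checks and other keys into a null-safe comparison. The analyzer additionally simplifies non-NULL literal keys to a plain equality, which this sketch omits.

  import java.util.Arrays;
  import java.util.List;

  public class DecodeToCaseSketch {
    // DECODE(expr, key1, val1, ..., [default])
    //   -> CASE WHEN <match key1> THEN val1 ... [ELSE default] END
    static String decodeToCase(String expr, List<String> args) {
      StringBuilder sb = new StringBuilder("CASE");
      int i = 0;
      while (i + 2 <= args.size()) {
        String key = args.get(i++);
        String val = args.get(i++);
        String when;
        if (key.equalsIgnoreCase("NULL")) {
          // DECODE treats a NULL key as matching a NULL argument.
          when = expr + " IS NULL";
        } else {
          // Null-safe comparison: equal values, or both NULL.
          when = expr + " IS NULL AND " + key + " IS NULL OR " + expr + " = " + key;
        }
        sb.append(" WHEN ").append(when).append(" THEN ").append(val);
      }
      if (i < args.size()) sb.append(" ELSE ").append(args.get(i));  // default value
      sb.append(" END");
      return sb.toString();
    }

    public static void main(String[] args) {
      System.out.println(decodeToCase("foo",
          Arrays.asList("'bar'", "1", "col", "2", "NULL", "3", "4")));
    }
  }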

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/CaseWhenClause.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/CaseWhenClause.java b/fe/src/main/java/org/apache/impala/analysis/CaseWhenClause.java
new file mode 100644
index 0000000..8b1433e
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/CaseWhenClause.java
@@ -0,0 +1,42 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+
+/**
+ * Captures info of a single WHEN expr THEN expr clause.
+ *
+ */
+class CaseWhenClause {
+  private final Expr whenExpr_;
+  private final Expr thenExpr_;
+
+  public CaseWhenClause(Expr whenExpr, Expr thenExpr) {
+    super();
+    this.whenExpr_ = whenExpr;
+    this.thenExpr_ = thenExpr;
+  }
+
+  public Expr getWhenExpr() {
+    return whenExpr_;
+  }
+
+  public Expr getThenExpr() {
+    return thenExpr_;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/CastExpr.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/CastExpr.java b/fe/src/main/java/org/apache/impala/analysis/CastExpr.java
new file mode 100644
index 0000000..2b3b271
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/CastExpr.java
@@ -0,0 +1,312 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import com.cloudera.impala.catalog.Catalog;
+import com.cloudera.impala.catalog.Db;
+import com.cloudera.impala.catalog.Function;
+import com.cloudera.impala.catalog.Function.CompareMode;
+import com.cloudera.impala.catalog.PrimitiveType;
+import com.cloudera.impala.catalog.ScalarFunction;
+import com.cloudera.impala.catalog.ScalarType;
+import com.cloudera.impala.catalog.Type;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.thrift.TExpr;
+import com.cloudera.impala.thrift.TExprNode;
+import com.cloudera.impala.thrift.TExprNodeType;
+import com.google.common.base.Objects;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+
+public class CastExpr extends Expr {
+  // Only set for explicit casts. Null for implicit casts.
+  private final TypeDef targetTypeDef_;
+
+  // True if this is a "pre-analyzed" implicit cast.
+  private final boolean isImplicit_;
+
+  // True if this cast does not change the type.
+  private boolean noOp_ = false;
+
+  /**
+   * C'tor for "pre-analyzed" implicit casts.
+   */
+  public CastExpr(Type targetType, Expr e) {
+    super();
+    Preconditions.checkState(targetType.isValid());
+    Preconditions.checkNotNull(e);
+    type_ = targetType;
+    targetTypeDef_ = null;
+    isImplicit_ = true;
+    // replace existing implicit casts
+    if (e instanceof CastExpr) {
+      CastExpr castExpr = (CastExpr) e;
+      if (castExpr.isImplicit()) e = castExpr.getChild(0);
+    }
+    children_.add(e);
+
+    // Implicit casts don't call analyze()
+    // TODO: this doesn't seem like the cleanest approach but there are places
+    // we generate these (e.g. table loading) where there is no analyzer object.
+    try {
+      analyze();
+      computeNumDistinctValues();
+    } catch (AnalysisException ex) {
+      Preconditions.checkState(false,
+          "Implicit casts should never throw analysis exception.");
+    }
+    isAnalyzed_ = true;
+  }
+
+  /**
+   * C'tor for explicit casts.
+   */
+  public CastExpr(TypeDef targetTypeDef, Expr e) {
+    Preconditions.checkNotNull(targetTypeDef);
+    Preconditions.checkNotNull(e);
+    isImplicit_ = false;
+    targetTypeDef_ = targetTypeDef;
+    children_.add(e);
+  }
+
+  /**
+   * Copy c'tor used in clone().
+   */
+  protected CastExpr(CastExpr other) {
+    super(other);
+    targetTypeDef_ = other.targetTypeDef_;
+    isImplicit_ = other.isImplicit_;
+    noOp_ = other.noOp_;
+  }
+
+  private static String getFnName(Type targetType) {
+    return "castTo" + targetType.getPrimitiveType().toString();
+  }
+
+  public static void initBuiltins(Db db) {
+    for (Type fromType : Type.getSupportedTypes()) {
+      if (fromType.isNull()) continue;
+      for (Type toType : Type.getSupportedTypes()) {
+        if (toType.isNull()) continue;
+        // Disable casting from string to boolean
+        if (fromType.isStringType() && toType.isBoolean()) continue;
+        // Disable casting from boolean/timestamp to decimal
+        if ((fromType.isBoolean() || fromType.isDateType()) && toType.isDecimal()) {
+          continue;
+        }
+        if (fromType.getPrimitiveType() == PrimitiveType.STRING
+            && toType.getPrimitiveType() == PrimitiveType.CHAR) {
+          // Allow casting from String to Char(N)
+          String beSymbol = "impala::CastFunctions::CastToChar";
+          db.addBuiltin(ScalarFunction.createBuiltin(getFnName(ScalarType.CHAR),
+              Lists.newArrayList((Type) ScalarType.STRING), false, ScalarType.CHAR,
+              beSymbol, null, null, true));
+          continue;
+        }
+        if (fromType.getPrimitiveType() == PrimitiveType.CHAR
+            && toType.getPrimitiveType() == PrimitiveType.CHAR) {
+          // Allow casting from CHAR(N) to Char(N)
+          String beSymbol = "impala::CastFunctions::CastToChar";
+          db.addBuiltin(ScalarFunction.createBuiltin(getFnName(ScalarType.CHAR),
+              Lists.newArrayList((Type) ScalarType.createCharType(-1)), false,
+              ScalarType.CHAR, beSymbol, null, null, true));
+          continue;
+        }
+        if (fromType.getPrimitiveType() == PrimitiveType.VARCHAR
+            && toType.getPrimitiveType() == PrimitiveType.VARCHAR) {
+          // Allow casting from VARCHAR(N) to VARCHAR(M)
+          String beSymbol = "impala::CastFunctions::CastToStringVal";
+          db.addBuiltin(ScalarFunction.createBuiltin(getFnName(ScalarType.VARCHAR),
+              Lists.newArrayList((Type) ScalarType.VARCHAR), false, ScalarType.VARCHAR,
+              beSymbol, null, null, true));
+          continue;
+        }
+        if (fromType.getPrimitiveType() == PrimitiveType.VARCHAR
+            && toType.getPrimitiveType() == PrimitiveType.CHAR) {
+          // Allow casting from VARCHAR(N) to CHAR(M)
+          String beSymbol = "impala::CastFunctions::CastToChar";
+          db.addBuiltin(ScalarFunction.createBuiltin(getFnName(ScalarType.CHAR),
+              Lists.newArrayList((Type) ScalarType.VARCHAR), false, ScalarType.CHAR,
+              beSymbol, null, null, true));
+          continue;
+        }
+        if (fromType.getPrimitiveType() == PrimitiveType.CHAR
+            && toType.getPrimitiveType() == PrimitiveType.VARCHAR) {
+          // Allow casting from CHAR(N) to VARCHAR(M)
+          String beSymbol = "impala::CastFunctions::CastToStringVal";
+          db.addBuiltin(ScalarFunction.createBuiltin(getFnName(ScalarType.VARCHAR),
+              Lists.newArrayList((Type) ScalarType.CHAR), false, ScalarType.VARCHAR,
+              beSymbol, null, null, true));
+          continue;
+        }
+        // Disable no-op casts
+        if (fromType.equals(toType) && !fromType.isDecimal()) continue;
+        String beClass = toType.isDecimal() || fromType.isDecimal() ?
+            "DecimalOperators" : "CastFunctions";
+        String beSymbol = "impala::" + beClass + "::CastTo" + Function.getUdfType(toType);
+        db.addBuiltin(ScalarFunction.createBuiltin(getFnName(toType),
+            Lists.newArrayList(fromType), false, toType, beSymbol,
+            null, null, true));
+      }
+    }
+  }
+
+  @Override
+  public String toSqlImpl() {
+    if (isImplicit_) return getChild(0).toSql();
+    return "CAST(" + getChild(0).toSql() + " AS " + targetTypeDef_.toString() + ")";
+  }
+
+  @Override
+  protected void treeToThriftHelper(TExpr container) {
+    if (noOp_) {
+      getChild(0).treeToThriftHelper(container);
+      return;
+    }
+    super.treeToThriftHelper(container);
+  }
+
+  @Override
+  protected void toThrift(TExprNode msg) {
+    msg.node_type = TExprNodeType.FUNCTION_CALL;
+  }
+
+  @Override
+  public String debugString() {
+    return Objects.toStringHelper(this)
+        .add("isImplicit", isImplicit_)
+        .add("target", type_)
+        .addValue(super.debugString())
+        .toString();
+  }
+
+  public boolean isImplicit() { return isImplicit_; }
+
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    if (isAnalyzed_) return;
+    Preconditions.checkState(!isImplicit_);
+    super.analyze(analyzer);
+    targetTypeDef_.analyze(analyzer);
+    type_ = targetTypeDef_.getType();
+    analyze();
+  }
+
+  private void analyze() throws AnalysisException {
+    if (getChild(0).hasCost()) evalCost_ = getChild(0).getCost() + CAST_COST;
+
+    Preconditions.checkNotNull(type_);
+    if (type_.isComplexType()) {
+      throw new AnalysisException(
+          "Unsupported cast to complex type: " + type_.toSql());
+    }
+
+    boolean readyForCharCast =
+        children_.get(0).getType().getPrimitiveType() == PrimitiveType.STRING ||
+        children_.get(0).getType().getPrimitiveType() == PrimitiveType.CHAR;
+    if (type_.getPrimitiveType() == PrimitiveType.CHAR && !readyForCharCast) {
+      // Back end functions only exist to cast string types to CHAR, there is not a cast
+      // for every type since it is redundant with STRING. Such casts go through 2 casts:
+      // (1) cast to string, to stringify the value
+      // (2) cast to CHAR, to truncate or pad with spaces
+      CastExpr tostring = new CastExpr(ScalarType.STRING, children_.get(0));
+      tostring.analyze();
+      children_.set(0, tostring);
+    }
+
+    if (children_.get(0) instanceof NumericLiteral && type_.isFloatingPointType()) {
+      // Special case casting a decimal literal to a floating point number. The
+      // decimal literal can be interpreted as either and we want to avoid casts
+      // since that can result in loss of accuracy.
+      ((NumericLiteral)children_.get(0)).explicitlyCastToFloat(type_);
+    }
+
+    if (children_.get(0).getType().isNull()) {
+      // Make sure BE never sees TYPE_NULL
+      uncheckedCastChild(type_, 0);
+    }
+
+    // Ensure child has non-null type (even if it's a null literal). This is required
+    // for the UDF interface.
+    if (children_.get(0) instanceof NullLiteral) {
+      NullLiteral nullChild = (NullLiteral)(children_.get(0));
+      nullChild.uncheckedCastTo(type_);
+    }
+
+    Type childType = children_.get(0).type_;
+    Preconditions.checkState(!childType.isNull());
+    if (childType.equals(type_)) {
+      noOp_ = true;
+      return;
+    }
+
+    FunctionName fnName = new FunctionName(Catalog.BUILTINS_DB, getFnName(type_));
+    Type[] args = { childType };
+    Function searchDesc = new Function(fnName, args, Type.INVALID, false);
+    if (isImplicit_) {
+      fn_ = Catalog.getBuiltin(searchDesc, CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
+      Preconditions.checkState(fn_ != null);
+    } else {
+      fn_ = Catalog.getBuiltin(searchDesc, CompareMode.IS_IDENTICAL);
+      if (fn_ == null) {
+        // allow for promotion from CHAR to STRING; only if no exact match is found
+        fn_ = Catalog.getBuiltin(searchDesc.promoteCharsToStrings(),
+            CompareMode.IS_IDENTICAL);
+      }
+    }
+    if (fn_ == null) {
+      throw new AnalysisException("Invalid type cast of " + getChild(0).toSql() +
+          " from " + childType + " to " + type_);
+    }
+
+    Preconditions.checkState(type_.matchesType(fn_.getReturnType()),
+        type_ + " != " + fn_.getReturnType());
+  }
+
+  /**
+   * Returns child expr if this expr is an implicit cast, otherwise returns 'this'.
+   */
+  @Override
+  public Expr ignoreImplicitCast() {
+    if (isImplicit_) {
+      // we don't expect to see two consecutive implicit casts
+      Preconditions.checkState(
+          !(getChild(0) instanceof CastExpr) || !((CastExpr) getChild(0)).isImplicit());
+      return getChild(0);
+    } else {
+      return this;
+    }
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) return true;
+    if (obj instanceof CastExpr) {
+      CastExpr other = (CastExpr) obj;
+      return isImplicit_ == other.isImplicit_
+          && type_.equals(other.type_)
+          && super.equals(obj);
+    }
+    // Ignore implicit casts when comparing expr trees.
+    if (isImplicit_) return getChild(0).equals(obj);
+    return false;
+  }
+
+  @Override
+  public Expr clone() { return new CastExpr(this); }
+}
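
An aside on the CHAR cast handled in CastExpr.analyze() above: casting a non-string
value to CHAR(n) is rewritten as two casts, first to STRING and then to CHAR. The
standalone sketch below is illustrative only, is not part of this commit, and uses
no Impala classes; the helper name castToChar is hypothetical.

    // Sketch of CAST(x AS CHAR(n)) semantics: (1) stringify, (2) truncate or pad.
    public class CharCastSketch {
      static String castToChar(Object value, int n) {
        String s = String.valueOf(value);                 // (1) cast to STRING
        if (s.length() >= n) return s.substring(0, n);    // (2a) truncate
        StringBuilder sb = new StringBuilder(s);
        while (sb.length() < n) sb.append(' ');           // (2b) pad with spaces
        return sb.toString();
      }

      public static void main(String[] args) {
        System.out.println("[" + castToChar(42, 5) + "]");       // prints [42   ]
        System.out.println("[" + castToChar("abcdef", 3) + "]"); // prints [abc]
      }
    }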

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/CollectionStructType.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/CollectionStructType.java b/fe/src/main/java/org/apache/impala/analysis/CollectionStructType.java
new file mode 100644
index 0000000..b45b856
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/CollectionStructType.java
@@ -0,0 +1,79 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import java.util.ArrayList;
+
+import com.google.common.base.Preconditions;
+
+import com.cloudera.impala.catalog.ArrayType;
+import com.cloudera.impala.catalog.MapType;
+import com.cloudera.impala.catalog.ScalarType;
+import com.cloudera.impala.catalog.StructField;
+import com.cloudera.impala.catalog.StructType;
+import com.cloudera.impala.catalog.Type;
+import com.google.common.collect.Lists;
+
+/**
+ * Generated struct type describing the fields of a collection type
+ * that can be referenced in paths.
+ *
+ * Parent Type      CollectionStructType
+ * array<i>    -->  struct<item:i,pos:bigint>
+ * map<k,v>    -->  struct<key:k,value:v>
+ */
+public class CollectionStructType extends StructType {
+  // True if this struct describes the fields of a map,
+  // false if it describes the fields of an array.
+  private final boolean isMapStruct_;
+
+  // Field that can be skipped by implicit paths if its type is a struct.
+  private final StructField optionalField_;
+
+  private CollectionStructType(ArrayList<StructField> fields, boolean isMapStruct) {
+    super(fields);
+    isMapStruct_ = isMapStruct;
+    if (isMapStruct_) {
+      optionalField_ = getField(Path.MAP_VALUE_FIELD_NAME);
+    } else {
+      optionalField_ = getField(Path.ARRAY_ITEM_FIELD_NAME);
+    }
+    Preconditions.checkNotNull(optionalField_);
+  }
+
+  public static CollectionStructType createArrayStructType(ArrayType arrayType) {
+    Type itemType = arrayType.getItemType();
+    ArrayList<StructField> fields = Lists.newArrayListWithCapacity(2);
+    // The item field name comes before the pos field name so that a path to the
+    // stored item corresponds to its physical path.
+    fields.add(new StructField(Path.ARRAY_ITEM_FIELD_NAME, itemType));
+    fields.add(new StructField(Path.ARRAY_POS_FIELD_NAME, ScalarType.BIGINT));
+    return new CollectionStructType(fields, false);
+  }
+
+  public static CollectionStructType createMapStructType(MapType mapType) {
+    ArrayList<StructField> mapFields = Lists.newArrayListWithCapacity(2);
+    mapFields.add(new StructField(Path.MAP_KEY_FIELD_NAME, mapType.getKeyType()));
+    mapFields.add(new StructField(Path.MAP_VALUE_FIELD_NAME, mapType.getValueType()));
+    return new CollectionStructType(mapFields, true);
+  }
+
+  public StructField getOptionalField() { return optionalField_; }
+  public boolean isMapStruct() { return isMapStruct_; }
+  public boolean isArrayStruct() { return !isMapStruct_; }
+}
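
To make the generated layouts above concrete, here is a hedged usage sketch
(illustrative only, not part of this commit): assume arrayCol is an existing
ArrayType for array<int> and mapCol an existing MapType for map<string,bigint>,
both obtained elsewhere, e.g. from a table schema.

    // array<int> -> struct<item:int, pos:bigint>; "item" is the optional field.
    CollectionStructType arrStruct = CollectionStructType.createArrayStructType(arrayCol);
    StructField arrOpt = arrStruct.getOptionalField();   // the "item" field

    // map<string,bigint> -> struct<key:string, value:bigint>; "value" is optional.
    CollectionStructType mapStruct = CollectionStructType.createMapStructType(mapCol);
    StructField mapOpt = mapStruct.getOptionalField();   // the "value" field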

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/CollectionTableRef.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/CollectionTableRef.java b/fe/src/main/java/org/apache/impala/analysis/CollectionTableRef.java
new file mode 100644
index 0000000..8abed3e
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/CollectionTableRef.java
@@ -0,0 +1,138 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import com.cloudera.impala.authorization.Privilege;
+import com.cloudera.impala.authorization.PrivilegeRequestBuilder;
+import com.cloudera.impala.common.AnalysisException;
+import com.google.common.base.Preconditions;
+
+/**
+ * Reference to a MAP or ARRAY collection type that implies its
+ * flattening during execution.
+ * TODO: We currently create a new slot in the root tuple descriptor for every
+ * relative collection ref, even if they have the same path. The BE currently relies on
+ * this behavior for setting collection slots to NULL after they have been unnested
+ * inside a SubplanNode. We could instead share the slot and the corresponding item tuple
+ * descriptor among all collection table refs with the same path. This change will
+ * require decoupling tuple descriptors from table aliases, i.e., a tuple descriptor
+ * should be able to back multiple aliases.
+ */
+public class CollectionTableRef extends TableRef {
+  /////////////////////////////////////////
+  // BEGIN: Members that need to be reset()
+
+  // Expr that returns the referenced collection. Typically a SlotRef into the
+  // parent scan's tuple. Result of analysis. Fully resolved against base tables.
+  private Expr collectionExpr_;
+
+  // END: Members that need to be reset()
+  /////////////////////////////////////////
+
+  /**
+   * Create a CollectionTableRef from the original unresolved table ref as well as
+   * its resolved path. Sets table aliases and join-related attributes.
+   */
+  public CollectionTableRef(TableRef tableRef, Path resolvedPath) {
+    super(tableRef);
+    Preconditions.checkState(resolvedPath.isResolved());
+    resolvedPath_ = resolvedPath;
+    // Use the last path element as an implicit alias if no explicit alias was given.
+    if (hasExplicitAlias()) return;
+    String implicitAlias = rawPath_.get(rawPath_.size() - 1).toLowerCase();
+    aliases_ = new String[] { implicitAlias };
+  }
+
+  /**
+   * C'tor for cloning.
+   */
+  public CollectionTableRef(CollectionTableRef other) {
+    super(other);
+    collectionExpr_ =
+        (other.collectionExpr_ != null) ? other.collectionExpr_.clone() : null;
+  }
+
+  /**
+   * Registers this collection table ref with the given analyzer and adds a slot
+   * descriptor for the materialized collection, to be populated by the parent scan.
+   * Also determines whether this collection table ref is correlated or not.
+   *
+   * If this function is called in the context of analyzing a WITH clause, then
+   * no slot is added to the parent descriptor so as to not pollute the analysis
+   * state of the parent block (the WITH-clause analyzer is discarded, and the
+   * parent analyzer could have an entirely different global state).
+   */
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    if (isAnalyzed_) return;
+    desc_ = analyzer.registerTableRef(this);
+    if (isRelative() && !analyzer.isWithClause()) {
+      SlotDescriptor parentSlotDesc = analyzer.registerSlotRef(resolvedPath_);
+      parentSlotDesc.setItemTupleDesc(desc_);
+      collectionExpr_ = new SlotRef(parentSlotDesc);
+      // Must always be materialized to ensure the correct cardinality after unnesting.
+      analyzer.materializeSlots(collectionExpr_);
+      Analyzer parentAnalyzer =
+          analyzer.findAnalyzer(resolvedPath_.getRootDesc().getId());
+      Preconditions.checkNotNull(parentAnalyzer);
+      if (parentAnalyzer != analyzer) {
+        TableRef parentRef =
+            parentAnalyzer.getTableRef(resolvedPath_.getRootDesc().getId());
+        Preconditions.checkNotNull(parentRef);
+        // InlineViews are currently not supported as a parent ref.
+        Preconditions.checkState(!(parentRef instanceof InlineViewRef));
+        correlatedTupleIds_.add(parentRef.getId());
+      }
+    }
+    if (!isRelative()) {
+      // Register a table-level privilege request as well as a column-level
+      // privilege request for the collection-typed column.
+      Preconditions.checkNotNull(resolvedPath_.getRootTable());
+      analyzer.registerAuthAndAuditEvent(resolvedPath_.getRootTable(), analyzer);
+      analyzer.registerPrivReq(new PrivilegeRequestBuilder().
+          allOf(Privilege.SELECT).onColumn(desc_.getTableName().getDb(),
+          desc_.getTableName().getTbl(), desc_.getPath().getRawPath().get(0))
+          .toRequest());
+    }
+    isAnalyzed_ = true;
+    analyzeHints(analyzer);
+
+    // TODO: For joins on nested collections some join ops can be simplified
+    // due to the containment relationship of the parent and child. For example,
+    // a FULL OUTER JOIN would become a LEFT OUTER JOIN, or a RIGHT SEMI JOIN
+    // would become an INNER or CROSS JOIN.
+    analyzeJoin(analyzer);
+  }
+
+  @Override
+  public boolean isRelative() {
+    Preconditions.checkNotNull(resolvedPath_);
+    return resolvedPath_.getRootDesc() != null;
+  }
+
+  public Expr getCollectionExpr() { return collectionExpr_; }
+
+  @Override
+  protected CollectionTableRef clone() { return new CollectionTableRef(this); }
+
+  @Override
+  public void reset() {
+    super.reset();
+    collectionExpr_ = null;
+  }
+}
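
The class comment above says a collection table ref implies flattening (unnesting)
the MAP or ARRAY during execution. The standalone sketch below, which is not Impala
code and not part of this commit, shows the effect for an array column: one output
row per element, carrying the item and pos fields that CollectionStructType exposes;
a row whose array is empty contributes nothing, roughly the inner-join behavior of
FROM t, t.tags.

    import java.util.List;

    public class UnnestSketch {
      public static void main(String[] args) {
        // Two "parent" rows, each with an array<string> column named tags.
        int[] ids = {1, 2};
        List<List<String>> tags = List.of(List.of("a", "b"), List.of());

        for (int i = 0; i < ids.length; i++) {
          List<String> items = tags.get(i);
          // Flattening: emit one row per array element (none for an empty array).
          for (int pos = 0; pos < items.size(); pos++) {
            System.out.printf("id=%d item=%s pos=%d%n", ids[i], items.get(pos), pos);
          }
        }
      }
    }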

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/ColumnDef.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/ColumnDef.java b/fe/src/main/java/org/apache/impala/analysis/ColumnDef.java
new file mode 100644
index 0000000..e7a3170
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/ColumnDef.java
@@ -0,0 +1,143 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import java.util.List;
+
+import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+
+import com.cloudera.impala.catalog.Type;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.thrift.TColumn;
+import com.cloudera.impala.util.MetaStoreUtil;
+import com.google.common.base.Function;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+
+/**
+ * Represents a column definition in a CREATE/ALTER TABLE/VIEW statement.
+ * Column definitions in CREATE/ALTER TABLE statements require a column type,
+ * whereas column definitions in CREATE/ALTER VIEW statements infer the column type from
+ * the corresponding view definition. All column definitions have an optional comment.
+ * Since a column definition refers to a column stored in the Metastore, the column name
+ * must be valid according to the Metastore's rules (see @MetaStoreUtils).
+ */
+public class ColumnDef {
+  private final String colName_;
+  private String comment_;
+
+  // Required in CREATE/ALTER TABLE stmts. Set to NULL in CREATE/ALTER VIEW stmts,
+  // for which we setType() after analyzing the defining view definition stmt.
+  private final TypeDef typeDef_;
+  private Type type_;
+
+  public ColumnDef(String colName, TypeDef typeDef, String comment) {
+    colName_ = colName.toLowerCase();
+    typeDef_ = typeDef;
+    comment_ = comment;
+  }
+
+  /**
+   * Creates an analyzed ColumnDef from a Hive FieldSchema. Throws if the FieldSchema's
+   * type is not supported.
+   */
+  private ColumnDef(FieldSchema fs) throws AnalysisException {
+    Type type = Type.parseColumnType(fs.getType());
+    if (type == null) {
+      throw new AnalysisException(String.format(
+          "Unsupported type '%s' in Hive field schema '%s'",
+          fs.getType(), fs.getName()));
+    }
+    colName_ = fs.getName();
+    typeDef_ = new TypeDef(type);
+    comment_ = fs.getComment();
+    analyze();
+  }
+
+  public void setType(Type type) { type_ = type; }
+  public Type getType() { return type_; }
+  public TypeDef getTypeDef() { return typeDef_; }
+  public String getColName() { return colName_; }
+  public void setComment(String comment) { comment_ = comment; }
+  public String getComment() { return comment_; }
+
+  public void analyze() throws AnalysisException {
+    // Check whether the column name meets the Metastore's requirements.
+    if (!MetaStoreUtils.validateName(colName_)) {
+      throw new AnalysisException("Invalid column/field name: " + colName_);
+    }
+    if (typeDef_ != null) {
+      typeDef_.analyze(null);
+      type_ = typeDef_.getType();
+    }
+    Preconditions.checkNotNull(type_);
+    Preconditions.checkState(type_.isValid());
+    // Check HMS constraints of type and comment.
+    String typeSql = type_.toSql();
+    if (typeSql.length() > MetaStoreUtil.MAX_TYPE_NAME_LENGTH) {
+      throw new AnalysisException(String.format(
+          "Type of column '%s' exceeds maximum type length of %d characters:\n" +
+          "%s has %d characters.", colName_, MetaStoreUtil.MAX_TYPE_NAME_LENGTH,
+          typeSql, typeSql.length()));
+    }
+    if (comment_ != null &&
+        comment_.length() > MetaStoreUtil.CREATE_MAX_COMMENT_LENGTH) {
+      throw new AnalysisException(String.format(
+          "Comment of column '%s' exceeds maximum length of %d characters:\n" +
+          "%s has %d characters.", colName_, MetaStoreUtil.CREATE_MAX_COMMENT_LENGTH,
+          comment_, comment_.length()));
+    }
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder(colName_);
+    if (type_ != null) {
+      sb.append(" " + type_.toString());
+    } else {
+      sb.append(" " + typeDef_.toString());
+    }
+    if (comment_ != null) sb.append(String.format(" COMMENT '%s'", comment_));
+    return sb.toString();
+  }
+
+  public TColumn toThrift() {
+    TColumn col = new TColumn(getColName(), type_.toThrift());
+    col.setComment(getComment());
+    return col;
+  }
+
+  public static List<ColumnDef> createFromFieldSchemas(List<FieldSchema> fieldSchemas)
+      throws AnalysisException {
+    List<ColumnDef> result = Lists.newArrayListWithCapacity(fieldSchemas.size());
+    for (FieldSchema fs: fieldSchemas) result.add(new ColumnDef(fs));
+    return result;
+  }
+
+  public static List<FieldSchema> toFieldSchemas(List<ColumnDef> colDefs) {
+    return Lists.transform(colDefs, new Function<ColumnDef, FieldSchema>() {
+      public FieldSchema apply(ColumnDef colDef) {
+        Preconditions.checkNotNull(colDef.getType());
+        return new FieldSchema(colDef.getColName(), colDef.getType().toSql(),
+            colDef.getComment());
+      }
+    });
+  }
+
+}
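
As a quick illustration of the checks in ColumnDef.analyze() above (Metastore name
validity plus type-string and comment length limits), a hedged usage sketch that is
not part of this commit; it assumes the frontend classes from this diff are on the
classpath and that ScalarType.createDecimalType(9, 2) is the usual factory for a
DECIMAL(9,2) type.

    // Column definition as written in a CREATE TABLE statement:
    //   price DECIMAL(9,2) COMMENT 'unit price in USD'
    ColumnDef col = new ColumnDef("price",
        new TypeDef(ScalarType.createDecimalType(9, 2)), "unit price in USD");
    col.analyze();  // throws AnalysisException on a bad name or oversized type/comment
    // Conversion to the Hive Metastore representation:
    FieldSchema fs = ColumnDef.toFieldSchemas(Lists.newArrayList(col)).get(0);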



[55/61] incubator-impala git commit: IMPALA-3786: Replace "cloudera" with "apache" (part 2)

Posted by kw...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/catalog/Type.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/Type.java b/fe/src/main/java/org/apache/impala/catalog/Type.java
index 0162ec6..91fc2e3 100644
--- a/fe/src/main/java/org/apache/impala/catalog/Type.java
+++ b/fe/src/main/java/org/apache/impala/catalog/Type.java
@@ -15,23 +15,23 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog;
+package org.apache.impala.catalog;
 
 import java.io.StringReader;
 import java.util.ArrayList;
 import java.util.List;
 
-import com.cloudera.impala.analysis.CreateTableStmt;
-import com.cloudera.impala.analysis.SqlParser;
-import com.cloudera.impala.analysis.SqlScanner;
-import com.cloudera.impala.analysis.TypeDef;
-import com.cloudera.impala.common.Pair;
-import com.cloudera.impala.thrift.TColumnType;
-import com.cloudera.impala.thrift.TPrimitiveType;
-import com.cloudera.impala.thrift.TScalarType;
-import com.cloudera.impala.thrift.TStructField;
-import com.cloudera.impala.thrift.TTypeNode;
-import com.cloudera.impala.thrift.TTypeNodeType;
+import org.apache.impala.analysis.CreateTableStmt;
+import org.apache.impala.analysis.SqlParser;
+import org.apache.impala.analysis.SqlScanner;
+import org.apache.impala.analysis.TypeDef;
+import org.apache.impala.common.Pair;
+import org.apache.impala.thrift.TColumnType;
+import org.apache.impala.thrift.TPrimitiveType;
+import org.apache.impala.thrift.TScalarType;
+import org.apache.impala.thrift.TStructField;
+import org.apache.impala.thrift.TTypeNode;
+import org.apache.impala.thrift.TTypeNodeType;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/catalog/View.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/View.java b/fe/src/main/java/org/apache/impala/catalog/View.java
index cc82f95..46f089e 100644
--- a/fe/src/main/java/org/apache/impala/catalog/View.java
+++ b/fe/src/main/java/org/apache/impala/catalog/View.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog;
+package org.apache.impala.catalog;
 
 import java.io.StringReader;
 import java.util.List;
@@ -24,14 +24,14 @@ import java.util.Set;
 import org.apache.hadoop.hive.metastore.IMetaStoreClient;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 
-import com.cloudera.impala.analysis.ParseNode;
-import com.cloudera.impala.analysis.QueryStmt;
-import com.cloudera.impala.analysis.SqlParser;
-import com.cloudera.impala.analysis.SqlScanner;
-import com.cloudera.impala.thrift.TCatalogObjectType;
-import com.cloudera.impala.thrift.TTable;
-import com.cloudera.impala.thrift.TTableDescriptor;
-import com.cloudera.impala.thrift.TTableType;
+import org.apache.impala.analysis.ParseNode;
+import org.apache.impala.analysis.QueryStmt;
+import org.apache.impala.analysis.SqlParser;
+import org.apache.impala.analysis.SqlScanner;
+import org.apache.impala.thrift.TCatalogObjectType;
+import org.apache.impala.thrift.TTable;
+import org.apache.impala.thrift.TTableDescriptor;
+import org.apache.impala.thrift.TTableType;
 import com.google.common.collect.Lists;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/catalog/delegates/DdlDelegate.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/delegates/DdlDelegate.java b/fe/src/main/java/org/apache/impala/catalog/delegates/DdlDelegate.java
index a21bd90..6c3ba8e 100644
--- a/fe/src/main/java/org/apache/impala/catalog/delegates/DdlDelegate.java
+++ b/fe/src/main/java/org/apache/impala/catalog/delegates/DdlDelegate.java
@@ -15,15 +15,15 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog.delegates;
+package org.apache.impala.catalog.delegates;
 
 import java.util.List;
 
-import com.cloudera.impala.thrift.TDistributeParam;
+import org.apache.impala.thrift.TDistributeParam;
 import org.apache.hadoop.hive.metastore.api.Table;
 
-import com.cloudera.impala.common.ImpalaRuntimeException;
-import com.cloudera.impala.thrift.TAlterTableParams;
+import org.apache.impala.common.ImpalaRuntimeException;
+import org.apache.impala.thrift.TAlterTableParams;
 
 /**
  * Abstract class to implement the storage specific portion of DDL requests.

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/catalog/delegates/KuduDdlDelegate.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/delegates/KuduDdlDelegate.java b/fe/src/main/java/org/apache/impala/catalog/delegates/KuduDdlDelegate.java
index ecfeb1a..8410868 100644
--- a/fe/src/main/java/org/apache/impala/catalog/delegates/KuduDdlDelegate.java
+++ b/fe/src/main/java/org/apache/impala/catalog/delegates/KuduDdlDelegate.java
@@ -15,11 +15,11 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog.delegates;
+package org.apache.impala.catalog.delegates;
 
-import static com.cloudera.impala.util.KuduUtil.compareSchema;
-import static com.cloudera.impala.util.KuduUtil.fromImpalaType;
-import static com.cloudera.impala.util.KuduUtil.parseKeyColumns;
+import static org.apache.impala.util.KuduUtil.compareSchema;
+import static org.apache.impala.util.KuduUtil.fromImpalaType;
+import static org.apache.impala.util.KuduUtil.parseKeyColumns;
 
 import java.util.ArrayList;
 import java.util.HashSet;
@@ -38,10 +38,10 @@ import org.apache.kudu.client.PartialRow;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.catalog.KuduTable;
-import com.cloudera.impala.common.ImpalaRuntimeException;
-import com.cloudera.impala.thrift.TDistributeParam;
-import com.cloudera.impala.util.KuduUtil;
+import org.apache.impala.catalog.KuduTable;
+import org.apache.impala.common.ImpalaRuntimeException;
+import org.apache.impala.thrift.TDistributeParam;
+import org.apache.impala.util.KuduUtil;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Strings;
 import com.google.common.collect.Lists;
@@ -100,7 +100,7 @@ public class KuduDdlDelegate extends DdlDelegate {
       // Create a new Schema and map the types accordingly
       ArrayList<ColumnSchema> columns = Lists.newArrayList();
       for (FieldSchema fieldSchema: msTbl_.getSd().getCols()) {
-        com.cloudera.impala.catalog.Type catalogType = com.cloudera.impala.catalog.Type
+        org.apache.impala.catalog.Type catalogType = org.apache.impala.catalog.Type
             .parseColumnType(fieldSchema.getType());
         if (catalogType == null) {
           throw new ImpalaRuntimeException(String.format(

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/catalog/delegates/UnsupportedOpDelegate.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/delegates/UnsupportedOpDelegate.java b/fe/src/main/java/org/apache/impala/catalog/delegates/UnsupportedOpDelegate.java
index 52267be..8aabaa4 100644
--- a/fe/src/main/java/org/apache/impala/catalog/delegates/UnsupportedOpDelegate.java
+++ b/fe/src/main/java/org/apache/impala/catalog/delegates/UnsupportedOpDelegate.java
@@ -15,9 +15,9 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.catalog.delegates;
+package org.apache.impala.catalog.delegates;
 
-import com.cloudera.impala.common.ImpalaRuntimeException;
+import org.apache.impala.common.ImpalaRuntimeException;
 
 /**
  * Empty implementation for the DdlDelegate interface that does nothing.

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/common/AliasGenerator.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/common/AliasGenerator.java b/fe/src/main/java/org/apache/impala/common/AliasGenerator.java
index 2d75d18..b199b54 100644
--- a/fe/src/main/java/org/apache/impala/common/AliasGenerator.java
+++ b/fe/src/main/java/org/apache/impala/common/AliasGenerator.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.common;
+package org.apache.impala.common;
 
 import java.util.Set;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/common/AnalysisException.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/common/AnalysisException.java b/fe/src/main/java/org/apache/impala/common/AnalysisException.java
index 464cfa0..cbc4f00 100644
--- a/fe/src/main/java/org/apache/impala/common/AnalysisException.java
+++ b/fe/src/main/java/org/apache/impala/common/AnalysisException.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.common;
+package org.apache.impala.common;
 
 /**
  * Thrown for errors encountered during analysis of a SQL statement.

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/common/ByteUnits.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/common/ByteUnits.java b/fe/src/main/java/org/apache/impala/common/ByteUnits.java
index f8b43ab..51aeb2f 100644
--- a/fe/src/main/java/org/apache/impala/common/ByteUnits.java
+++ b/fe/src/main/java/org/apache/impala/common/ByteUnits.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.common;
+package org.apache.impala.common;
 
 /**
  * Byte unit constants.

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/common/ColumnAliasGenerator.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/common/ColumnAliasGenerator.java b/fe/src/main/java/org/apache/impala/common/ColumnAliasGenerator.java
index 51c8ece..49c0f7d 100644
--- a/fe/src/main/java/org/apache/impala/common/ColumnAliasGenerator.java
+++ b/fe/src/main/java/org/apache/impala/common/ColumnAliasGenerator.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.common;
+package org.apache.impala.common;
 
 import java.util.List;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/common/FileSystemUtil.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/common/FileSystemUtil.java b/fe/src/main/java/org/apache/impala/common/FileSystemUtil.java
index 2239853..fbf8478 100644
--- a/fe/src/main/java/org/apache/impala/common/FileSystemUtil.java
+++ b/fe/src/main/java/org/apache/impala/common/FileSystemUtil.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.common;
+package org.apache.impala.common;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/common/Id.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/common/Id.java b/fe/src/main/java/org/apache/impala/common/Id.java
index 88f626f..3eee186 100644
--- a/fe/src/main/java/org/apache/impala/common/Id.java
+++ b/fe/src/main/java/org/apache/impala/common/Id.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.common;
+package org.apache.impala.common;
 
 import java.util.ArrayList;
 import java.util.List;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/common/IdGenerator.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/common/IdGenerator.java b/fe/src/main/java/org/apache/impala/common/IdGenerator.java
index cdece7d..f0991f1 100644
--- a/fe/src/main/java/org/apache/impala/common/IdGenerator.java
+++ b/fe/src/main/java/org/apache/impala/common/IdGenerator.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.common;
+package org.apache.impala.common;
 
 /**
  * Generator of consecutively numbered integers to be used as ids by subclasses of Id.

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/common/ImpalaException.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/common/ImpalaException.java b/fe/src/main/java/org/apache/impala/common/ImpalaException.java
index 6a50bf8..100f682 100644
--- a/fe/src/main/java/org/apache/impala/common/ImpalaException.java
+++ b/fe/src/main/java/org/apache/impala/common/ImpalaException.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.common;
+package org.apache.impala.common;
 
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/common/ImpalaRuntimeException.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/common/ImpalaRuntimeException.java b/fe/src/main/java/org/apache/impala/common/ImpalaRuntimeException.java
index 27a8925..8b74c68 100644
--- a/fe/src/main/java/org/apache/impala/common/ImpalaRuntimeException.java
+++ b/fe/src/main/java/org/apache/impala/common/ImpalaRuntimeException.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.common;
+package org.apache.impala.common;
 
 /**
  * Thrown for errors encountered during the execution of a SQL statement.

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/common/InternalException.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/common/InternalException.java b/fe/src/main/java/org/apache/impala/common/InternalException.java
index 64a8d5e..775a532 100644
--- a/fe/src/main/java/org/apache/impala/common/InternalException.java
+++ b/fe/src/main/java/org/apache/impala/common/InternalException.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.common;
+package org.apache.impala.common;
 
 /**
  * Thrown for internal server errors.

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/common/JniUtil.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/common/JniUtil.java b/fe/src/main/java/org/apache/impala/common/JniUtil.java
index 2f11c52..4446a55 100644
--- a/fe/src/main/java/org/apache/impala/common/JniUtil.java
+++ b/fe/src/main/java/org/apache/impala/common/JniUtil.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.common;
+package org.apache.impala.common;
 
 import java.io.IOException;
 import java.io.PrintWriter;
@@ -33,9 +33,9 @@ import org.apache.thrift.TException;
 import org.apache.thrift.protocol.TBinaryProtocol;
 import org.apache.thrift.protocol.TProtocolFactory;
 
-import com.cloudera.impala.thrift.TGetJvmMetricsRequest;
-import com.cloudera.impala.thrift.TGetJvmMetricsResponse;
-import com.cloudera.impala.thrift.TJvmMemoryPool;
+import org.apache.impala.thrift.TGetJvmMetricsRequest;
+import org.apache.impala.thrift.TGetJvmMetricsResponse;
+import org.apache.impala.thrift.TJvmMemoryPool;
 
 /**
  * Utility class with methods intended for JNI clients

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/common/NotImplementedException.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/common/NotImplementedException.java b/fe/src/main/java/org/apache/impala/common/NotImplementedException.java
index b06a130..8549242 100644
--- a/fe/src/main/java/org/apache/impala/common/NotImplementedException.java
+++ b/fe/src/main/java/org/apache/impala/common/NotImplementedException.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.common;
+package org.apache.impala.common;
 
 /**
  * Thrown for SQL statements that require as yet unimplemented functionality.

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/common/Pair.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/common/Pair.java b/fe/src/main/java/org/apache/impala/common/Pair.java
index c17c535..685d8e5 100644
--- a/fe/src/main/java/org/apache/impala/common/Pair.java
+++ b/fe/src/main/java/org/apache/impala/common/Pair.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.common;
+package org.apache.impala.common;
 
 /**
  * The equivalent of C++'s std::pair<>.

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/common/PrintUtils.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/common/PrintUtils.java b/fe/src/main/java/org/apache/impala/common/PrintUtils.java
index 78e0d6d..3062161 100644
--- a/fe/src/main/java/org/apache/impala/common/PrintUtils.java
+++ b/fe/src/main/java/org/apache/impala/common/PrintUtils.java
@@ -15,13 +15,13 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.common;
+package org.apache.impala.common;
 
-import static com.cloudera.impala.common.ByteUnits.GIGABYTE;
-import static com.cloudera.impala.common.ByteUnits.KILOBYTE;
-import static com.cloudera.impala.common.ByteUnits.MEGABYTE;
-import static com.cloudera.impala.common.ByteUnits.PETABYTE;
-import static com.cloudera.impala.common.ByteUnits.TERABYTE;
+import static org.apache.impala.common.ByteUnits.GIGABYTE;
+import static org.apache.impala.common.ByteUnits.KILOBYTE;
+import static org.apache.impala.common.ByteUnits.MEGABYTE;
+import static org.apache.impala.common.ByteUnits.PETABYTE;
+import static org.apache.impala.common.ByteUnits.TERABYTE;
 
 import java.text.DecimalFormat;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/common/Reference.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/common/Reference.java b/fe/src/main/java/org/apache/impala/common/Reference.java
index aff1dae..a157b47 100644
--- a/fe/src/main/java/org/apache/impala/common/Reference.java
+++ b/fe/src/main/java/org/apache/impala/common/Reference.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.common;
+package org.apache.impala.common;
 
 /**
  * "Indirection layer" that allows returning an object via an output

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/common/RuntimeEnv.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/common/RuntimeEnv.java b/fe/src/main/java/org/apache/impala/common/RuntimeEnv.java
index febd657..af58b16 100644
--- a/fe/src/main/java/org/apache/impala/common/RuntimeEnv.java
+++ b/fe/src/main/java/org/apache/impala/common/RuntimeEnv.java
@@ -15,13 +15,13 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.common;
+package org.apache.impala.common;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.service.FeSupport;
-import com.cloudera.impala.thrift.TStartupOptions;
+import org.apache.impala.service.FeSupport;
+import org.apache.impala.thrift.TStartupOptions;
 
 /**
  * Contains runtime-specific parameters such as the number of CPU cores. Currently only

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/common/TableAliasGenerator.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/common/TableAliasGenerator.java b/fe/src/main/java/org/apache/impala/common/TableAliasGenerator.java
index 6ded288..8fe656c 100644
--- a/fe/src/main/java/org/apache/impala/common/TableAliasGenerator.java
+++ b/fe/src/main/java/org/apache/impala/common/TableAliasGenerator.java
@@ -15,9 +15,9 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.common;
+package org.apache.impala.common;
 
-import com.cloudera.impala.analysis.Analyzer;
+import org.apache.impala.analysis.Analyzer;
 
 import com.google.common.base.Preconditions;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/common/TreeNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/common/TreeNode.java b/fe/src/main/java/org/apache/impala/common/TreeNode.java
index ac8f297..adaee18 100644
--- a/fe/src/main/java/org/apache/impala/common/TreeNode.java
+++ b/fe/src/main/java/org/apache/impala/common/TreeNode.java
@@ -15,13 +15,13 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.common;
+package org.apache.impala.common;
 
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
 
-import com.cloudera.impala.util.Visitor;
+import org.apache.impala.util.Visitor;
 import com.google.common.base.Predicate;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/extdatasource/ApiVersion.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/extdatasource/ApiVersion.java b/fe/src/main/java/org/apache/impala/extdatasource/ApiVersion.java
index d2285a4..8d4af15 100644
--- a/fe/src/main/java/org/apache/impala/extdatasource/ApiVersion.java
+++ b/fe/src/main/java/org/apache/impala/extdatasource/ApiVersion.java
@@ -15,16 +15,16 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.extdatasource;
+package org.apache.impala.extdatasource;
 
-import com.cloudera.impala.extdatasource.v1.ExternalDataSource;
+import org.apache.impala.extdatasource.v1.ExternalDataSource;
 import com.google.common.base.Strings;
 
 /**
  * Enumerates the valid versions of the {@link ExternalDataSource} API.
  */
 public enum ApiVersion {
-  V1(com.cloudera.impala.extdatasource.v1.ExternalDataSource.class);
+  V1(org.apache.impala.extdatasource.v1.ExternalDataSource.class);
 
   private final Class<?> apiInterface_;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/extdatasource/ExternalDataSourceExecutor.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/extdatasource/ExternalDataSourceExecutor.java b/fe/src/main/java/org/apache/impala/extdatasource/ExternalDataSourceExecutor.java
index 3af470f..49d9426 100644
--- a/fe/src/main/java/org/apache/impala/extdatasource/ExternalDataSourceExecutor.java
+++ b/fe/src/main/java/org/apache/impala/extdatasource/ExternalDataSourceExecutor.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.extdatasource;
+package org.apache.impala.extdatasource;
 
 import java.io.File;
 import java.lang.reflect.Constructor;
@@ -30,21 +30,21 @@ import org.apache.thrift.protocol.TBinaryProtocol;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.common.ImpalaRuntimeException;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.common.JniUtil;
-import com.cloudera.impala.extdatasource.thrift.TCloseParams;
-import com.cloudera.impala.extdatasource.thrift.TCloseResult;
-import com.cloudera.impala.extdatasource.thrift.TGetNextParams;
-import com.cloudera.impala.extdatasource.thrift.TGetNextResult;
-import com.cloudera.impala.extdatasource.thrift.TOpenParams;
-import com.cloudera.impala.extdatasource.thrift.TOpenResult;
-import com.cloudera.impala.extdatasource.thrift.TPrepareParams;
-import com.cloudera.impala.extdatasource.thrift.TPrepareResult;
-import com.cloudera.impala.extdatasource.v1.ExternalDataSource;
-import com.cloudera.impala.thrift.TErrorCode;
-import com.cloudera.impala.thrift.TStatus;
+import org.apache.impala.common.ImpalaException;
+import org.apache.impala.common.ImpalaRuntimeException;
+import org.apache.impala.common.InternalException;
+import org.apache.impala.common.JniUtil;
+import org.apache.impala.extdatasource.thrift.TCloseParams;
+import org.apache.impala.extdatasource.thrift.TCloseResult;
+import org.apache.impala.extdatasource.thrift.TGetNextParams;
+import org.apache.impala.extdatasource.thrift.TGetNextResult;
+import org.apache.impala.extdatasource.thrift.TOpenParams;
+import org.apache.impala.extdatasource.thrift.TOpenResult;
+import org.apache.impala.extdatasource.thrift.TPrepareParams;
+import org.apache.impala.extdatasource.thrift.TPrepareResult;
+import org.apache.impala.extdatasource.v1.ExternalDataSource;
+import org.apache.impala.thrift.TErrorCode;
+import org.apache.impala.thrift.TStatus;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/hive/executor/ImpalaBigIntWritable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/hive/executor/ImpalaBigIntWritable.java b/fe/src/main/java/org/apache/impala/hive/executor/ImpalaBigIntWritable.java
index f410ddf..a7fde97 100644
--- a/fe/src/main/java/org/apache/impala/hive/executor/ImpalaBigIntWritable.java
+++ b/fe/src/main/java/org/apache/impala/hive/executor/ImpalaBigIntWritable.java
@@ -15,11 +15,11 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.hive.executor;
+package org.apache.impala.hive.executor;
 
 import org.apache.hadoop.io.LongWritable;
 
-import com.cloudera.impala.util.UnsafeUtil;
+import org.apache.impala.util.UnsafeUtil;
 
 @SuppressWarnings("restriction")
 public class ImpalaBigIntWritable extends LongWritable {

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/hive/executor/ImpalaBooleanWritable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/hive/executor/ImpalaBooleanWritable.java b/fe/src/main/java/org/apache/impala/hive/executor/ImpalaBooleanWritable.java
index f20f49e..9720715 100644
--- a/fe/src/main/java/org/apache/impala/hive/executor/ImpalaBooleanWritable.java
+++ b/fe/src/main/java/org/apache/impala/hive/executor/ImpalaBooleanWritable.java
@@ -15,11 +15,11 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.hive.executor;
+package org.apache.impala.hive.executor;
 
 import org.apache.hadoop.io.BooleanWritable;
 
-import com.cloudera.impala.util.UnsafeUtil;
+import org.apache.impala.util.UnsafeUtil;
 
 @SuppressWarnings("restriction")
 public class ImpalaBooleanWritable extends BooleanWritable {

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/hive/executor/ImpalaBytesWritable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/hive/executor/ImpalaBytesWritable.java b/fe/src/main/java/org/apache/impala/hive/executor/ImpalaBytesWritable.java
index ce6fc0c..94824e0 100644
--- a/fe/src/main/java/org/apache/impala/hive/executor/ImpalaBytesWritable.java
+++ b/fe/src/main/java/org/apache/impala/hive/executor/ImpalaBytesWritable.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.hive.executor;
+package org.apache.impala.hive.executor;
 
 import org.apache.hadoop.io.BytesWritable;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/hive/executor/ImpalaDoubleWritable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/hive/executor/ImpalaDoubleWritable.java b/fe/src/main/java/org/apache/impala/hive/executor/ImpalaDoubleWritable.java
index cc96895..2a0bd16 100644
--- a/fe/src/main/java/org/apache/impala/hive/executor/ImpalaDoubleWritable.java
+++ b/fe/src/main/java/org/apache/impala/hive/executor/ImpalaDoubleWritable.java
@@ -15,11 +15,11 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.hive.executor;
+package org.apache.impala.hive.executor;
 
 import org.apache.hadoop.hive.serde2.io.DoubleWritable;
 
-import com.cloudera.impala.util.UnsafeUtil;
+import org.apache.impala.util.UnsafeUtil;
 
 @SuppressWarnings("restriction")
 public class ImpalaDoubleWritable extends DoubleWritable {

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/hive/executor/ImpalaFloatWritable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/hive/executor/ImpalaFloatWritable.java b/fe/src/main/java/org/apache/impala/hive/executor/ImpalaFloatWritable.java
index b68ee69..c39f596 100644
--- a/fe/src/main/java/org/apache/impala/hive/executor/ImpalaFloatWritable.java
+++ b/fe/src/main/java/org/apache/impala/hive/executor/ImpalaFloatWritable.java
@@ -15,11 +15,11 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.hive.executor;
+package org.apache.impala.hive.executor;
 
 import org.apache.hadoop.io.FloatWritable;
 
-import com.cloudera.impala.util.UnsafeUtil;
+import org.apache.impala.util.UnsafeUtil;
 
 @SuppressWarnings("restriction")
 public class ImpalaFloatWritable extends FloatWritable {

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/hive/executor/ImpalaIntWritable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/hive/executor/ImpalaIntWritable.java b/fe/src/main/java/org/apache/impala/hive/executor/ImpalaIntWritable.java
index afe59c4..a084dc9 100644
--- a/fe/src/main/java/org/apache/impala/hive/executor/ImpalaIntWritable.java
+++ b/fe/src/main/java/org/apache/impala/hive/executor/ImpalaIntWritable.java
@@ -15,11 +15,11 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.hive.executor;
+package org.apache.impala.hive.executor;
 
 import org.apache.hadoop.io.IntWritable;
 
-import com.cloudera.impala.util.UnsafeUtil;
+import org.apache.impala.util.UnsafeUtil;
 
 @SuppressWarnings("restriction")
 public class ImpalaIntWritable extends IntWritable {

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/hive/executor/ImpalaSmallIntWritable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/hive/executor/ImpalaSmallIntWritable.java b/fe/src/main/java/org/apache/impala/hive/executor/ImpalaSmallIntWritable.java
index 5a8e346..c79e0d8 100644
--- a/fe/src/main/java/org/apache/impala/hive/executor/ImpalaSmallIntWritable.java
+++ b/fe/src/main/java/org/apache/impala/hive/executor/ImpalaSmallIntWritable.java
@@ -15,11 +15,11 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.hive.executor;
+package org.apache.impala.hive.executor;
 
 import org.apache.hadoop.hive.serde2.io.ShortWritable;
 
-import com.cloudera.impala.util.UnsafeUtil;
+import org.apache.impala.util.UnsafeUtil;
 
 @SuppressWarnings("restriction")
 public class ImpalaSmallIntWritable extends ShortWritable {

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/hive/executor/ImpalaStringWritable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/hive/executor/ImpalaStringWritable.java b/fe/src/main/java/org/apache/impala/hive/executor/ImpalaStringWritable.java
index a9dca74..2ef6adf 100644
--- a/fe/src/main/java/org/apache/impala/hive/executor/ImpalaStringWritable.java
+++ b/fe/src/main/java/org/apache/impala/hive/executor/ImpalaStringWritable.java
@@ -15,11 +15,11 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.hive.executor;
+package org.apache.impala.hive.executor;
 
 import java.nio.ByteBuffer;
 
-import com.cloudera.impala.util.UnsafeUtil;
+import org.apache.impala.util.UnsafeUtil;
 
 @SuppressWarnings("restriction")
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/hive/executor/ImpalaTextWritable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/hive/executor/ImpalaTextWritable.java b/fe/src/main/java/org/apache/impala/hive/executor/ImpalaTextWritable.java
index e17a4df..bae4537 100644
--- a/fe/src/main/java/org/apache/impala/hive/executor/ImpalaTextWritable.java
+++ b/fe/src/main/java/org/apache/impala/hive/executor/ImpalaTextWritable.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.hive.executor;
+package org.apache.impala.hive.executor;
 
 import org.apache.hadoop.io.Text;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/hive/executor/ImpalaTinyIntWritable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/hive/executor/ImpalaTinyIntWritable.java b/fe/src/main/java/org/apache/impala/hive/executor/ImpalaTinyIntWritable.java
index 17a7b14..a7315ac 100644
--- a/fe/src/main/java/org/apache/impala/hive/executor/ImpalaTinyIntWritable.java
+++ b/fe/src/main/java/org/apache/impala/hive/executor/ImpalaTinyIntWritable.java
@@ -15,11 +15,11 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.hive.executor;
+package org.apache.impala.hive.executor;
 
 import org.apache.hadoop.hive.serde2.io.ByteWritable;
 
-import com.cloudera.impala.util.UnsafeUtil;
+import org.apache.impala.util.UnsafeUtil;
 
 @SuppressWarnings("restriction")
 public class ImpalaTinyIntWritable extends ByteWritable {

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/hive/executor/UdfExecutor.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/hive/executor/UdfExecutor.java b/fe/src/main/java/org/apache/impala/hive/executor/UdfExecutor.java
index 242c704..df48cb7 100644
--- a/fe/src/main/java/org/apache/impala/hive/executor/UdfExecutor.java
+++ b/fe/src/main/java/org/apache/impala/hive/executor/UdfExecutor.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.hive.executor;
+package org.apache.impala.hive.executor;
 
 import java.io.File;
 import java.lang.reflect.Constructor;
@@ -40,14 +40,14 @@ import org.apache.hadoop.io.Writable;
 import org.apache.log4j.Logger;
 import org.apache.thrift.protocol.TBinaryProtocol;
 
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.catalog.PrimitiveType;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.common.ImpalaRuntimeException;
-import com.cloudera.impala.common.JniUtil;
-import com.cloudera.impala.thrift.THiveUdfExecutorCtorParams;
-import com.cloudera.impala.thrift.TPrimitiveType;
-import com.cloudera.impala.util.UnsafeUtil;
+import org.apache.impala.catalog.Type;
+import org.apache.impala.catalog.PrimitiveType;
+import org.apache.impala.common.ImpalaException;
+import org.apache.impala.common.ImpalaRuntimeException;
+import org.apache.impala.common.JniUtil;
+import org.apache.impala.thrift.THiveUdfExecutorCtorParams;
+import org.apache.impala.thrift.TPrimitiveType;
+import org.apache.impala.util.UnsafeUtil;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/planner/AggregationNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/AggregationNode.java b/fe/src/main/java/org/apache/impala/planner/AggregationNode.java
index f6bf8a0..57dbd8f 100644
--- a/fe/src/main/java/org/apache/impala/planner/AggregationNode.java
+++ b/fe/src/main/java/org/apache/impala/planner/AggregationNode.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.planner;
+package org.apache.impala.planner;
 
 import java.util.ArrayList;
 import java.util.List;
@@ -24,18 +24,18 @@ import java.util.Set;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.analysis.AggregateInfo;
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.analysis.FunctionCallExpr;
-import com.cloudera.impala.analysis.SlotId;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.thrift.TAggregationNode;
-import com.cloudera.impala.thrift.TExplainLevel;
-import com.cloudera.impala.thrift.TExpr;
-import com.cloudera.impala.thrift.TPlanNode;
-import com.cloudera.impala.thrift.TPlanNodeType;
-import com.cloudera.impala.thrift.TQueryOptions;
+import org.apache.impala.analysis.AggregateInfo;
+import org.apache.impala.analysis.Analyzer;
+import org.apache.impala.analysis.Expr;
+import org.apache.impala.analysis.FunctionCallExpr;
+import org.apache.impala.analysis.SlotId;
+import org.apache.impala.common.InternalException;
+import org.apache.impala.thrift.TAggregationNode;
+import org.apache.impala.thrift.TExplainLevel;
+import org.apache.impala.thrift.TExpr;
+import org.apache.impala.thrift.TPlanNode;
+import org.apache.impala.thrift.TPlanNodeType;
+import org.apache.impala.thrift.TQueryOptions;
 import com.google.common.base.Objects;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/planner/AnalyticEvalNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/AnalyticEvalNode.java b/fe/src/main/java/org/apache/impala/planner/AnalyticEvalNode.java
index ccbdaa2..6e07e79 100644
--- a/fe/src/main/java/org/apache/impala/planner/AnalyticEvalNode.java
+++ b/fe/src/main/java/org/apache/impala/planner/AnalyticEvalNode.java
@@ -15,24 +15,24 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.planner;
+package org.apache.impala.planner;
 
 import java.util.List;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.analysis.AnalyticWindow;
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.analysis.ExprSubstitutionMap;
-import com.cloudera.impala.analysis.OrderByElement;
-import com.cloudera.impala.analysis.TupleDescriptor;
-import com.cloudera.impala.thrift.TAnalyticNode;
-import com.cloudera.impala.thrift.TExplainLevel;
-import com.cloudera.impala.thrift.TPlanNode;
-import com.cloudera.impala.thrift.TPlanNodeType;
-import com.cloudera.impala.thrift.TQueryOptions;
+import org.apache.impala.analysis.AnalyticWindow;
+import org.apache.impala.analysis.Analyzer;
+import org.apache.impala.analysis.Expr;
+import org.apache.impala.analysis.ExprSubstitutionMap;
+import org.apache.impala.analysis.OrderByElement;
+import org.apache.impala.analysis.TupleDescriptor;
+import org.apache.impala.thrift.TAnalyticNode;
+import org.apache.impala.thrift.TExplainLevel;
+import org.apache.impala.thrift.TPlanNode;
+import org.apache.impala.thrift.TPlanNodeType;
+import org.apache.impala.thrift.TQueryOptions;
 import com.google.common.base.Joiner;
 import com.google.common.base.Objects;
 import com.google.common.base.Preconditions;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/planner/AnalyticPlanner.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/AnalyticPlanner.java b/fe/src/main/java/org/apache/impala/planner/AnalyticPlanner.java
index c02096e..e44fc0b 100644
--- a/fe/src/main/java/org/apache/impala/planner/AnalyticPlanner.java
+++ b/fe/src/main/java/org/apache/impala/planner/AnalyticPlanner.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.planner;
+package org.apache.impala.planner;
 
 import java.util.Collections;
 import java.util.Comparator;
@@ -24,27 +24,27 @@ import java.util.List;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.analysis.AggregateInfoBase;
-import com.cloudera.impala.analysis.AnalyticExpr;
-import com.cloudera.impala.analysis.AnalyticInfo;
-import com.cloudera.impala.analysis.AnalyticWindow;
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.BinaryPredicate;
-import com.cloudera.impala.analysis.BoolLiteral;
-import com.cloudera.impala.analysis.CompoundPredicate;
-import com.cloudera.impala.analysis.CompoundPredicate.Operator;
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.analysis.ExprSubstitutionMap;
-import com.cloudera.impala.analysis.IsNullPredicate;
-import com.cloudera.impala.analysis.OrderByElement;
-import com.cloudera.impala.analysis.SlotDescriptor;
-import com.cloudera.impala.analysis.SlotRef;
-import com.cloudera.impala.analysis.SortInfo;
-import com.cloudera.impala.analysis.TupleDescriptor;
-import com.cloudera.impala.analysis.TupleId;
-import com.cloudera.impala.analysis.TupleIsNullPredicate;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.thrift.TPartitionType;
+import org.apache.impala.analysis.AggregateInfoBase;
+import org.apache.impala.analysis.AnalyticExpr;
+import org.apache.impala.analysis.AnalyticInfo;
+import org.apache.impala.analysis.AnalyticWindow;
+import org.apache.impala.analysis.Analyzer;
+import org.apache.impala.analysis.BinaryPredicate;
+import org.apache.impala.analysis.BoolLiteral;
+import org.apache.impala.analysis.CompoundPredicate;
+import org.apache.impala.analysis.CompoundPredicate.Operator;
+import org.apache.impala.analysis.Expr;
+import org.apache.impala.analysis.ExprSubstitutionMap;
+import org.apache.impala.analysis.IsNullPredicate;
+import org.apache.impala.analysis.OrderByElement;
+import org.apache.impala.analysis.SlotDescriptor;
+import org.apache.impala.analysis.SlotRef;
+import org.apache.impala.analysis.SortInfo;
+import org.apache.impala.analysis.TupleDescriptor;
+import org.apache.impala.analysis.TupleId;
+import org.apache.impala.analysis.TupleIsNullPredicate;
+import org.apache.impala.common.ImpalaException;
+import org.apache.impala.thrift.TPartitionType;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/planner/CohortId.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/CohortId.java b/fe/src/main/java/org/apache/impala/planner/CohortId.java
index d58e5c4..f7c741d 100644
--- a/fe/src/main/java/org/apache/impala/planner/CohortId.java
+++ b/fe/src/main/java/org/apache/impala/planner/CohortId.java
@@ -15,10 +15,10 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.planner;
+package org.apache.impala.planner;
 
-import com.cloudera.impala.common.Id;
-import com.cloudera.impala.common.IdGenerator;
+import org.apache.impala.common.Id;
+import org.apache.impala.common.IdGenerator;
 
 public class CohortId extends Id<CohortId> {
   // Construction only allowed via an IdGenerator.

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/planner/DataPartition.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/DataPartition.java b/fe/src/main/java/org/apache/impala/planner/DataPartition.java
index 3320c2b..f0f16fa 100644
--- a/fe/src/main/java/org/apache/impala/planner/DataPartition.java
+++ b/fe/src/main/java/org/apache/impala/planner/DataPartition.java
@@ -15,18 +15,18 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.planner;
+package org.apache.impala.planner;
 
 import java.util.List;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.analysis.ExprSubstitutionMap;
-import com.cloudera.impala.thrift.TDataPartition;
-import com.cloudera.impala.thrift.TPartitionType;
+import org.apache.impala.analysis.Analyzer;
+import org.apache.impala.analysis.Expr;
+import org.apache.impala.analysis.ExprSubstitutionMap;
+import org.apache.impala.thrift.TDataPartition;
+import org.apache.impala.thrift.TPartitionType;
 import com.google.common.base.Joiner;
 import com.google.common.base.Objects;
 import com.google.common.base.Preconditions;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/planner/DataSink.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/DataSink.java b/fe/src/main/java/org/apache/impala/planner/DataSink.java
index ff81b50..b1977f4 100644
--- a/fe/src/main/java/org/apache/impala/planner/DataSink.java
+++ b/fe/src/main/java/org/apache/impala/planner/DataSink.java
@@ -15,17 +15,17 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.planner;
+package org.apache.impala.planner;
 
 import java.util.List;
 
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.catalog.HBaseTable;
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.catalog.KuduTable;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.thrift.TDataSink;
-import com.cloudera.impala.thrift.TExplainLevel;
+import org.apache.impala.analysis.Expr;
+import org.apache.impala.catalog.HBaseTable;
+import org.apache.impala.catalog.HdfsTable;
+import org.apache.impala.catalog.KuduTable;
+import org.apache.impala.catalog.Table;
+import org.apache.impala.thrift.TDataSink;
+import org.apache.impala.thrift.TExplainLevel;
 
 /**
  * A DataSink describes the destination of a plan fragment's output rows.

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/planner/DataSourceScanNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/DataSourceScanNode.java b/fe/src/main/java/org/apache/impala/planner/DataSourceScanNode.java
index ab92605..1f5665f 100644
--- a/fe/src/main/java/org/apache/impala/planner/DataSourceScanNode.java
+++ b/fe/src/main/java/org/apache/impala/planner/DataSourceScanNode.java
@@ -15,47 +15,47 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.planner;
+package org.apache.impala.planner;
 
 import java.util.List;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.BinaryPredicate;
-import com.cloudera.impala.analysis.BoolLiteral;
-import com.cloudera.impala.analysis.CompoundPredicate;
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.analysis.LiteralExpr;
-import com.cloudera.impala.analysis.NumericLiteral;
-import com.cloudera.impala.analysis.SlotRef;
-import com.cloudera.impala.analysis.StringLiteral;
-import com.cloudera.impala.analysis.TupleDescriptor;
-import com.cloudera.impala.catalog.DataSource;
-import com.cloudera.impala.catalog.DataSourceTable;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.extdatasource.ExternalDataSourceExecutor;
-import com.cloudera.impala.extdatasource.thrift.TBinaryPredicate;
-import com.cloudera.impala.extdatasource.thrift.TColumnDesc;
-import com.cloudera.impala.extdatasource.thrift.TComparisonOp;
-import com.cloudera.impala.extdatasource.thrift.TPrepareParams;
-import com.cloudera.impala.extdatasource.thrift.TPrepareResult;
-import com.cloudera.impala.service.FeSupport;
-import com.cloudera.impala.thrift.TCacheJarResult;
-import com.cloudera.impala.thrift.TColumnValue;
-import com.cloudera.impala.thrift.TDataSourceScanNode;
-import com.cloudera.impala.thrift.TErrorCode;
-import com.cloudera.impala.thrift.TExplainLevel;
-import com.cloudera.impala.thrift.TNetworkAddress;
-import com.cloudera.impala.thrift.TPlanNode;
-import com.cloudera.impala.thrift.TPlanNodeType;
-import com.cloudera.impala.thrift.TQueryOptions;
-import com.cloudera.impala.thrift.TScanRange;
-import com.cloudera.impala.thrift.TScanRangeLocation;
-import com.cloudera.impala.thrift.TScanRangeLocations;
-import com.cloudera.impala.thrift.TStatus;
+import org.apache.impala.analysis.Analyzer;
+import org.apache.impala.analysis.BinaryPredicate;
+import org.apache.impala.analysis.BoolLiteral;
+import org.apache.impala.analysis.CompoundPredicate;
+import org.apache.impala.analysis.Expr;
+import org.apache.impala.analysis.LiteralExpr;
+import org.apache.impala.analysis.NumericLiteral;
+import org.apache.impala.analysis.SlotRef;
+import org.apache.impala.analysis.StringLiteral;
+import org.apache.impala.analysis.TupleDescriptor;
+import org.apache.impala.catalog.DataSource;
+import org.apache.impala.catalog.DataSourceTable;
+import org.apache.impala.common.ImpalaException;
+import org.apache.impala.common.InternalException;
+import org.apache.impala.extdatasource.ExternalDataSourceExecutor;
+import org.apache.impala.extdatasource.thrift.TBinaryPredicate;
+import org.apache.impala.extdatasource.thrift.TColumnDesc;
+import org.apache.impala.extdatasource.thrift.TComparisonOp;
+import org.apache.impala.extdatasource.thrift.TPrepareParams;
+import org.apache.impala.extdatasource.thrift.TPrepareResult;
+import org.apache.impala.service.FeSupport;
+import org.apache.impala.thrift.TCacheJarResult;
+import org.apache.impala.thrift.TColumnValue;
+import org.apache.impala.thrift.TDataSourceScanNode;
+import org.apache.impala.thrift.TErrorCode;
+import org.apache.impala.thrift.TExplainLevel;
+import org.apache.impala.thrift.TNetworkAddress;
+import org.apache.impala.thrift.TPlanNode;
+import org.apache.impala.thrift.TPlanNodeType;
+import org.apache.impala.thrift.TQueryOptions;
+import org.apache.impala.thrift.TScanRange;
+import org.apache.impala.thrift.TScanRangeLocation;
+import org.apache.impala.thrift.TScanRangeLocations;
+import org.apache.impala.thrift.TStatus;
 import com.google.common.base.Joiner;
 import com.google.common.base.Objects;
 import com.google.common.base.Preconditions;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/planner/DataStreamSink.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/DataStreamSink.java b/fe/src/main/java/org/apache/impala/planner/DataStreamSink.java
index 514a791..952215e 100644
--- a/fe/src/main/java/org/apache/impala/planner/DataStreamSink.java
+++ b/fe/src/main/java/org/apache/impala/planner/DataStreamSink.java
@@ -15,12 +15,12 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.planner;
+package org.apache.impala.planner;
 
-import com.cloudera.impala.thrift.TDataSink;
-import com.cloudera.impala.thrift.TDataSinkType;
-import com.cloudera.impala.thrift.TDataStreamSink;
-import com.cloudera.impala.thrift.TExplainLevel;
+import org.apache.impala.thrift.TDataSink;
+import org.apache.impala.thrift.TDataSinkType;
+import org.apache.impala.thrift.TDataStreamSink;
+import org.apache.impala.thrift.TExplainLevel;
 import com.google.common.base.Preconditions;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/planner/DistributedPlanner.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/DistributedPlanner.java b/fe/src/main/java/org/apache/impala/planner/DistributedPlanner.java
index b38b018..620ea56 100644
--- a/fe/src/main/java/org/apache/impala/planner/DistributedPlanner.java
+++ b/fe/src/main/java/org/apache/impala/planner/DistributedPlanner.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.planner;
+package org.apache.impala.planner;
 
 import java.util.ArrayList;
 import java.util.List;
@@ -23,17 +23,17 @@ import java.util.List;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.analysis.AggregateInfo;
-import com.cloudera.impala.analysis.AnalysisContext;
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.analysis.InsertStmt;
-import com.cloudera.impala.analysis.JoinOperator;
-import com.cloudera.impala.analysis.QueryStmt;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.planner.JoinNode.DistributionMode;
-import com.cloudera.impala.planner.RuntimeFilterGenerator.RuntimeFilter;
+import org.apache.impala.analysis.AggregateInfo;
+import org.apache.impala.analysis.AnalysisContext;
+import org.apache.impala.analysis.Analyzer;
+import org.apache.impala.analysis.Expr;
+import org.apache.impala.analysis.InsertStmt;
+import org.apache.impala.analysis.JoinOperator;
+import org.apache.impala.analysis.QueryStmt;
+import org.apache.impala.common.ImpalaException;
+import org.apache.impala.common.InternalException;
+import org.apache.impala.planner.JoinNode.DistributionMode;
+import org.apache.impala.planner.RuntimeFilterGenerator.RuntimeFilter;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/planner/EmptySetNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/EmptySetNode.java b/fe/src/main/java/org/apache/impala/planner/EmptySetNode.java
index ed9dc70..2b46bd9 100644
--- a/fe/src/main/java/org/apache/impala/planner/EmptySetNode.java
+++ b/fe/src/main/java/org/apache/impala/planner/EmptySetNode.java
@@ -15,15 +15,15 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.planner;
+package org.apache.impala.planner;
 
 import java.util.ArrayList;
 
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.TupleId;
-import com.cloudera.impala.thrift.TExplainLevel;
-import com.cloudera.impala.thrift.TPlanNode;
-import com.cloudera.impala.thrift.TPlanNodeType;
+import org.apache.impala.analysis.Analyzer;
+import org.apache.impala.analysis.TupleId;
+import org.apache.impala.thrift.TExplainLevel;
+import org.apache.impala.thrift.TPlanNode;
+import org.apache.impala.thrift.TPlanNodeType;
 import com.google.common.base.Preconditions;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/planner/ExchangeNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/ExchangeNode.java b/fe/src/main/java/org/apache/impala/planner/ExchangeNode.java
index eeef5fe..658aa76 100644
--- a/fe/src/main/java/org/apache/impala/planner/ExchangeNode.java
+++ b/fe/src/main/java/org/apache/impala/planner/ExchangeNode.java
@@ -15,21 +15,21 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.planner;
+package org.apache.impala.planner;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.analysis.SortInfo;
-import com.cloudera.impala.analysis.TupleId;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.thrift.TExchangeNode;
-import com.cloudera.impala.thrift.TExplainLevel;
-import com.cloudera.impala.thrift.TPlanNode;
-import com.cloudera.impala.thrift.TPlanNodeType;
-import com.cloudera.impala.thrift.TSortInfo;
+import org.apache.impala.analysis.Analyzer;
+import org.apache.impala.analysis.Expr;
+import org.apache.impala.analysis.SortInfo;
+import org.apache.impala.analysis.TupleId;
+import org.apache.impala.common.ImpalaException;
+import org.apache.impala.thrift.TExchangeNode;
+import org.apache.impala.thrift.TExplainLevel;
+import org.apache.impala.thrift.TPlanNode;
+import org.apache.impala.thrift.TPlanNodeType;
+import org.apache.impala.thrift.TSortInfo;
 import com.google.common.base.Preconditions;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/planner/HBaseScanNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/HBaseScanNode.java b/fe/src/main/java/org/apache/impala/planner/HBaseScanNode.java
index e31372d..f299131 100644
--- a/fe/src/main/java/org/apache/impala/planner/HBaseScanNode.java
+++ b/fe/src/main/java/org/apache/impala/planner/HBaseScanNode.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.planner;
+package org.apache.impala.planner;
 
 import java.io.IOException;
 import java.nio.ByteBuffer;
@@ -34,32 +34,32 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.BinaryPredicate;
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.analysis.SlotDescriptor;
-import com.cloudera.impala.analysis.StringLiteral;
-import com.cloudera.impala.analysis.TupleDescriptor;
-import com.cloudera.impala.catalog.HBaseColumn;
-import com.cloudera.impala.catalog.HBaseTable;
-import com.cloudera.impala.catalog.PrimitiveType;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.common.Pair;
-import com.cloudera.impala.service.FeSupport;
-import com.cloudera.impala.thrift.TColumnValue;
-import com.cloudera.impala.thrift.TExplainLevel;
-import com.cloudera.impala.thrift.THBaseFilter;
-import com.cloudera.impala.thrift.THBaseKeyRange;
-import com.cloudera.impala.thrift.THBaseScanNode;
-import com.cloudera.impala.thrift.TNetworkAddress;
-import com.cloudera.impala.thrift.TPlanNode;
-import com.cloudera.impala.thrift.TPlanNodeType;
-import com.cloudera.impala.thrift.TQueryOptions;
-import com.cloudera.impala.thrift.TScanRange;
-import com.cloudera.impala.thrift.TScanRangeLocation;
-import com.cloudera.impala.thrift.TScanRangeLocations;
+import org.apache.impala.analysis.Analyzer;
+import org.apache.impala.analysis.BinaryPredicate;
+import org.apache.impala.analysis.Expr;
+import org.apache.impala.analysis.SlotDescriptor;
+import org.apache.impala.analysis.StringLiteral;
+import org.apache.impala.analysis.TupleDescriptor;
+import org.apache.impala.catalog.HBaseColumn;
+import org.apache.impala.catalog.HBaseTable;
+import org.apache.impala.catalog.PrimitiveType;
+import org.apache.impala.catalog.Type;
+import org.apache.impala.common.ImpalaException;
+import org.apache.impala.common.InternalException;
+import org.apache.impala.common.Pair;
+import org.apache.impala.service.FeSupport;
+import org.apache.impala.thrift.TColumnValue;
+import org.apache.impala.thrift.TExplainLevel;
+import org.apache.impala.thrift.THBaseFilter;
+import org.apache.impala.thrift.THBaseKeyRange;
+import org.apache.impala.thrift.THBaseScanNode;
+import org.apache.impala.thrift.TNetworkAddress;
+import org.apache.impala.thrift.TPlanNode;
+import org.apache.impala.thrift.TPlanNodeType;
+import org.apache.impala.thrift.TQueryOptions;
+import org.apache.impala.thrift.TScanRange;
+import org.apache.impala.thrift.TScanRangeLocation;
+import org.apache.impala.thrift.TScanRangeLocations;
 import com.google.common.base.Objects;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/planner/HBaseTableSink.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/HBaseTableSink.java b/fe/src/main/java/org/apache/impala/planner/HBaseTableSink.java
index 2a0d1b7..aa77529 100644
--- a/fe/src/main/java/org/apache/impala/planner/HBaseTableSink.java
+++ b/fe/src/main/java/org/apache/impala/planner/HBaseTableSink.java
@@ -16,15 +16,15 @@
 // under the License.
 
 
-package com.cloudera.impala.planner;
+package org.apache.impala.planner;
 
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.common.PrintUtils;
-import com.cloudera.impala.thrift.TDataSink;
-import com.cloudera.impala.thrift.TDataSinkType;
-import com.cloudera.impala.thrift.TExplainLevel;
-import com.cloudera.impala.thrift.TTableSink;
-import com.cloudera.impala.thrift.TTableSinkType;
+import org.apache.impala.catalog.Table;
+import org.apache.impala.common.PrintUtils;
+import org.apache.impala.thrift.TDataSink;
+import org.apache.impala.thrift.TDataSinkType;
+import org.apache.impala.thrift.TExplainLevel;
+import org.apache.impala.thrift.TTableSink;
+import org.apache.impala.thrift.TTableSinkType;
 
 /**
  * Class used to represent a Sink that will transport

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/planner/HashJoinNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/HashJoinNode.java b/fe/src/main/java/org/apache/impala/planner/HashJoinNode.java
index 906f732..07cfe9f 100644
--- a/fe/src/main/java/org/apache/impala/planner/HashJoinNode.java
+++ b/fe/src/main/java/org/apache/impala/planner/HashJoinNode.java
@@ -15,28 +15,28 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.planner;
+package org.apache.impala.planner;
 
 import java.util.List;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.BinaryPredicate;
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.analysis.ExprSubstitutionMap;
-import com.cloudera.impala.analysis.JoinOperator;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.thrift.TEqJoinCondition;
-import com.cloudera.impala.thrift.TExplainLevel;
-import com.cloudera.impala.thrift.THashJoinNode;
-import com.cloudera.impala.thrift.TPlanNode;
-import com.cloudera.impala.thrift.TPlanNodeType;
-import com.cloudera.impala.thrift.TQueryOptions;
+import org.apache.impala.analysis.Analyzer;
+import org.apache.impala.analysis.BinaryPredicate;
+import org.apache.impala.analysis.Expr;
+import org.apache.impala.analysis.ExprSubstitutionMap;
+import org.apache.impala.analysis.JoinOperator;
+import org.apache.impala.catalog.Type;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.common.ImpalaException;
+import org.apache.impala.common.InternalException;
+import org.apache.impala.thrift.TEqJoinCondition;
+import org.apache.impala.thrift.TExplainLevel;
+import org.apache.impala.thrift.THashJoinNode;
+import org.apache.impala.thrift.TPlanNode;
+import org.apache.impala.thrift.TPlanNodeType;
+import org.apache.impala.thrift.TQueryOptions;
 import com.google.common.base.Objects;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/planner/HdfsPartitionFilter.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/HdfsPartitionFilter.java b/fe/src/main/java/org/apache/impala/planner/HdfsPartitionFilter.java
index d1710aa..3345c1b 100644
--- a/fe/src/main/java/org/apache/impala/planner/HdfsPartitionFilter.java
+++ b/fe/src/main/java/org/apache/impala/planner/HdfsPartitionFilter.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.planner;
+package org.apache.impala.planner;
 
 import java.util.ArrayList;
 import java.util.HashMap;
@@ -24,19 +24,19 @@ import java.util.HashSet;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.analysis.ExprSubstitutionMap;
-import com.cloudera.impala.analysis.SlotDescriptor;
-import com.cloudera.impala.analysis.SlotId;
-import com.cloudera.impala.analysis.SlotRef;
-import com.cloudera.impala.catalog.Column;
-import com.cloudera.impala.catalog.HdfsPartition;
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.service.FeSupport;
-import com.cloudera.impala.thrift.TColumnValue;
-import com.cloudera.impala.thrift.TResultRow;
+import org.apache.impala.analysis.Analyzer;
+import org.apache.impala.analysis.Expr;
+import org.apache.impala.analysis.ExprSubstitutionMap;
+import org.apache.impala.analysis.SlotDescriptor;
+import org.apache.impala.analysis.SlotId;
+import org.apache.impala.analysis.SlotRef;
+import org.apache.impala.catalog.Column;
+import org.apache.impala.catalog.HdfsPartition;
+import org.apache.impala.catalog.HdfsTable;
+import org.apache.impala.common.InternalException;
+import org.apache.impala.service.FeSupport;
+import org.apache.impala.thrift.TColumnValue;
+import org.apache.impala.thrift.TResultRow;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/planner/HdfsPartitionPruner.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/HdfsPartitionPruner.java b/fe/src/main/java/org/apache/impala/planner/HdfsPartitionPruner.java
index 9606dc5..5efd474 100644
--- a/fe/src/main/java/org/apache/impala/planner/HdfsPartitionPruner.java
+++ b/fe/src/main/java/org/apache/impala/planner/HdfsPartitionPruner.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.planner;
+package org.apache.impala.planner;
 
 import java.util.ArrayList;
 import java.util.HashSet;
@@ -29,24 +29,24 @@ import java.util.TreeMap;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.BetweenPredicate;
-import com.cloudera.impala.analysis.BinaryPredicate;
-import com.cloudera.impala.analysis.BinaryPredicate.Operator;
-import com.cloudera.impala.analysis.CompoundPredicate;
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.analysis.InPredicate;
-import com.cloudera.impala.analysis.IsNullPredicate;
-import com.cloudera.impala.analysis.LiteralExpr;
-import com.cloudera.impala.analysis.NullLiteral;
-import com.cloudera.impala.analysis.SlotDescriptor;
-import com.cloudera.impala.analysis.SlotId;
-import com.cloudera.impala.analysis.SlotRef;
-import com.cloudera.impala.analysis.TupleDescriptor;
-import com.cloudera.impala.catalog.HdfsPartition;
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.InternalException;
+import org.apache.impala.analysis.Analyzer;
+import org.apache.impala.analysis.BetweenPredicate;
+import org.apache.impala.analysis.BinaryPredicate;
+import org.apache.impala.analysis.BinaryPredicate.Operator;
+import org.apache.impala.analysis.CompoundPredicate;
+import org.apache.impala.analysis.Expr;
+import org.apache.impala.analysis.InPredicate;
+import org.apache.impala.analysis.IsNullPredicate;
+import org.apache.impala.analysis.LiteralExpr;
+import org.apache.impala.analysis.NullLiteral;
+import org.apache.impala.analysis.SlotDescriptor;
+import org.apache.impala.analysis.SlotId;
+import org.apache.impala.analysis.SlotRef;
+import org.apache.impala.analysis.TupleDescriptor;
+import org.apache.impala.catalog.HdfsPartition;
+import org.apache.impala.catalog.HdfsTable;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.common.InternalException;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Predicates;
 import com.google.common.collect.Lists;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/planner/HdfsScanNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/HdfsScanNode.java b/fe/src/main/java/org/apache/impala/planner/HdfsScanNode.java
index 9b83902..4052867 100644
--- a/fe/src/main/java/org/apache/impala/planner/HdfsScanNode.java
+++ b/fe/src/main/java/org/apache/impala/planner/HdfsScanNode.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.planner;
+package org.apache.impala.planner;
 
 import java.util.ArrayList;
 import java.util.HashSet;
@@ -25,36 +25,36 @@ import java.util.Map;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.analysis.SlotDescriptor;
-import com.cloudera.impala.analysis.TableRef;
-import com.cloudera.impala.analysis.TupleDescriptor;
-import com.cloudera.impala.analysis.TupleId;
-import com.cloudera.impala.catalog.Column;
-import com.cloudera.impala.catalog.HdfsFileFormat;
-import com.cloudera.impala.catalog.HdfsPartition;
-import com.cloudera.impala.catalog.HdfsPartition.FileBlock;
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.common.NotImplementedException;
-import com.cloudera.impala.common.PrintUtils;
-import com.cloudera.impala.common.RuntimeEnv;
-import com.cloudera.impala.thrift.TExplainLevel;
-import com.cloudera.impala.thrift.TExpr;
-import com.cloudera.impala.thrift.THdfsFileBlock;
-import com.cloudera.impala.thrift.THdfsFileSplit;
-import com.cloudera.impala.thrift.THdfsScanNode;
-import com.cloudera.impala.thrift.TNetworkAddress;
-import com.cloudera.impala.thrift.TPlanNode;
-import com.cloudera.impala.thrift.TPlanNodeType;
-import com.cloudera.impala.thrift.TQueryOptions;
-import com.cloudera.impala.thrift.TReplicaPreference;
-import com.cloudera.impala.thrift.TScanRange;
-import com.cloudera.impala.thrift.TScanRangeLocation;
-import com.cloudera.impala.thrift.TScanRangeLocations;
-import com.cloudera.impala.util.MembershipSnapshot;
+import org.apache.impala.analysis.Analyzer;
+import org.apache.impala.analysis.Expr;
+import org.apache.impala.analysis.SlotDescriptor;
+import org.apache.impala.analysis.TableRef;
+import org.apache.impala.analysis.TupleDescriptor;
+import org.apache.impala.analysis.TupleId;
+import org.apache.impala.catalog.Column;
+import org.apache.impala.catalog.HdfsFileFormat;
+import org.apache.impala.catalog.HdfsPartition;
+import org.apache.impala.catalog.HdfsPartition.FileBlock;
+import org.apache.impala.catalog.HdfsTable;
+import org.apache.impala.catalog.Type;
+import org.apache.impala.common.ImpalaException;
+import org.apache.impala.common.NotImplementedException;
+import org.apache.impala.common.PrintUtils;
+import org.apache.impala.common.RuntimeEnv;
+import org.apache.impala.thrift.TExplainLevel;
+import org.apache.impala.thrift.TExpr;
+import org.apache.impala.thrift.THdfsFileBlock;
+import org.apache.impala.thrift.THdfsFileSplit;
+import org.apache.impala.thrift.THdfsScanNode;
+import org.apache.impala.thrift.TNetworkAddress;
+import org.apache.impala.thrift.TPlanNode;
+import org.apache.impala.thrift.TPlanNodeType;
+import org.apache.impala.thrift.TQueryOptions;
+import org.apache.impala.thrift.TReplicaPreference;
+import org.apache.impala.thrift.TScanRange;
+import org.apache.impala.thrift.TScanRangeLocation;
+import org.apache.impala.thrift.TScanRangeLocations;
+import org.apache.impala.util.MembershipSnapshot;
 import com.google.common.base.Joiner;
 import com.google.common.base.Objects;
 import com.google.common.base.Objects.ToStringHelper;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/planner/HdfsTableSink.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/HdfsTableSink.java b/fe/src/main/java/org/apache/impala/planner/HdfsTableSink.java
index 7b97773..3a217b9 100644
--- a/fe/src/main/java/org/apache/impala/planner/HdfsTableSink.java
+++ b/fe/src/main/java/org/apache/impala/planner/HdfsTableSink.java
@@ -15,21 +15,21 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.planner;
+package org.apache.impala.planner;
 
 import java.util.List;
 
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.catalog.HdfsFileFormat;
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.common.PrintUtils;
-import com.cloudera.impala.thrift.TDataSink;
-import com.cloudera.impala.thrift.TDataSinkType;
-import com.cloudera.impala.thrift.TExplainLevel;
-import com.cloudera.impala.thrift.THdfsTableSink;
-import com.cloudera.impala.thrift.TTableSink;
-import com.cloudera.impala.thrift.TTableSinkType;
+import org.apache.impala.analysis.Expr;
+import org.apache.impala.catalog.HdfsFileFormat;
+import org.apache.impala.catalog.HdfsTable;
+import org.apache.impala.catalog.Table;
+import org.apache.impala.common.PrintUtils;
+import org.apache.impala.thrift.TDataSink;
+import org.apache.impala.thrift.TDataSinkType;
+import org.apache.impala.thrift.TExplainLevel;
+import org.apache.impala.thrift.THdfsTableSink;
+import org.apache.impala.thrift.TTableSink;
+import org.apache.impala.thrift.TTableSinkType;
 import com.google.common.base.Preconditions;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/planner/JoinBuildSink.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/JoinBuildSink.java b/fe/src/main/java/org/apache/impala/planner/JoinBuildSink.java
index 25da277..e214fa6 100644
--- a/fe/src/main/java/org/apache/impala/planner/JoinBuildSink.java
+++ b/fe/src/main/java/org/apache/impala/planner/JoinBuildSink.java
@@ -15,24 +15,24 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.planner;
+package org.apache.impala.planner;
 
 import java.util.List;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.BinaryPredicate;
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.thrift.TDataSink;
-import com.cloudera.impala.thrift.TDataSinkType;
-import com.cloudera.impala.thrift.TExplainLevel;
-import com.cloudera.impala.thrift.TJoinBuildSink;
-import com.cloudera.impala.thrift.TPlanNode;
-import com.cloudera.impala.thrift.TPlanNodeType;
-import com.cloudera.impala.thrift.TQueryOptions;
+import org.apache.impala.analysis.Analyzer;
+import org.apache.impala.analysis.BinaryPredicate;
+import org.apache.impala.analysis.Expr;
+import org.apache.impala.common.ImpalaException;
+import org.apache.impala.thrift.TDataSink;
+import org.apache.impala.thrift.TDataSinkType;
+import org.apache.impala.thrift.TExplainLevel;
+import org.apache.impala.thrift.TJoinBuildSink;
+import org.apache.impala.thrift.TPlanNode;
+import org.apache.impala.thrift.TPlanNodeType;
+import org.apache.impala.thrift.TQueryOptions;
 import com.google.common.base.Objects;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/planner/JoinNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/JoinNode.java b/fe/src/main/java/org/apache/impala/planner/JoinNode.java
index ebc9b51..3362047 100644
--- a/fe/src/main/java/org/apache/impala/planner/JoinNode.java
+++ b/fe/src/main/java/org/apache/impala/planner/JoinNode.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.planner;
+package org.apache.impala.planner;
 
 import java.util.Collections;
 import java.util.List;
@@ -23,15 +23,15 @@ import java.util.List;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.BinaryPredicate;
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.analysis.JoinOperator;
-import com.cloudera.impala.analysis.SlotDescriptor;
-import com.cloudera.impala.analysis.SlotRef;
-import com.cloudera.impala.catalog.ColumnStats;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.common.ImpalaException;
+import org.apache.impala.analysis.Analyzer;
+import org.apache.impala.analysis.BinaryPredicate;
+import org.apache.impala.analysis.Expr;
+import org.apache.impala.analysis.JoinOperator;
+import org.apache.impala.analysis.SlotDescriptor;
+import org.apache.impala.analysis.SlotRef;
+import org.apache.impala.catalog.ColumnStats;
+import org.apache.impala.catalog.Table;
+import org.apache.impala.common.ImpalaException;
 import com.google.common.base.Preconditions;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/planner/JoinTableId.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/JoinTableId.java b/fe/src/main/java/org/apache/impala/planner/JoinTableId.java
index 5cf7a2b..5f96c90 100644
--- a/fe/src/main/java/org/apache/impala/planner/JoinTableId.java
+++ b/fe/src/main/java/org/apache/impala/planner/JoinTableId.java
@@ -15,10 +15,10 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.planner;
+package org.apache.impala.planner;
 
-import com.cloudera.impala.common.Id;
-import com.cloudera.impala.common.IdGenerator;
+import org.apache.impala.common.Id;
+import org.apache.impala.common.IdGenerator;
 
 public class JoinTableId extends Id<JoinTableId> {
   // Construction only allowed via an IdGenerator.

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/planner/KuduScanNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/KuduScanNode.java b/fe/src/main/java/org/apache/impala/planner/KuduScanNode.java
index 4f654a9..a2473ae 100644
--- a/fe/src/main/java/org/apache/impala/planner/KuduScanNode.java
+++ b/fe/src/main/java/org/apache/impala/planner/KuduScanNode.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.planner;
+package org.apache.impala.planner;
 
 import java.io.IOException;
 import java.util.List;
@@ -34,27 +34,27 @@ import org.apache.kudu.client.LocatedTablet;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.BinaryPredicate;
-import com.cloudera.impala.analysis.BoolLiteral;
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.analysis.LiteralExpr;
-import com.cloudera.impala.analysis.NullLiteral;
-import com.cloudera.impala.analysis.NumericLiteral;
-import com.cloudera.impala.analysis.SlotDescriptor;
-import com.cloudera.impala.analysis.SlotRef;
-import com.cloudera.impala.analysis.StringLiteral;
-import com.cloudera.impala.analysis.TupleDescriptor;
-import com.cloudera.impala.catalog.KuduTable;
-import com.cloudera.impala.common.ImpalaRuntimeException;
-import com.cloudera.impala.thrift.TExplainLevel;
-import com.cloudera.impala.thrift.TKuduScanNode;
-import com.cloudera.impala.thrift.TNetworkAddress;
-import com.cloudera.impala.thrift.TPlanNode;
-import com.cloudera.impala.thrift.TPlanNodeType;
-import com.cloudera.impala.thrift.TScanRange;
-import com.cloudera.impala.thrift.TScanRangeLocation;
-import com.cloudera.impala.thrift.TScanRangeLocations;
+import org.apache.impala.analysis.Analyzer;
+import org.apache.impala.analysis.BinaryPredicate;
+import org.apache.impala.analysis.BoolLiteral;
+import org.apache.impala.analysis.Expr;
+import org.apache.impala.analysis.LiteralExpr;
+import org.apache.impala.analysis.NullLiteral;
+import org.apache.impala.analysis.NumericLiteral;
+import org.apache.impala.analysis.SlotDescriptor;
+import org.apache.impala.analysis.SlotRef;
+import org.apache.impala.analysis.StringLiteral;
+import org.apache.impala.analysis.TupleDescriptor;
+import org.apache.impala.catalog.KuduTable;
+import org.apache.impala.common.ImpalaRuntimeException;
+import org.apache.impala.thrift.TExplainLevel;
+import org.apache.impala.thrift.TKuduScanNode;
+import org.apache.impala.thrift.TNetworkAddress;
+import org.apache.impala.thrift.TPlanNode;
+import org.apache.impala.thrift.TPlanNodeType;
+import org.apache.impala.thrift.TScanRange;
+import org.apache.impala.thrift.TScanRangeLocation;
+import org.apache.impala.thrift.TScanRangeLocations;
 import com.google.common.base.Charsets;
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Lists;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/planner/KuduTableSink.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/KuduTableSink.java b/fe/src/main/java/org/apache/impala/planner/KuduTableSink.java
index 8e8ac63..2fb9af7 100644
--- a/fe/src/main/java/org/apache/impala/planner/KuduTableSink.java
+++ b/fe/src/main/java/org/apache/impala/planner/KuduTableSink.java
@@ -16,19 +16,19 @@
 // under the License.
 
 
-package com.cloudera.impala.planner;
+package org.apache.impala.planner;
 
 import java.util.ArrayList;
 import java.util.List;
 
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.common.PrintUtils;
-import com.cloudera.impala.thrift.TDataSink;
-import com.cloudera.impala.thrift.TDataSinkType;
-import com.cloudera.impala.thrift.TExplainLevel;
-import com.cloudera.impala.thrift.TKuduTableSink;
-import com.cloudera.impala.thrift.TTableSink;
-import com.cloudera.impala.thrift.TTableSinkType;
+import org.apache.impala.catalog.Table;
+import org.apache.impala.common.PrintUtils;
+import org.apache.impala.thrift.TDataSink;
+import org.apache.impala.thrift.TDataSinkType;
+import org.apache.impala.thrift.TExplainLevel;
+import org.apache.impala.thrift.TKuduTableSink;
+import org.apache.impala.thrift.TTableSink;
+import org.apache.impala.thrift.TTableSinkType;
 import com.google.common.collect.Lists;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b2c2fe78/fe/src/main/java/org/apache/impala/planner/NestedLoopJoinNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/NestedLoopJoinNode.java b/fe/src/main/java/org/apache/impala/planner/NestedLoopJoinNode.java
index e989438..cbaf370 100644
--- a/fe/src/main/java/org/apache/impala/planner/NestedLoopJoinNode.java
+++ b/fe/src/main/java/org/apache/impala/planner/NestedLoopJoinNode.java
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloudera.impala.planner;
+package org.apache.impala.planner;
 
 import java.util.Collections;
 import java.util.List;
@@ -23,16 +23,16 @@ import java.util.List;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.BinaryPredicate;
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.analysis.JoinOperator;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.thrift.TExplainLevel;
-import com.cloudera.impala.thrift.TNestedLoopJoinNode;
-import com.cloudera.impala.thrift.TPlanNode;
-import com.cloudera.impala.thrift.TPlanNodeType;
-import com.cloudera.impala.thrift.TQueryOptions;
+import org.apache.impala.analysis.Analyzer;
+import org.apache.impala.analysis.BinaryPredicate;
+import org.apache.impala.analysis.Expr;
+import org.apache.impala.analysis.JoinOperator;
+import org.apache.impala.common.ImpalaException;
+import org.apache.impala.thrift.TExplainLevel;
+import org.apache.impala.thrift.TNestedLoopJoinNode;
+import org.apache.impala.thrift.TPlanNode;
+import org.apache.impala.thrift.TPlanNodeType;
+import org.apache.impala.thrift.TQueryOptions;
 import com.google.common.base.Objects;
 import com.google.common.base.Preconditions;