Posted to commits@impala.apache.org by kw...@apache.org on 2016/09/30 02:14:57 UTC

[40/61] [partial] incubator-impala git commit: IMPALA-3786: Replace "cloudera" with "apache" (part 1)

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/InsertStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/InsertStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/InsertStmt.java
deleted file mode 100644
index c5965db..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/InsertStmt.java
+++ /dev/null
@@ -1,697 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Set;
-
-import com.cloudera.impala.planner.TableSink;
-import com.google.common.collect.ImmutableList;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.authorization.PrivilegeRequestBuilder;
-import com.cloudera.impala.catalog.Column;
-import com.cloudera.impala.catalog.HBaseTable;
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.catalog.KuduTable;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.catalog.View;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.FileSystemUtil;
-import com.cloudera.impala.planner.DataSink;
-import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Sets;
-
-/**
- * Representation of a single insert statement, including the select statement
- * whose results are to be inserted.
- */
-public class InsertStmt extends StatementBase {
-  private final static Logger LOG = LoggerFactory.getLogger(InsertStmt.class);
-
-  // Target table name as seen by the parser
-  private final TableName originalTableName_;
-
-  // Differentiates between INSERT INTO and INSERT OVERWRITE.
-  private final boolean overwrite_;
-
-  // List of column:value elements from the PARTITION (...) clause.
-  // Set to null if no partition was given.
-  private final List<PartitionKeyValue> partitionKeyValues_;
-
-  // User-supplied hints to control hash partitioning before the table sink in the plan.
-  private final List<String> planHints_;
-
-  // False if the original insert statement had a query statement, true if we need to
-  // auto-generate one (for insert into tbl()) during analysis.
-  private final boolean needsGeneratedQueryStatement_;
-
-  // The column permutation is specified by writing INSERT INTO tbl(col3, col1, col2...)
-  //
-  // It is a mapping from select-list expr index to (non-partition) output column. If
-  // null, will be set to the default permutation of all non-partition columns in Hive
-  // order.
-  //
-  // A column is said to be 'mentioned' if it occurs either in the column permutation, or
-  // the PARTITION clause. If columnPermutation is null, all non-partition columns are
-  // considered mentioned.
-  //
-  // Between them, the columnPermutation and the set of partitionKeyValues must mention
-  // every partition column in the target table exactly once. Other columns, if not
-  // explicitly mentioned, will be assigned NULL values. Partition columns are
-  // deliberately not defaulted to NULL; they must be mentioned explicitly even for
-  // NULL-valued partition slots.
-  //
-  // Dynamic partition keys may occur in either the permutation or the PARTITION
-  // clause. Partition columns with static values may only be mentioned in the PARTITION
-  // clause, where the static value is specified.
-  private final List<String> columnPermutation_;
-
-  /////////////////////////////////////////
-  // BEGIN: Members that need to be reset()
-
-  // List of inline views that may be referenced in queryStmt.
-  private final WithClause withClause_;
-
-  // Target table into which to insert. May be qualified by analyze()
-  private TableName targetTableName_;
-
-  // Select or union whose results are to be inserted. If null, will be set during
-  // analysis (see prepareExpressions()).
-  private QueryStmt queryStmt_;
-
-  // Set in analyze(). Contains metadata of target table to determine type of sink.
-  private Table table_;
-
-  // Set in analyze(). Exprs corresponding to the partitionKeyValues.
-  private List<Expr> partitionKeyExprs_ = Lists.newArrayList();
-
-  // Indicates whether this insert stmt has a shuffle or noshuffle plan hint.
-  // Both flags may be false. Only one of them may be true, not both.
-  // Shuffle forces data repartitioning before the data sink, and noshuffle
-  // prevents it. Set in analyze() based on planHints_.
-  private boolean hasShuffleHint_ = false;
-  private boolean hasNoShuffleHint_ = false;
-
-  // Output expressions that produce the final results to write to the target table. May
-  // include casts, and NullLiterals where an output column isn't explicitly mentioned.
-  // Set in prepareExpressions(). The i'th expr produces the i'th column of the target
-  // table.
-  private ArrayList<Expr> resultExprs_ = Lists.newArrayList();
-
-  // END: Members that need to be reset()
-  /////////////////////////////////////////
-
-  // For tables with primary keys, indicates if duplicate key errors are ignored.
-  private final boolean ignoreDuplicates_;
-
-  public InsertStmt(WithClause withClause, TableName targetTable, boolean overwrite,
-      List<PartitionKeyValue> partitionKeyValues, List<String> planHints,
-      QueryStmt queryStmt, List<String> columnPermutation, boolean ignoreDuplicates) {
-    withClause_ = withClause;
-    targetTableName_ = targetTable;
-    originalTableName_ = targetTableName_;
-    overwrite_ = overwrite;
-    partitionKeyValues_ = partitionKeyValues;
-    planHints_ = planHints;
-    queryStmt_ = queryStmt;
-    needsGeneratedQueryStatement_ = (queryStmt == null);
-    columnPermutation_ = columnPermutation;
-    table_ = null;
-    ignoreDuplicates_ = ignoreDuplicates;
-  }
-
-  /**
-   * C'tor used in clone().
-   */
-  private InsertStmt(InsertStmt other) {
-    super(other);
-    withClause_ = other.withClause_ != null ? other.withClause_.clone() : null;
-    targetTableName_ = other.targetTableName_;
-    originalTableName_ = other.originalTableName_;
-    overwrite_ = other.overwrite_;
-    partitionKeyValues_ = other.partitionKeyValues_;
-    planHints_ = other.planHints_;
-    queryStmt_ = other.queryStmt_ != null ? other.queryStmt_.clone() : null;
-    needsGeneratedQueryStatement_ = other.needsGeneratedQueryStatement_;
-    columnPermutation_ = other.columnPermutation_;
-    table_ = other.table_;
-    ignoreDuplicates_ = other.ignoreDuplicates_;
-  }
-
-  @Override
-  public void reset() {
-    super.reset();
-    if (withClause_ != null) withClause_.reset();
-    targetTableName_ = originalTableName_;
-    queryStmt_.reset();
-    table_ = null;
-    partitionKeyExprs_.clear();
-    hasShuffleHint_ = false;
-    hasNoShuffleHint_ = false;
-    resultExprs_.clear();
-  }
-
-  @Override
-  public InsertStmt clone() { return new InsertStmt(this); }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    if (isAnalyzed()) return;
-    super.analyze(analyzer);
-    try {
-      if (withClause_ != null) withClause_.analyze(analyzer);
-    } catch (AnalysisException e) {
-      // Ignore AnalysisExceptions if tables are missing to ensure the maximum number
-      // of missing tables can be collected before failing analyze().
-      if (analyzer.getMissingTbls().isEmpty()) throw e;
-    }
-
-    List<Expr> selectListExprs = null;
-    if (!needsGeneratedQueryStatement_) {
-      try {
-        // Use a child analyzer for the query stmt to properly scope WITH-clause
-        // views and to ignore irrelevant ORDER BYs.
-        Analyzer queryStmtAnalyzer = new Analyzer(analyzer);
-        queryStmt_.analyze(queryStmtAnalyzer);
-        // Subqueries need to be rewritten by the StmtRewriter first.
-        if (analyzer.containsSubquery()) return;
-        // Use getResultExprs() and not getBaseTblResultExprs() here because the final
-        // substitution with TupleIsNullPredicate() wrapping happens in planning.
-        selectListExprs = Expr.cloneList(queryStmt_.getResultExprs());
-      } catch (AnalysisException e) {
-        if (analyzer.getMissingTbls().isEmpty()) throw e;
-      }
-    } else {
-      selectListExprs = Lists.newArrayList();
-    }
-
-    // Set target table and perform table-type specific analysis and auth checking.
-    // Also checks if the target table is missing.
-    setTargetTable(analyzer);
-
-    // Abort analysis if there are any missing tables beyond this point.
-    if (!analyzer.getMissingTbls().isEmpty()) {
-      throw new AnalysisException("Found missing tables. Aborting analysis.");
-    }
-
-    boolean isHBaseTable = (table_ instanceof HBaseTable);
-    int numClusteringCols = isHBaseTable ? 0 : table_.getNumClusteringCols();
-
-    // Analysis of the INSERT statement from this point is basically the act of matching
-    // the set of output columns (which come from a column permutation, perhaps
-    // implicitly, and the PARTITION clause) to the set of input columns (which come from
-    // the select-list and any statically-valued columns in the PARTITION clause).
-    //
-    // First, we compute the set of mentioned columns, and reject statements that refer to
-    // non-existent columns, or duplicates (we must check both the column permutation, and
-    // the set of partition keys). Next, we check that all partition columns are
-    // mentioned. During this process we build the map from select-list expr index to
-    // column in the targeted table.
-    //
-    // Then we check that the select-list contains exactly the right number of expressions
-    // for all mentioned columns which are not statically-valued partition columns (which
-    // get their expressions from partitionKeyValues).
-    //
-    // Finally, prepareExpressions analyzes the expressions themselves, and confirms that
-    // they are type-compatible with the target columns. Where columns are not mentioned
-    // (and by this point, we know that missing columns are not partition columns),
-    // prepareExpressions assigns them NULL literal expressions.
-
-    // A null permutation clause is the same as listing all non-partition columns in
-    // order.
-    List<String> analysisColumnPermutation = columnPermutation_;
-    if (analysisColumnPermutation == null) {
-      analysisColumnPermutation = Lists.newArrayList();
-      ArrayList<Column> tableColumns = table_.getColumns();
-      for (int i = numClusteringCols; i < tableColumns.size(); ++i) {
-        analysisColumnPermutation.add(tableColumns.get(i).getName());
-      }
-    }
-
-    // selectExprTargetColumns maps from select expression index to a column in the target
-    // table. It will eventually include all mentioned columns that aren't static-valued
-    // partition columns.
-    ArrayList<Column> selectExprTargetColumns = Lists.newArrayList();
-
-    // Tracks the names of all columns encountered in either the permutation clause or
-    // the partition clause to detect duplicates.
-    Set<String> mentionedColumnNames = Sets.newHashSet();
-    for (String columnName: analysisColumnPermutation) {
-      Column column = table_.getColumn(columnName);
-      if (column == null) {
-        throw new AnalysisException(
-            "Unknown column '" + columnName + "' in column permutation");
-      }
-
-      if (!mentionedColumnNames.add(columnName)) {
-        throw new AnalysisException(
-            "Duplicate column '" + columnName + "' in column permutation");
-      }
-      selectExprTargetColumns.add(column);
-    }
-
-    int numStaticPartitionExprs = 0;
-    if (partitionKeyValues_ != null) {
-      for (PartitionKeyValue pkv: partitionKeyValues_) {
-        Column column = table_.getColumn(pkv.getColName());
-        if (column == null) {
-          throw new AnalysisException("Unknown column '" + pkv.getColName() +
-                                      "' in partition clause");
-        }
-
-        if (column.getPosition() >= numClusteringCols) {
-          throw new AnalysisException(
-              "Column '" + pkv.getColName() + "' is not a partition column");
-        }
-
-        if (!mentionedColumnNames.add(pkv.getColName())) {
-          throw new AnalysisException(
-              "Duplicate column '" + pkv.getColName() + "' in partition clause");
-        }
-        if (!pkv.isDynamic()) {
-          numStaticPartitionExprs++;
-        } else {
-          selectExprTargetColumns.add(column);
-        }
-      }
-    }
-
-    // Checks that every column in the target table is assigned exactly one expr.
-    checkColumnCoverage(selectExprTargetColumns, mentionedColumnNames,
-        selectListExprs.size(), numStaticPartitionExprs);
-
-    // Make sure static partition key values only contain const exprs.
-    if (partitionKeyValues_ != null) {
-      for (PartitionKeyValue kv: partitionKeyValues_) {
-        kv.analyze(analyzer);
-      }
-    }
-
-    // Populate partitionKeyExprs from partitionKeyValues and selectExprTargetColumns
-    prepareExpressions(selectExprTargetColumns, selectListExprs, table_, analyzer);
-    // Analyze plan hints at the end to prefer reporting other error messages first
-    // (e.g., the PARTITION clause is not applicable to unpartitioned and HBase tables).
-    analyzePlanHints(analyzer);
-  }
-
-  /**
-   * Sets table_ based on targetTableName_ and performs table-type specific analysis:
-   * - Partition clause is invalid for unpartitioned Hdfs tables and HBase tables
-   * - Overwrite is invalid for HBase tables
-   * - Check INSERT privileges as well as write access to Hdfs paths
-   * - Cannot insert into a view
-   * Adds table_ to the analyzer's descriptor table if analysis succeeds.
-   */
-  private void setTargetTable(Analyzer analyzer) throws AnalysisException {
-    // If the table has not yet been set, load it from the Catalog. This allows for
-    // callers to set a table to analyze that may not actually be created in the Catalog.
-    // One example use case is CREATE TABLE AS SELECT which must run analysis on the
-    // INSERT before the table has actually been created.
-    if (table_ == null) {
-      if (!targetTableName_.isFullyQualified()) {
-        targetTableName_ =
-            new TableName(analyzer.getDefaultDb(), targetTableName_.getTbl());
-      }
-      table_ = analyzer.getTable(targetTableName_, Privilege.INSERT);
-    } else {
-      targetTableName_ = new TableName(table_.getDb().getName(), table_.getName());
-      PrivilegeRequestBuilder pb = new PrivilegeRequestBuilder();
-      analyzer.registerPrivReq(pb.onTable(table_.getDb().getName(), table_.getName())
-          .allOf(Privilege.INSERT).toRequest());
-    }
-
-    // We do not support inserting into views.
-    if (table_ instanceof View) {
-      throw new AnalysisException(
-          String.format("Impala does not support inserting into views: %s",
-          table_.getFullName()));
-    }
-
-    for (Column c: table_.getColumns()) {
-      if (!c.getType().isSupported()) {
-        throw new AnalysisException(String.format("Unable to INSERT into target table " +
-            "(%s) because the column '%s' has an unsupported type '%s'.",
-            targetTableName_, c.getName(), c.getType().toSql()));
-      }
-    }
-
-    boolean isHBaseTable = (table_ instanceof HBaseTable);
-    int numClusteringCols = isHBaseTable ? 0 : table_.getNumClusteringCols();
-
-    if (partitionKeyValues_ != null && numClusteringCols == 0) {
-      if (isHBaseTable) {
-        throw new AnalysisException("PARTITION clause is not valid for INSERT into " +
-            "HBase tables. '" + targetTableName_ + "' is an HBase table");
-
-      } else {
-        // Unpartitioned table, but INSERT has PARTITION clause
-        throw new AnalysisException("PARTITION clause is only valid for INSERT into " +
-            "partitioned table. '" + targetTableName_ + "' is not partitioned");
-      }
-    }
-
-    if (table_ instanceof HdfsTable) {
-      HdfsTable hdfsTable = (HdfsTable) table_;
-      if (!hdfsTable.hasWriteAccess()) {
-        throw new AnalysisException(String.format("Unable to INSERT into target table " +
-            "(%s) because Impala does not have WRITE access to at least one HDFS path" +
-            ": %s", targetTableName_, hdfsTable.getFirstLocationWithoutWriteAccess()));
-      }
-      StringBuilder error = new StringBuilder();
-      hdfsTable.parseSkipHeaderLineCount(error);
-      if (error.length() > 0) throw new AnalysisException(error.toString());
-      try {
-        if (!FileSystemUtil.isImpalaWritableFilesystem(hdfsTable.getLocation())) {
-          throw new AnalysisException(String.format("Unable to INSERT into target " +
-              "table (%s) because %s is not a supported filesystem.", targetTableName_,
-              hdfsTable.getLocation()));
-        }
-      } catch (IOException e) {
-        throw new AnalysisException(String.format("Unable to INSERT into target " +
-            "table (%s): %s.", targetTableName_, e.getMessage()), e);
-      }
-      for (int colIdx = 0; colIdx < numClusteringCols; ++colIdx) {
-        Column col = hdfsTable.getColumns().get(colIdx);
-        // Hive has a number of issues handling BOOLEAN partition columns (see HIVE-6590).
-        // Instead of working around the Hive bugs, INSERT is disabled for BOOLEAN
-        // partitions in Impala. Once the Hive JIRA is resolved, we can remove this
-        // analysis check.
-        if (col.getType() == Type.BOOLEAN) {
-          throw new AnalysisException(String.format("INSERT into table with BOOLEAN " +
-              "partition column (%s) is not supported: %s", col.getName(),
-              targetTableName_));
-        }
-      }
-    }
-
-    if (table_ instanceof KuduTable) {
-      if (overwrite_) {
-        throw new AnalysisException("INSERT OVERWRITE not supported for Kudu tables.");
-      }
-      if (partitionKeyValues_ != null && !partitionKeyValues_.isEmpty()) {
-        throw new AnalysisException(
-            "Partition specifications are not supported for Kudu tables.");
-      }
-    }
-
-    if (isHBaseTable && overwrite_) {
-      throw new AnalysisException("HBase doesn't have a way to perform INSERT OVERWRITE");
-    }
-
-    // Add target table to descriptor table.
-    analyzer.getDescTbl().addReferencedTable(table_);
-  }
-
-  /**
-   * Checks that the column permutation + select list + static partition exprs +
-   * dynamic partition exprs collectively cover exactly the columns in the target
-   * table (no more, no fewer).
-   */
-  private void checkColumnCoverage(ArrayList<Column> selectExprTargetColumns,
-      Set<String> mentionedColumnNames, int numSelectListExprs,
-      int numStaticPartitionExprs) throws AnalysisException {
-    boolean isHBaseTable = (table_ instanceof HBaseTable);
-    int numClusteringCols = isHBaseTable ? 0 : table_.getNumClusteringCols();
-    // Check that all columns are mentioned by the permutation and partition clauses
-    if (selectExprTargetColumns.size() + numStaticPartitionExprs !=
-        table_.getColumns().size()) {
-      // We've already ruled out too many columns in the permutation and partition clauses
-      // by checking that there are no duplicates and that every column mentioned actually
-      // exists. So not all columns are mentioned in the query. If the unmentioned
-      // columns include partition columns, this is an error.
-      List<String> missingColumnNames = Lists.newArrayList();
-      for (Column column: table_.getColumns()) {
-        if (!mentionedColumnNames.contains(column.getName())) {
-          // HBase tables have a single row-key column which is always in position 0. It
-          // must be mentioned, since it is invalid to set it to NULL (which would
-          // otherwise happen by default).
-          if (isHBaseTable && column.getPosition() == 0) {
-            throw new AnalysisException("Row-key column '" + column.getName() +
-                "' must be explicitly mentioned in column permutation.");
-          }
-          if (column.getPosition() < numClusteringCols) {
-            missingColumnNames.add(column.getName());
-          }
-        }
-      }
-
-      if (!missingColumnNames.isEmpty()) {
-        throw new AnalysisException(
-            "Not enough partition columns mentioned in query. Missing columns are: " +
-            Joiner.on(", ").join(missingColumnNames));
-      }
-    }
-
-    // Expect the select-list exprs to have an entry for every target column.
-    if (selectExprTargetColumns.size() != numSelectListExprs) {
-      String comparator =
-          (selectExprTargetColumns.size() < numSelectListExprs) ? "fewer" : "more";
-      String partitionClause =
-          (partitionKeyValues_ == null) ? "returns" : "and PARTITION clause return";
-
-      // If there was no column permutation provided, the error is that the select-list
-      // has the wrong number of expressions compared to the number of columns in the
-      // table. If there was a column permutation, then the mismatch is between the
-      // select-list and the permutation itself.
-      if (columnPermutation_ == null) {
-        int totalColumnsMentioned = numSelectListExprs + numStaticPartitionExprs;
-        throw new AnalysisException(String.format(
-            "Target table '%s' has %s columns (%s) than the SELECT / VALUES clause %s" +
-            " (%s)", table_.getFullName(), comparator,
-            table_.getColumns().size(), partitionClause, totalColumnsMentioned));
-      } else {
-        String partitionPrefix =
-            (partitionKeyValues_ == null) ? "mentions" : "and PARTITION clause mention";
-        throw new AnalysisException(String.format(
-            "Column permutation %s %s columns (%s) than " +
-            "the SELECT / VALUES clause %s (%s)", partitionPrefix, comparator,
-            selectExprTargetColumns.size(), partitionClause, numSelectListExprs));
-      }
-    }
-  }
-
-  /**
-   * Performs three final parts of the analysis:
-   * 1. Checks type compatibility between all expressions and their targets
-   *
-   * 2. Populates partitionKeyExprs with type-compatible expressions, in Hive
-   * partition-column order, for all partition columns
-   *
-   * 3. Populates resultExprs_ with type-compatible expressions, in Hive column order,
-   * for all expressions in the select-list. Unmentioned columns are assigned NULL literal
-   * expressions.
-   *
-   * If necessary, adds casts to the expressions to make them compatible with the type of
-   * the corresponding column.
-   *
-   * @throws AnalysisException
-   *           If an expression is not compatible with its target column
-   */
-  private void prepareExpressions(List<Column> selectExprTargetColumns,
-      List<Expr> selectListExprs, Table tbl, Analyzer analyzer)
-      throws AnalysisException {
-    // Temporary lists of partition key exprs and names in an arbitrary order.
-    List<Expr> tmpPartitionKeyExprs = new ArrayList<Expr>();
-    List<String> tmpPartitionKeyNames = new ArrayList<String>();
-
-    int numClusteringCols = (tbl instanceof HBaseTable) ? 0 : tbl.getNumClusteringCols();
-
-    // Check dynamic partition columns for type compatibility.
-    for (int i = 0; i < selectListExprs.size(); ++i) {
-      Column targetColumn = selectExprTargetColumns.get(i);
-      Expr compatibleExpr = checkTypeCompatibility(
-          targetTableName_.toString(), targetColumn, selectListExprs.get(i));
-      if (targetColumn.getPosition() < numClusteringCols) {
-        // This is a dynamic clustering column
-        tmpPartitionKeyExprs.add(compatibleExpr);
-        tmpPartitionKeyNames.add(targetColumn.getName());
-      }
-      selectListExprs.set(i, compatibleExpr);
-    }
-
-    // Check static partition columns; dynamic entries in partitionKeyValues will
-    // already be in selectExprTargetColumns and are therefore ignored in this loop.
-    if (partitionKeyValues_ != null) {
-      for (PartitionKeyValue pkv: partitionKeyValues_) {
-        if (pkv.isStatic()) {
-          // tableColumn is guaranteed to exist after the earlier analysis checks
-          Column tableColumn = table_.getColumn(pkv.getColName());
-          Expr compatibleExpr = checkTypeCompatibility(
-              targetTableName_.toString(), tableColumn, pkv.getValue());
-          tmpPartitionKeyExprs.add(compatibleExpr);
-          tmpPartitionKeyNames.add(pkv.getColName());
-        }
-      }
-    }
-
-    // Reorder the partition key exprs and names to be consistent with the target table
-    // declaration.  We need those exprs in the original order to create the corresponding
-    // Hdfs folder structure correctly.
-    for (Column c: table_.getColumns()) {
-      for (int j = 0; j < tmpPartitionKeyNames.size(); ++j) {
-        if (c.getName().equals(tmpPartitionKeyNames.get(j))) {
-          partitionKeyExprs_.add(tmpPartitionKeyExprs.get(j));
-          break;
-        }
-      }
-    }
-
-    Preconditions.checkState(partitionKeyExprs_.size() == numClusteringCols);
-    // Make sure we have stats for partitionKeyExprs
-    for (Expr expr: partitionKeyExprs_) {
-      expr.analyze(analyzer);
-    }
-
-    // Finally, 'undo' the permutation so that the selectListExprs are in Hive column
-    // order, and add NULL expressions to all missing columns.
-    for (Column tblColumn: table_.getColumnsInHiveOrder()) {
-      boolean matchFound = false;
-      for (int i = 0; i < selectListExprs.size(); ++i) {
-        if (selectExprTargetColumns.get(i).getName().equals(tblColumn.getName())) {
-          resultExprs_.add(selectListExprs.get(i));
-          matchFound = true;
-          break;
-        }
-      }
-      // If no match is found, either the column is a clustering column with a static
-      // value, or it was unmentioned and therefore should have a NULL select-list
-      // expression.
-      if (!matchFound) {
-        if (tblColumn.getPosition() >= numClusteringCols) {
-          // Unmentioned non-clustering columns get NULL literals with the appropriate
-          // target type because Parquet cannot handle NULL_TYPE (IMPALA-617).
-          resultExprs_.add(NullLiteral.create(tblColumn.getType()));
-        }
-      }
-    }
-    // TODO: Check that HBase row-key columns are not NULL? See IMPALA-406
-    if (needsGeneratedQueryStatement_) {
-      // Build a query statement that returns NULL for every column
-      List<SelectListItem> selectListItems = Lists.newArrayList();
-      for (Expr e: resultExprs_) {
-        selectListItems.add(new SelectListItem(e, null));
-      }
-      SelectList selectList = new SelectList(selectListItems);
-      queryStmt_ = new SelectStmt(selectList, null, null, null, null, null, null);
-      queryStmt_.analyze(analyzer);
-    }
-  }
-
-  private void analyzePlanHints(Analyzer analyzer) throws AnalysisException {
-    if (planHints_ == null) return;
-    if (!planHints_.isEmpty() && table_ instanceof HBaseTable) {
-      throw new AnalysisException("INSERT hints are only supported for inserting into " +
-          "Hdfs tables.");
-    }
-    for (String hint: planHints_) {
-      if (hint.equalsIgnoreCase("SHUFFLE")) {
-        if (hasNoShuffleHint_) {
-          throw new AnalysisException("Conflicting INSERT hint: " + hint);
-        }
-        hasShuffleHint_ = true;
-        analyzer.setHasPlanHints();
-      } else if (hint.equalsIgnoreCase("NOSHUFFLE")) {
-        if (hasShuffleHint_) {
-          throw new AnalysisException("Conflicting INSERT hint: " + hint);
-        }
-        hasNoShuffleHint_ = true;
-        analyzer.setHasPlanHints();
-      } else {
-        analyzer.addWarning("INSERT hint not recognized: " + hint);
-      }
-    }
-    // Both flags may be false or one of them may be true, but not both.
-    Preconditions.checkState((!hasShuffleHint_ && !hasNoShuffleHint_)
-        || (hasShuffleHint_ ^ hasNoShuffleHint_));
-  }
-
-  public List<String> getPlanHints() { return planHints_; }
-  public TableName getTargetTableName() { return targetTableName_; }
-  public Table getTargetTable() { return table_; }
-  public void setTargetTable(Table table) { this.table_ = table; }
-  public boolean isOverwrite() { return overwrite_; }
-
-  /**
-   * Only valid after analysis
-   */
-  public QueryStmt getQueryStmt() { return queryStmt_; }
-  public void setQueryStmt(QueryStmt stmt) { queryStmt_ = stmt; }
-  public List<Expr> getPartitionKeyExprs() { return partitionKeyExprs_; }
-  public boolean hasShuffleHint() { return hasShuffleHint_; }
-  public boolean hasNoShuffleHint() { return hasNoShuffleHint_; }
-  public ArrayList<Expr> getResultExprs() { return resultExprs_; }
-
-  public DataSink createDataSink() {
-    // analyze() must have been called before.
-    Preconditions.checkState(table_ != null);
-    return TableSink.create(table_, TableSink.Op.INSERT, partitionKeyExprs_,
-        ImmutableList.<Integer>of(), overwrite_, ignoreDuplicates_);
-  }
-
-  /**
-   * Substitutes the result expressions and the partition key expressions with smap.
-   * Preserves the original types of those expressions during the substitution.
-   */
-  public void substituteResultExprs(ExprSubstitutionMap smap, Analyzer analyzer) {
-    resultExprs_ = Expr.substituteList(resultExprs_, smap, analyzer, true);
-    partitionKeyExprs_ = Expr.substituteList(partitionKeyExprs_, smap, analyzer, true);
-  }
-
-  @Override
-  public String toSql() {
-    StringBuilder strBuilder = new StringBuilder();
-
-    if (withClause_ != null) strBuilder.append(withClause_.toSql() + " ");
-
-    strBuilder.append("INSERT ");
-    if (overwrite_) {
-      strBuilder.append("OVERWRITE ");
-    } else {
-      if (ignoreDuplicates_) strBuilder.append("IGNORE ");
-      strBuilder.append("INTO ");
-    }
-    strBuilder.append("TABLE " + originalTableName_);
-    if (columnPermutation_ != null) {
-      strBuilder.append("(");
-      strBuilder.append(Joiner.on(", ").join(columnPermutation_));
-      strBuilder.append(")");
-    }
-    if (partitionKeyValues_ != null) {
-      List<String> values = Lists.newArrayList();
-      for (PartitionKeyValue pkv: partitionKeyValues_) {
-        values.add(pkv.getColName() +
-            (pkv.getValue() != null ? ("=" + pkv.getValue().toSql()) : ""));
-      }
-      strBuilder.append(" PARTITION (" + Joiner.on(", ").join(values) + ")");
-    }
-    if (planHints_ != null) {
-      strBuilder.append(" " + ToSqlUtils.getPlanHintsSql(planHints_));
-    }
-    if (!needsGeneratedQueryStatement_) {
-      strBuilder.append(" " + queryStmt_.toSql());
-    }
-    return strBuilder.toString();
-  }
-}
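
The mentioned-column bookkeeping above (no duplicate mentions, every partition column
covered exactly once, all other columns defaulting to NULL) is easier to follow in
isolation. A minimal, hypothetical sketch of the same checks using plain collections
instead of the catalog classes follows; none of these names are Impala FE classes, and
a table is modeled simply as a column list whose first numClusteringCols entries are
the partition columns.

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    public class ColumnCoverageSketch {
      // Checks a column permutation plus PARTITION-clause columns against a table
      // whose first numClusteringCols columns are the partition columns.
      static void check(List<String> tableCols, int numClusteringCols,
          List<String> permutation, List<String> partitionCols) {
        Set<String> mentioned = new HashSet<>();
        for (String c : permutation) {
          if (!tableCols.contains(c)) {
            throw new IllegalArgumentException("Unknown column '" + c + "'");
          }
          if (!mentioned.add(c)) {
            throw new IllegalArgumentException("Duplicate column '" + c + "'");
          }
        }
        for (String c : partitionCols) {
          int pos = tableCols.indexOf(c);
          if (pos < 0) throw new IllegalArgumentException("Unknown column '" + c + "'");
          if (pos >= numClusteringCols) {
            throw new IllegalArgumentException("'" + c + "' is not a partition column");
          }
          if (!mentioned.add(c)) {
            throw new IllegalArgumentException("Duplicate column '" + c + "'");
          }
        }
        // Partition columns are never defaulted to NULL: each must be mentioned.
        for (int i = 0; i < numClusteringCols; ++i) {
          if (!mentioned.contains(tableCols.get(i))) {
            throw new IllegalArgumentException(
                "Missing partition column: " + tableCols.get(i));
          }
        }
      }

      public static void main(String[] args) {
        // Table t(year, month, id, val), partitioned by (year, month).
        List<String> cols = Arrays.asList("year", "month", "id", "val");
        check(cols, 2, Arrays.asList("id", "val"), Arrays.asList("year", "month")); // ok
        check(cols, 2, Arrays.asList("id"), Arrays.asList("year")); // throws: month missing
      }
    }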

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/IsNotEmptyPredicate.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/IsNotEmptyPredicate.java b/fe/src/main/java/com/cloudera/impala/analysis/IsNotEmptyPredicate.java
deleted file mode 100644
index fd07a32..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/IsNotEmptyPredicate.java
+++ /dev/null
@@ -1,65 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TExprNode;
-import com.cloudera.impala.thrift.TExprNodeType;
-import com.google.common.base.Preconditions;
-
-/**
- * Predicate that checks whether a collection is empty or not.
- * This predicate is not user-accessible from SQL, and may be
- * generated as a performance optimization for certain queries.
- * TODO: Pass this Predicate as a TExprNodeType.FUNCTION_CALL
- * to the BE just like the rest of our Predicates. This is not yet
- * done to avoid invasive changes required in FE/BE to deal with
-   * resolution of functions with complex-typed arguments.
- */
-public class IsNotEmptyPredicate extends Predicate {
-
-  public IsNotEmptyPredicate(Expr collectionExpr) {
-    super();
-    Preconditions.checkNotNull(collectionExpr);
-    children_.add(collectionExpr);
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    if (isAnalyzed_) return;
-    super.analyze(analyzer);
-    if (!getChild(0).getType().isCollectionType()) {
-      throw new AnalysisException("Operand must be a collection type: "
-          + getChild(0).toSql() + " is of type " + getChild(0).getType());
-    }
-    // Avoid influencing cardinality estimates.
-    selectivity_ = 1.0;
-    if (getChild(0).hasCost()) evalCost_ = getChild(0).getCost() + IS_NOT_EMPTY_COST;
-  }
-
-  @Override
-  public String toSqlImpl() { return "!empty(" + getChild(0).toSql() + ")"; }
-
-  @Override
-  protected void toThrift(TExprNode msg) {
-    msg.node_type = TExprNodeType.IS_NOT_EMPTY_PRED;
-  }
-
-  @Override
-  public Expr clone() { return new IsNotEmptyPredicate(getChild(0).clone()); }
-}
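
Setting selectivity_ to 1.0 above keeps this internally generated conjunct from
shrinking cardinality estimates. A rough sketch of why that works, using the
simplified textbook model where a scan's estimate is scaled by the product of
conjunct selectivities (an illustration only, not Impala's actual estimator):

    public class SelectivitySketch {
      // Simplified model: output cardinality = input cardinality * product of
      // per-conjunct selectivities. A selectivity of 1.0 is a no-op factor.
      static long estimate(long inputRows, double... selectivities) {
        double product = 1.0;
        for (double s : selectivities) product *= s;
        return Math.round(inputRows * product);
      }

      public static void main(String[] args) {
        System.out.println(estimate(1_000_000L, 0.1));      // 100000
        System.out.println(estimate(1_000_000L, 0.1, 1.0)); // still 100000: !empty() is neutral
      }
    }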

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/IsNullPredicate.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/IsNullPredicate.java b/fe/src/main/java/com/cloudera/impala/analysis/IsNullPredicate.java
deleted file mode 100644
index 6a75b5f..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/IsNullPredicate.java
+++ /dev/null
@@ -1,186 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import com.cloudera.impala.catalog.Db;
-import com.cloudera.impala.catalog.Function;
-import com.cloudera.impala.catalog.Function.CompareMode;
-import com.cloudera.impala.catalog.ScalarFunction;
-import com.cloudera.impala.catalog.ScalarType;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.Reference;
-import com.cloudera.impala.thrift.TExprNode;
-import com.cloudera.impala.thrift.TExprNodeType;
-import com.google.common.base.Objects;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-public class IsNullPredicate extends Predicate {
-  private final boolean isNotNull_;
-
-  private static final String IS_NULL = "is_null_pred";
-  private static final String IS_NOT_NULL = "is_not_null_pred";
-
-  public IsNullPredicate(Expr e, boolean isNotNull) {
-    super();
-    this.isNotNull_ = isNotNull;
-    Preconditions.checkNotNull(e);
-    children_.add(e);
-  }
-
-  /**
-   * Copy c'tor used in clone().
-   */
-  protected IsNullPredicate(IsNullPredicate other) {
-    super(other);
-    isNotNull_ = other.isNotNull_;
-  }
-
-  public boolean isNotNull() { return isNotNull_; }
-
-  public static void initBuiltins(Db db) {
-    for (Type t: Type.getSupportedTypes()) {
-      if (t.isNull()) continue;
-      String isNullSymbol;
-      if (t.isBoolean()) {
-        isNullSymbol = "_ZN6impala15IsNullPredicate6IsNullIN10impala_udf10BooleanValE" +
-            "EES3_PNS2_15FunctionContextERKT_";
-      } else {
-        String udfType = Function.getUdfType(t);
-        isNullSymbol = "_ZN6impala15IsNullPredicate6IsNullIN10impala_udf" +
-            udfType.length() + udfType +
-            "EEENS2_10BooleanValEPNS2_15FunctionContextERKT_";
-      }
-      db.addBuiltin(ScalarFunction.createBuiltinOperator(
-          IS_NULL, isNullSymbol, Lists.newArrayList(t), Type.BOOLEAN));
-
-      String isNotNullSymbol = isNullSymbol.replace("6IsNull", "9IsNotNull");
-      db.addBuiltin(ScalarFunction.createBuiltinOperator(
-          IS_NOT_NULL, isNotNullSymbol, Lists.newArrayList(t), Type.BOOLEAN));
-    }
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (!super.equals(obj)) return false;
-    return ((IsNullPredicate) obj).isNotNull_ == isNotNull_;
-  }
-
-  @Override
-  public String toSqlImpl() {
-    return getChild(0).toSql() + (isNotNull_ ? " IS NOT NULL" : " IS NULL");
-  }
-
-  @Override
-  public String debugString() {
-    return Objects.toStringHelper(this)
-        .add("notNull", isNotNull_)
-        .addValue(super.debugString())
-        .toString();
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    if (isAnalyzed_) return;
-    super.analyze(analyzer);
-
-    if (contains(Subquery.class)) {
-      if (getChild(0) instanceof ExistsPredicate) {
-        // Replace the EXISTS subquery with a BoolLiteral as it can never return
-        // a null value.
-        setChild(0, new BoolLiteral(true));
-        getChild(0).analyze(analyzer);
-      } else if (!getChild(0).contains(Expr.IS_SCALAR_SUBQUERY)) {
-        // We only support scalar subqueries in an IS NULL predicate because
-        // they can be rewritten into a join.
-        // TODO: Add support for InPredicates and BinaryPredicates with
-        // subqueries when we implement independent subquery evaluation.
-        // TODO: Handle arbitrary UDA/Udfs
-        throw new AnalysisException("Unsupported IS NULL predicate that contains " +
-            "a subquery: " + toSqlImpl());
-      }
-    }
-
-    // Make sure the BE never sees TYPE_NULL
-    if (getChild(0).getType().isNull()) {
-      uncheckedCastChild(ScalarType.BOOLEAN, 0);
-    }
-
-    if (getChild(0).getType().isComplexType()) {
-      String errorMsg = (isNotNull_ ? "IS NOT NULL" : "IS NULL") +
-         " predicate does not support complex types: ";
-      throw new AnalysisException(errorMsg + toSqlImpl());
-    }
-
-    if (isNotNull_) {
-      fn_ = getBuiltinFunction(
-          analyzer, IS_NOT_NULL, collectChildReturnTypes(), CompareMode.IS_IDENTICAL);
-    } else {
-      fn_ = getBuiltinFunction(
-          analyzer, IS_NULL, collectChildReturnTypes(), CompareMode.IS_IDENTICAL);
-    }
-    if (getChild(0).hasCost()) evalCost_ = getChild(0).getCost() + IS_NULL_COST;
-
-    // determine selectivity
-    // TODO: increase this to make sure we don't end up favoring broadcast joins
-    // due to underestimated cardinalities?
-    Reference<SlotRef> slotRefRef = new Reference<SlotRef>();
-    if (isSingleColumnPredicate(slotRefRef, null)) {
-      SlotDescriptor slotDesc = slotRefRef.getRef().getDesc();
-      if (!slotDesc.getStats().hasNulls()) return;
-      Table table = slotDesc.getParent().getTable();
-      if (table != null && table.getNumRows() > 0) {
-        long numRows = table.getNumRows();
-        if (isNotNull_) {
-          selectivity_ =
-              (double) (numRows - slotDesc.getStats().getNumNulls()) / (double) numRows;
-        } else {
-          selectivity_ = (double) slotDesc.getStats().getNumNulls() / (double) numRows;
-        }
-        selectivity_ = Math.max(0.0, Math.min(1.0, selectivity_));
-      }
-    }
-  }
-
-  @Override
-  protected void toThrift(TExprNode msg) {
-    msg.node_type = TExprNodeType.FUNCTION_CALL;
-  }
-
-  /**
-   * If the predicate is of the form "<SlotRef> IS [NOT] NULL", returns the
-   * SlotRef.
-   */
-  @Override
-  public SlotRef getBoundSlot() {
-    return getChild(0).unwrapSlotRef(true);
-  }
-
-  /**
-   * Negates an IsNullPredicate.
-   */
-  @Override
-  public Expr negate() {
-    return new IsNullPredicate(getChild(0), !isNotNull_);
-  }
-
-  @Override
-  public Expr clone() { return new IsNullPredicate(this); }
-}
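
The symbol strings assembled in initBuiltins() follow the Itanium C++ name-mangling
scheme, where a template type argument such as impala_udf::IntVal is spliced in as
its length followed by its name. A standalone sketch of just that string
construction, mirroring the non-BOOLEAN branch of the code above:

    public class MangledSymbolSketch {
      // Mirrors initBuiltins() above: the UDF value type is encoded as
      // <length><name>, per Itanium name mangling.
      static String isNullSymbol(String udfType) {
        return "_ZN6impala15IsNullPredicate6IsNullIN10impala_udf"
            + udfType.length() + udfType
            + "EEENS2_10BooleanValEPNS2_15FunctionContextERKT_";
      }

      public static void main(String[] args) {
        String sym = isNullSymbol("IntVal"); // "IntVal".length() == 6 -> "...6IntVal..."
        System.out.println(sym);
        // The IS NOT NULL entry point differs only in the mangled method name:
        System.out.println(sym.replace("6IsNull", "9IsNotNull"));
      }
    }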

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/JoinOperator.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/JoinOperator.java b/fe/src/main/java/com/cloudera/impala/analysis/JoinOperator.java
deleted file mode 100644
index d50861b..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/JoinOperator.java
+++ /dev/null
@@ -1,112 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import com.cloudera.impala.thrift.TJoinOp;
-
-public enum JoinOperator {
-  INNER_JOIN("INNER JOIN", TJoinOp.INNER_JOIN),
-  LEFT_OUTER_JOIN("LEFT OUTER JOIN", TJoinOp.LEFT_OUTER_JOIN),
-  LEFT_SEMI_JOIN("LEFT SEMI JOIN", TJoinOp.LEFT_SEMI_JOIN),
-  LEFT_ANTI_JOIN("LEFT ANTI JOIN", TJoinOp.LEFT_ANTI_JOIN),
-  RIGHT_OUTER_JOIN("RIGHT OUTER JOIN", TJoinOp.RIGHT_OUTER_JOIN),
-  RIGHT_SEMI_JOIN("RIGHT SEMI JOIN", TJoinOp.RIGHT_SEMI_JOIN),
-  RIGHT_ANTI_JOIN("RIGHT ANTI JOIN", TJoinOp.RIGHT_ANTI_JOIN),
-  FULL_OUTER_JOIN("FULL OUTER JOIN", TJoinOp.FULL_OUTER_JOIN),
-  CROSS_JOIN("CROSS JOIN", TJoinOp.CROSS_JOIN),
-  // Variant of the LEFT ANTI JOIN that is used for the rewrite of
-  // NOT IN subqueries. It can have a single equality join conjunct
-  // that returns TRUE when the rhs is NULL.
-  NULL_AWARE_LEFT_ANTI_JOIN("NULL AWARE LEFT ANTI JOIN",
-      TJoinOp.NULL_AWARE_LEFT_ANTI_JOIN);
-
-  private final String description_;
-  private final TJoinOp thriftJoinOp_;
-
-  private JoinOperator(String description, TJoinOp thriftJoinOp) {
-    this.description_ = description;
-    this.thriftJoinOp_ = thriftJoinOp;
-  }
-
-  @Override
-  public String toString() {
-    return description_;
-  }
-
-  public TJoinOp toThrift() {
-    return thriftJoinOp_;
-  }
-
-  public boolean isInnerJoin() {
-    return this == INNER_JOIN;
-  }
-
-  public boolean isLeftOuterJoin() { return this == LEFT_OUTER_JOIN; }
-  public boolean isRightOuterJoin() { return this == RIGHT_OUTER_JOIN; }
-
-  public boolean isOuterJoin() {
-    return this == LEFT_OUTER_JOIN
-        || this == RIGHT_OUTER_JOIN
-        || this == FULL_OUTER_JOIN;
-  }
-
-  public boolean isSemiJoin() {
-    return this == JoinOperator.LEFT_SEMI_JOIN || this == JoinOperator.LEFT_ANTI_JOIN ||
-        this == JoinOperator.RIGHT_SEMI_JOIN || this == JoinOperator.RIGHT_ANTI_JOIN ||
-        this == JoinOperator.NULL_AWARE_LEFT_ANTI_JOIN;
-  }
-
-  public boolean isLeftSemiJoin() {
-    return this == JoinOperator.LEFT_SEMI_JOIN || this == JoinOperator.LEFT_ANTI_JOIN ||
-        this == JoinOperator.NULL_AWARE_LEFT_ANTI_JOIN;
-  }
-
-  public boolean isRightSemiJoin() {
-    return this == JoinOperator.RIGHT_SEMI_JOIN || this == JoinOperator.RIGHT_ANTI_JOIN;
-  }
-
-  public boolean isCrossJoin() {
-    return this == JoinOperator.CROSS_JOIN;
-  }
-
-  public boolean isFullOuterJoin() {
-    return this == JoinOperator.FULL_OUTER_JOIN;
-  }
-
-  public boolean isNullAwareLeftAntiJoin() {
-    return this == JoinOperator.NULL_AWARE_LEFT_ANTI_JOIN;
-  }
-
-  public boolean isAntiJoin() {
-    return this == JoinOperator.LEFT_ANTI_JOIN || this == JoinOperator.RIGHT_ANTI_JOIN ||
-        this == JoinOperator.NULL_AWARE_LEFT_ANTI_JOIN;
-  }
-
-  public JoinOperator invert() {
-    switch (this) {
-      case LEFT_OUTER_JOIN: return RIGHT_OUTER_JOIN;
-      case RIGHT_OUTER_JOIN: return LEFT_OUTER_JOIN;
-      case LEFT_SEMI_JOIN: return RIGHT_SEMI_JOIN;
-      case RIGHT_SEMI_JOIN: return LEFT_SEMI_JOIN;
-      case LEFT_ANTI_JOIN: return RIGHT_ANTI_JOIN;
-      case RIGHT_ANTI_JOIN: return LEFT_ANTI_JOIN;
-      case NULL_AWARE_LEFT_ANTI_JOIN: throw new IllegalStateException("Not implemented");
-      default: return this;
-    }
-  }
-}
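
invert() is what lets a planner swap the inputs of a join (for example, to build the
hash table on the smaller side) while preserving semantics. A brief usage sketch,
assuming the JoinOperator enum above is on the classpath; for every case invert()
handles, inverting twice round-trips to the original operator:

    public class JoinInvertDemo {
      public static void main(String[] args) {
        for (JoinOperator op : JoinOperator.values()) {
          // invert() deliberately throws for the null-aware anti join.
          if (op.isNullAwareLeftAntiJoin()) continue;
          System.out.println(op + "  <->  " + op.invert());
          assert op.invert().invert() == op; // run with -ea to check the round trip
        }
      }
    }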

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/LikePredicate.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/LikePredicate.java b/fe/src/main/java/com/cloudera/impala/analysis/LikePredicate.java
deleted file mode 100644
index cb006bf..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/LikePredicate.java
+++ /dev/null
@@ -1,169 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.regex.Pattern;
-import java.util.regex.PatternSyntaxException;
-
-import com.cloudera.impala.catalog.Db;
-import com.cloudera.impala.catalog.Function.CompareMode;
-import com.cloudera.impala.catalog.ScalarFunction;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TExprNode;
-import com.cloudera.impala.thrift.TExprNodeType;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-public class LikePredicate extends Predicate {
-  enum Operator {
-    LIKE("LIKE"),
-    ILIKE("ILIKE"),
-    RLIKE("RLIKE"),
-    REGEXP("REGEXP"),
-    IREGEXP("IREGEXP");
-
-    private final String description_;
-
-    private Operator(String description) {
-      this.description_ = description;
-    }
-
-    @Override
-    public String toString() {
-      return description_;
-    }
-  }
-
-  public static void initBuiltins(Db db) {
-    db.addBuiltin(ScalarFunction.createBuiltin(
-        Operator.LIKE.name(), Lists.<Type>newArrayList(Type.STRING, Type.STRING),
-        false, Type.BOOLEAN, "_ZN6impala13LikePredicate4LikeEPN10impala_udf15FunctionContextERKNS1_9StringValES6_",
-        "_ZN6impala13LikePredicate11LikePrepareEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE",
-        "_ZN6impala13LikePredicate9LikeCloseEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE", true));
-    db.addBuiltin(ScalarFunction.createBuiltin(
-        Operator.ILIKE.name(), Lists.<Type>newArrayList(Type.STRING, Type.STRING),
-        false, Type.BOOLEAN, "_ZN6impala13LikePredicate4LikeEPN10impala_udf15FunctionContextERKNS1_9StringValES6_",
-        "_ZN6impala13LikePredicate12ILikePrepareEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE",
-        "_ZN6impala13LikePredicate9LikeCloseEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE", true));
-    db.addBuiltin(ScalarFunction.createBuiltin(
-        Operator.RLIKE.name(), Lists.<Type>newArrayList(Type.STRING, Type.STRING),
-        false, Type.BOOLEAN, "_ZN6impala13LikePredicate5RegexEPN10impala_udf15FunctionContextERKNS1_9StringValES6_",
-        "_ZN6impala13LikePredicate12RegexPrepareEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE",
-        "_ZN6impala13LikePredicate10RegexCloseEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE", true));
-    db.addBuiltin(ScalarFunction.createBuiltin(
-        Operator.REGEXP.name(), Lists.<Type>newArrayList(Type.STRING, Type.STRING),
-        false, Type.BOOLEAN, "_ZN6impala13LikePredicate5RegexEPN10impala_udf15FunctionContextERKNS1_9StringValES6_",
-        "_ZN6impala13LikePredicate12RegexPrepareEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE",
-        "_ZN6impala13LikePredicate10RegexCloseEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE", true));
-    db.addBuiltin(ScalarFunction.createBuiltin(
-        Operator.IREGEXP.name(), Lists.<Type>newArrayList(Type.STRING, Type.STRING),
-        false, Type.BOOLEAN, "_ZN6impala13LikePredicate5RegexEPN10impala_udf15FunctionContextERKNS1_9StringValES6_",
-        "_ZN6impala13LikePredicate13IRegexPrepareEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE",
-        "_ZN6impala13LikePredicate10RegexCloseEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE", true));
-  }
-
-  private final Operator op_;
-
-  public LikePredicate(Operator op, Expr e1, Expr e2) {
-    super();
-    this.op_ = op;
-    Preconditions.checkNotNull(e1);
-    children_.add(e1);
-    Preconditions.checkNotNull(e2);
-    children_.add(e2);
-  }
-
-  /**
-   * Copy c'tor used in clone().
-   */
-  public LikePredicate(LikePredicate other) {
-    super(other);
-    op_ = other.op_;
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (!super.equals(obj)) return false;
-    return ((LikePredicate) obj).op_ == op_;
-  }
-
-  @Override
-  public String toSqlImpl() {
-    return getChild(0).toSql() + " " + op_.toString() + " " + getChild(1).toSql();
-  }
-
-  @Override
-  protected void toThrift(TExprNode msg) {
-    msg.node_type = TExprNodeType.FUNCTION_CALL;
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    if (isAnalyzed_) return;
-    super.analyze(analyzer);
-    if (!getChild(0).getType().isStringType() && !getChild(0).getType().isNull()) {
-      throw new AnalysisException(
-          "left operand of " + op_.toString() + " must be of type STRING: " + toSql());
-    }
-    if (!getChild(1).getType().isStringType() && !getChild(1).getType().isNull()) {
-      throw new AnalysisException(
-          "right operand of " + op_.toString() + " must be of type STRING: " + toSql());
-    }
-
-    fn_ = getBuiltinFunction(analyzer, op_.toString(), collectChildReturnTypes(),
-        CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
-    Preconditions.checkState(fn_ != null);
-    Preconditions.checkState(fn_.getReturnType().isBoolean());
-
-    if (getChild(1).isLiteral() && !getChild(1).isNullLiteral()
-        && (op_ == Operator.RLIKE || op_ == Operator.REGEXP || op_ == Operator.IREGEXP)) {
-      // let's make sure the pattern works
-      // TODO: this checks that it's a Java-supported regex, but the syntax supported
-      // by the backend is POSIX; add a call to the backend to check the regex syntax.
-      try {
-        Pattern.compile(((StringLiteral) getChild(1)).getValue());
-      } catch (PatternSyntaxException e) {
-        throw new AnalysisException(
-            "invalid regular expression in '" + this.toSql() + "'");
-      }
-    }
-    castForFunctionCall(false);
-
-    if (hasChildCosts()) {
-      if (getChild(1).isLiteral() && !getChild(1).isNullLiteral() &&
-          Pattern.matches("[%_]*[^%_]*[%_]*", ((StringLiteral) getChild(1)).getValue())) {
-        // This pattern only has wildcards as leading or trailing character,
-        // so it is linear.
-        evalCost_ = getChildCosts() +
-            (float) (getAvgStringLength(getChild(0)) + getAvgStringLength(getChild(1)) *
-            BINARY_PREDICATE_COST) + LIKE_COST;
-      } else {
-        // This pattern is more expensive, so calculate its cost as quadratic.
-        evalCost_ = getChildCosts() +
-            (float) (getAvgStringLength(getChild(0)) * getAvgStringLength(getChild(1)) *
-            BINARY_PREDICATE_COST) + LIKE_COST;
-      }
-    }
-  }
-
-  @Override
-  public Expr clone() { return new LikePredicate(this); }
-
-  public Operator getOp() { return op_; }
-}
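
The branch on Pattern.matches("[%_]*[^%_]*[%_]*", ...) above is the whole
linear-vs-quadratic cost split: wildcards confined to a leading or trailing run can
be matched in a single pass over the input, while interleaved wildcards force
backtracking. A standalone sketch of just that classification:

    import java.util.regex.Pattern;

    public class LikeCostClassifier {
      // Same test used in LikePredicate.analyze() above: true means the pattern's
      // wildcards sit only at the edges, so matching is linear in the input length.
      static boolean isLinearPattern(String likePattern) {
        return Pattern.matches("[%_]*[^%_]*[%_]*", likePattern);
      }

      public static void main(String[] args) {
        System.out.println(isLinearPattern("%abc%")); // true  -> linear cost formula
        System.out.println(isLinearPattern("abc_"));  // true  -> linear cost formula
        System.out.println(isLinearPattern("a%b"));   // false -> quadratic cost formula
      }
    }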

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/LimitElement.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/LimitElement.java b/fe/src/main/java/com/cloudera/impala/analysis/LimitElement.java
deleted file mode 100644
index 4de9501..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/LimitElement.java
+++ /dev/null
@@ -1,183 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.service.FeSupport;
-import com.cloudera.impala.thrift.TColumnValue;
-import com.google.common.base.Preconditions;
-
-/**
- * Combination of limit and offset expressions.
- */
-class LimitElement {
-  /////////////////////////////////////////
-  // BEGIN: Members that need to be reset()
-
-  private final Expr limitExpr_;
-  private final Expr offsetExpr_;
-  private long limit_;
-  private long offset_;
-  private boolean isAnalyzed_;
-
-  // END: Members that need to be reset()
-  /////////////////////////////////////////
-
-  /**
-   * Constructs the LimitElement.
-   * @param limitExpr The limit expression. May be null if there is no LIMIT clause.
-   * @param offsetExpr The offset expression. May be null if there is no OFFSET clause.
-   */
-  public LimitElement(Expr limitExpr, Expr offsetExpr) {
-    this.limitExpr_ = limitExpr;
-    this.offsetExpr_ = offsetExpr;
-    isAnalyzed_ = false;
-    limit_ = -1;
-    offset_ = 0;
-  }
-
-  /**
-   * Copy c'tor used in clone().
-   */
-  protected LimitElement(LimitElement other) {
-    limitExpr_ = (other.limitExpr_ != null) ? other.limitExpr_.clone() : null;
-    offsetExpr_ = (other.offsetExpr_ != null) ? other.offsetExpr_.clone() : null;
-    limit_ = other.limit_;
-    offset_ = other.offset_;
-    isAnalyzed_ = other.isAnalyzed_;
-  }
-
-  public Expr getLimitExpr() { return limitExpr_; }
-  public Expr getOffsetExpr() { return offsetExpr_; }
-
-  /**
-   * Returns the integer limit, evaluated from the limit expression. Must call analyze()
-   * first. If no limit was set, then -1 is returned.
-   */
-  public long getLimit() {
-    Preconditions.checkState(isAnalyzed_);
-    return limit_;
-  }
-
-  public boolean hasLimit() {
-    Preconditions.checkState(isAnalyzed_);
-    return limit_ != -1;
-  }
-
-  /**
-   * Returns the integer offset, evaluated from the offset expression. Must call
-   * analyze() first. If no offsetExpr exists, then 0 (the default offset) is returned.
-   */
-  public long getOffset() {
-    Preconditions.checkState(isAnalyzed_);
-    return offset_;
-  }
-
-  public String toSql() {
-    StringBuilder sb = new StringBuilder();
-    if (limitExpr_ != null) {
-      sb.append(" LIMIT ");
-      sb.append(limitExpr_.toSql());
-    }
-    // Don't add the offset if it is the default value. However, we do print it if it
-    // hasn't been analyzed yet because we need to output the expression used in errors.
-    if (offsetExpr_ != null && (offset_ != 0 || !isAnalyzed_)) {
-      sb.append(" OFFSET ");
-      sb.append(offsetExpr_.toSql());
-    }
-    return sb.toString();
-  }
-
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    isAnalyzed_ = true;
-    if (limitExpr_ != null) {
-      if (!limitExpr_.isConstant()) {
-        throw new AnalysisException("LIMIT expression must be a constant expression: " +
-            limitExpr_.toSql());
-      }
-
-      limitExpr_.analyze(analyzer);
-      if (!limitExpr_.getType().isIntegerType()) {
-        throw new AnalysisException("LIMIT expression must be an integer type but is '" +
-            limitExpr_.getType() + "': " + limitExpr_.toSql());
-      }
-      limit_ = evalIntegerExpr(analyzer, limitExpr_, "LIMIT");
-    }
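-    // A constant LIMIT 0 means no rows can be returned; mark the result set
-    // empty so the planner can short-circuit the query.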
-    if (limit_ == 0) analyzer.setHasEmptyResultSet();
-
-    if (offsetExpr_ != null) {
-      if (!offsetExpr_.isConstant()) {
-        throw new AnalysisException("OFFSET expression must be a constant expression: " +
-            offsetExpr_.toSql());
-      }
-
-      offsetExpr_.analyze(analyzer);
-      if (!offsetExpr_.getType().isIntegerType()) {
-        throw new AnalysisException("OFFSET expression must be an integer type but " +
-            "is '" + offsetExpr_.getType() + "': " + offsetExpr_.toSql());
-      }
-      offset_ = evalIntegerExpr(analyzer, offsetExpr_, "OFFSET");
-    }
-  }
-
-  /**
-   * Evaluates an expression to a non-negative integral value, returned as a long.
-   * Throws if the expression cannot be evaluated, if the value evaluates to null, or
-   * if the result is negative. The 'name' parameter is used in exception messages,
-   * e.g. "LIMIT expression evaluates to NULL".
-   */
-  private static long evalIntegerExpr(Analyzer analyzer, Expr expr, String name)
-      throws AnalysisException {
-    TColumnValue val = null;
-    try {
-      val = FeSupport.EvalConstExpr(expr, analyzer.getQueryCtx());
-    } catch (InternalException e) {
-      throw new AnalysisException("Failed to evaluate expr: " + expr.toSql(), e);
-    }
-    long value;
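-    // Exactly one integer field of the thrift value is set, matching the
-    // width of the expression's type; if none is set, the result was NULL.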
-    if (val.isSetLong_val()) {
-      value = val.getLong_val();
-    } else if (val.isSetInt_val()) {
-      value = val.getInt_val();
-    } else if (val.isSetShort_val()) {
-      value = val.getShort_val();
-    } else if (val.isSetByte_val()) {
-      value = val.getByte_val();
-    } else {
-      throw new AnalysisException(name + " expression evaluates to NULL: " +
-          expr.toSql());
-    }
-    if (value < 0) {
-      throw new AnalysisException(name + " must be a non-negative integer: " +
-          expr.toSql() + " = " + value);
-    }
-    return value;
-  }
-
-  @Override
-  public LimitElement clone() { return new LimitElement(this); }
-
-  public void reset() {
-    isAnalyzed_ = false;
-    limit_ = -1;
-    offset_ = 0;
-    if (limitExpr_ != null) limitExpr_.reset();
-    if (offsetExpr_ != null) offsetExpr_.reset();
-  }
-}

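evalIntegerExpr() above widens whichever integer field the backend populated
into a long and treats "no field set" as NULL. The same unwrapping, mirrored
as a standalone sketch (a hypothetical plain-Java stand-in for the
thrift-generated TColumnValue, not Impala code):

    import java.util.OptionalLong;

    public class ColumnValueSketch {
      Long longVal; Integer intVal; Short shortVal; Byte byteVal;

      // Widen the single populated field to a long; empty means NULL.
      OptionalLong asLong() {
        if (longVal != null) return OptionalLong.of(longVal);
        if (intVal != null) return OptionalLong.of(intVal);
        if (shortVal != null) return OptionalLong.of(shortVal);
        if (byteVal != null) return OptionalLong.of(byteVal);
        return OptionalLong.empty();
      }
    }
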
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/LiteralExpr.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/LiteralExpr.java b/fe/src/main/java/com/cloudera/impala/analysis/LiteralExpr.java
deleted file mode 100644
index f5eedbb..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/LiteralExpr.java
+++ /dev/null
@@ -1,242 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.math.BigDecimal;
-import java.math.BigInteger;
-
-import com.cloudera.impala.catalog.ScalarType;
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.common.NotImplementedException;
-import com.cloudera.impala.service.FeSupport;
-import com.cloudera.impala.thrift.TColumnValue;
-import com.cloudera.impala.thrift.TExprNode;
-import com.cloudera.impala.thrift.TQueryCtx;
-import com.google.common.base.Preconditions;
-
-/**
- * Representation of a literal expression. Literals are comparable to allow
- * ordering of HdfsPartitions whose partition-key values are represented as literals.
- */
-public abstract class LiteralExpr extends Expr implements Comparable<LiteralExpr> {
-
-  public LiteralExpr() {
-    numDistinctValues_ = 1;
-  }
-
-  /**
-   * Copy c'tor used in clone().
-   */
-  protected LiteralExpr(LiteralExpr other) {
-    super(other);
-  }
-
-  /**
-   * Returns an analyzed literal of 'type'. Returns null for types that do not have a
-   * LiteralExpr subclass, e.g. TIMESTAMP.
-   */
-  public static LiteralExpr create(String value, Type type) throws AnalysisException {
-    Preconditions.checkArgument(type.isValid());
-    LiteralExpr e = null;
-    switch (type.getPrimitiveType()) {
-      case NULL_TYPE:
-        e = new NullLiteral();
-        break;
-      case BOOLEAN:
-        e = new BoolLiteral(value);
-        break;
-      case TINYINT:
-      case SMALLINT:
-      case INT:
-      case BIGINT:
-      case FLOAT:
-      case DOUBLE:
-      case DECIMAL:
-        e = new NumericLiteral(value, type);
-        break;
-      case STRING:
-      case VARCHAR:
-      case CHAR:
-        e = new StringLiteral(value);
-        break;
-      case DATE:
-      case DATETIME:
-      case TIMESTAMP:
-        // TODO: we support TIMESTAMP, but there is no way to specify a
-        // TIMESTAMP literal in SQL.
-        return null;
-      default:
-        Preconditions.checkState(false,
-            String.format("Literals of type '%s' not supported.", type.toSql()));
-    }
-    e.analyze(null);
-    // Need to cast since we cannot infer the type from the value alone, e.g. the
-    // value may parse as a TINYINT when a BIGINT is required.
-    return (LiteralExpr) e.uncheckedCastTo(type);
-  }
-
-  /**
-   * Returns an analyzed literal from the thrift object.
-   */
-  public static LiteralExpr fromThrift(TExprNode exprNode, Type colType) {
-    try {
-      LiteralExpr result = null;
-      switch (exprNode.node_type) {
-        case FLOAT_LITERAL:
-          result = LiteralExpr.create(
-              Double.toString(exprNode.float_literal.value), colType);
-          break;
-        case DECIMAL_LITERAL:
-          byte[] bytes = exprNode.decimal_literal.getValue();
-          BigDecimal val = new BigDecimal(new BigInteger(bytes));
-          ScalarType decimalType = (ScalarType) colType;
-          // We store the decimal as the unscaled bytes. Need to adjust for the scale.
-          val = val.movePointLeft(decimalType.decimalScale());
-          result = new NumericLiteral(val, colType);
-          break;
-        case INT_LITERAL:
-          result = LiteralExpr.create(
-              Long.toString(exprNode.int_literal.value), colType);
-          break;
-        case STRING_LITERAL:
-          result = LiteralExpr.create(exprNode.string_literal.value, colType);
-          break;
-        case BOOL_LITERAL:
-          result = LiteralExpr.create(
-              Boolean.toString(exprNode.bool_literal.value), colType);
-          break;
-        case NULL_LITERAL:
-          return NullLiteral.create(colType);
-        default:
-          throw new UnsupportedOperationException("Unsupported partition key type: " +
-              exprNode.node_type);
-      }
-      Preconditions.checkNotNull(result);
-      result.analyze(null);
-      return result;
-    } catch (Exception e) {
-      throw new IllegalStateException("Error creating LiteralExpr: ", e);
-    }
-  }
-
-  // Returns the string representation of the literal's value. Used when passing
-  // literal values to the metastore rather than to Impala backends. This is similar to
-  // the toSql() method, but does not perform any formatting of the string values. Neither
-  // method unescapes string values.
-  public abstract String getStringValue();
-
-  // Swaps the sign of numeric literals.
-  // Throws for non-numeric literals.
-  public void swapSign() throws NotImplementedException {
-    throw new NotImplementedException("swapSign() only implemented for numeric " +
-        "literals");
-  }
-
-  /**
-   * Evaluates the given constant expr and returns its result as a LiteralExpr.
-   * Assumes expr has been analyzed. Returns constExpr if it is already a LiteralExpr.
-   * Returns null for types that do not have a LiteralExpr subclass, e.g. TIMESTAMP.
-   * TODO: Support non-scalar types.
-   */
-  public static LiteralExpr create(Expr constExpr, TQueryCtx queryCtx)
-      throws AnalysisException {
-    Preconditions.checkState(constExpr.isConstant());
-    Preconditions.checkState(constExpr.getType().isValid());
-    if (constExpr instanceof LiteralExpr) return (LiteralExpr) constExpr;
-
-    TColumnValue val = null;
-    try {
-      val = FeSupport.EvalConstExpr(constExpr, queryCtx);
-    } catch (InternalException e) {
-      throw new AnalysisException(String.format("Failed to evaluate expr '%s'",
-          constExpr.toSql()), e);
-    }
-
-    LiteralExpr result = null;
-    switch (constExpr.getType().getPrimitiveType()) {
-      case NULL_TYPE:
-        result = new NullLiteral();
-        break;
-      case BOOLEAN:
-        // Check isSetBool_val() (not the value getter) so that a legitimate
-        // 'false' value is not mistaken for NULL.
-        if (val.isSetBool_val()) result = new BoolLiteral(val.bool_val);
-        break;
-      case TINYINT:
-        if (val.isSetByte_val()) {
-          result = new NumericLiteral(BigDecimal.valueOf(val.byte_val));
-        }
-        break;
-      case SMALLINT:
-        if (val.isSetShort_val()) {
-          result = new NumericLiteral(BigDecimal.valueOf(val.short_val));
-        }
-        break;
-      case INT:
-        if (val.isSetInt_val()) {
-          result = new NumericLiteral(BigDecimal.valueOf(val.int_val));
-        }
-        break;
-      case BIGINT:
-        if (val.isSetLong_val()) {
-          result = new NumericLiteral(BigDecimal.valueOf(val.long_val));
-        }
-        break;
-      case FLOAT:
-      case DOUBLE:
-        if (val.isSetDouble_val()) {
-          result =
-              new NumericLiteral(new BigDecimal(val.double_val), constExpr.getType());
-        }
-        break;
-      case DECIMAL:
-        if (val.isSetString_val()) {
-          result =
-              new NumericLiteral(new BigDecimal(val.string_val), constExpr.getType());
-        }
-        break;
-      case STRING:
-      case VARCHAR:
-      case CHAR:
-        if (val.isSetString_val()) result = new StringLiteral(val.string_val);
-        break;
-      case DATE:
-      case DATETIME:
-      case TIMESTAMP:
-        return null;
-      default:
-        Preconditions.checkState(false,
-            String.format("Literals of type '%s' not supported.",
-                constExpr.getType().toSql()));
-    }
-    // None of the fields in the thrift struct were set, indicating a NULL.
-    if (result == null) result = new NullLiteral();
-
-    result.analyze(null);
-    return result;
-  }
-
-  // Order NullLiterals based on the SQL ORDER BY default behavior: NULLS LAST.
-  @Override
-  public int compareTo(LiteralExpr other) {
-    if (this instanceof NullLiteral && other instanceof NullLiteral) return 0;
-    if (this instanceof NullLiteral) return -1;
-    if (other instanceof NullLiteral) return 1;
-    if (getClass() != other.getClass()) return -1;
-    return 0;
-  }
-}

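One detail worth calling out from fromThrift() above: a DECIMAL arrives as its
unscaled two's-complement bytes, and the scale recorded in the column type is
reapplied on the Java side with movePointLeft(). A self-contained illustration
using only the standard library (the sample value is made up):

    import java.math.BigDecimal;
    import java.math.BigInteger;

    public class DecimalDecodeSketch {
      public static void main(String[] args) {
        // Pretend the backend sent the unscaled value 12345 for a DECIMAL(9,2).
        byte[] unscaledBytes = new BigInteger("12345").toByteArray();
        int scale = 2;
        BigDecimal val =
            new BigDecimal(new BigInteger(unscaledBytes)).movePointLeft(scale);
        System.out.println(val);  // prints 123.45
      }
    }
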
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/LoadDataStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/LoadDataStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/LoadDataStmt.java
deleted file mode 100644
index 109d70a..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/LoadDataStmt.java
+++ /dev/null
@@ -1,241 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.s3a.S3AFileSystem;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.catalog.HdfsFileFormat;
-import com.cloudera.impala.catalog.HdfsPartition;
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.FileSystemUtil;
-import com.cloudera.impala.thrift.ImpalaInternalServiceConstants;
-import com.cloudera.impala.thrift.TLoadDataReq;
-import com.cloudera.impala.thrift.TTableName;
-import com.cloudera.impala.util.TAccessLevelUtil;
-import com.cloudera.impala.util.FsPermissionChecker;
-import com.google.common.base.Preconditions;
-
-/**
- * Represents a LOAD DATA statement for moving data into an existing table:
- * LOAD DATA INPATH 'filepath' [OVERWRITE] INTO TABLE <table name>
- * [PARTITION (partcol1=val1, partcol2=val2 ...)]
- *
- * The LOAD DATA operation supports loading (moving) a single file or all files in a
- * given source directory to a table or partition location. If OVERWRITE is true, all
- * exiting files in the destination will be removed before moving the new data in.
- * If OVERWRITE is false, existing files will be preserved. If there are any file name
- * conflicts, the new files will be uniquified by inserting a UUID into the file name
- * (preserving the extension).
- * Loading hidden files is not supported and any hidden files in the source or
- * destination are preserved, even if OVERWRITE is true.
- */
-public class LoadDataStmt extends StatementBase {
-  private final TableName tableName_;
-  private final HdfsUri sourceDataPath_;
-  private final PartitionSpec partitionSpec_;
-  private final boolean overwrite_;
-
-  // Set during analysis
-  private String dbName_;
-
-  public LoadDataStmt(TableName tableName, HdfsUri sourceDataPath, boolean overwrite,
-      PartitionSpec partitionSpec) {
-    Preconditions.checkNotNull(tableName);
-    Preconditions.checkNotNull(sourceDataPath);
-    this.tableName_ = tableName;
-    this.sourceDataPath_ = sourceDataPath;
-    this.overwrite_ = overwrite;
-    this.partitionSpec_ = partitionSpec;
-  }
-
-  public String getTbl() {
-    return tableName_.getTbl();
-  }
-
-  public String getDb() {
-    Preconditions.checkNotNull(dbName_);
-    return dbName_;
-  }
-
-  /**
-   * Print SQL syntax corresponding to this node.
-   * @see com.cloudera.impala.parser.ParseNode#toSql()
-   */
-  @Override
-  public String toSql() {
-    StringBuilder sb = new StringBuilder("LOAD DATA INPATH '");
-    sb.append(sourceDataPath_).append("' ");
-    if (overwrite_) sb.append("OVERWRITE ");
-    sb.append("INTO TABLE ").append(tableName_.toString());
-    if (partitionSpec_ != null) sb.append(" ").append(partitionSpec_.toSql());
-    return sb.toString();
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    dbName_ = analyzer.getTargetDbName(tableName_);
-    Table table = analyzer.getTable(tableName_, Privilege.INSERT);
-    if (!(table instanceof HdfsTable)) {
-      throw new AnalysisException("LOAD DATA only supported for HDFS tables: " +
-          dbName_ + "." + getTbl());
-    }
-
-    // Analyze the partition spec, if one was specified.
-    if (partitionSpec_ != null) {
-      partitionSpec_.setTableName(tableName_);
-      partitionSpec_.setPartitionShouldExist();
-      partitionSpec_.setPrivilegeRequirement(Privilege.INSERT);
-      partitionSpec_.analyze(analyzer);
-    } else {
-      if (table.getMetaStoreTable().getPartitionKeysSize() > 0) {
-        throw new AnalysisException("Table is partitioned but no partition spec was " +
-            "specified: " + dbName_ + "." + getTbl());
-      }
-    }
-    analyzePaths(analyzer, (HdfsTable) table);
-  }
-
-  /**
-   * Checks that Impala has the necessary permissions to access the source and dest
-   * paths for this LOAD statement (which maps onto a sequence of file move operations,
-   * each with its own permission requirements), and that all files to be moved are in
-   * a format that Impala understands. Errors are raised as AnalysisExceptions.
-   */
-  private void analyzePaths(Analyzer analyzer, HdfsTable hdfsTable)
-      throws AnalysisException {
-    // The user must have permission to access the source location. Since the files will
-    // be moved from this location, the user needs to have all permission.
-    sourceDataPath_.analyze(analyzer, Privilege.ALL);
-
-    // Catch all exceptions thrown by accessing files, and rethrow as AnalysisExceptions.
-    try {
-      Path source = sourceDataPath_.getPath();
-      FileSystem fs = source.getFileSystem(FileSystemUtil.getConfiguration());
-      if (!(fs instanceof DistributedFileSystem) && !(fs instanceof S3AFileSystem)) {
-        throw new AnalysisException(String.format("INPATH location '%s' " +
-            "must point to an HDFS or S3A filesystem.", sourceDataPath_));
-      }
-      if (!fs.exists(source)) {
-        throw new AnalysisException(String.format(
-            "INPATH location '%s' does not exist.", sourceDataPath_));
-      }
-
-      // If the source path is a directory, we must be able to read from and write to
-      // it. If the source path is a file, we must be able to read from it, and write to
-      // its parent directory (in order to delete the file as part of the move operation).
-      FsPermissionChecker checker = FsPermissionChecker.getInstance();
-
-      if (fs.isDirectory(source)) {
-        if (FileSystemUtil.getTotalNumVisibleFiles(source) == 0) {
-          throw new AnalysisException(String.format(
-              "INPATH location '%s' contains no visible files.", sourceDataPath_));
-        }
-        if (FileSystemUtil.containsVisibleSubdirectory(source)) {
-          throw new AnalysisException(String.format(
-              "INPATH location '%s' cannot contain non-hidden subdirectories.",
-              sourceDataPath_));
-        }
-        if (!checker.getPermissions(fs, source).checkPermissions(
-            FsAction.READ_WRITE)) {
-          throw new AnalysisException(String.format("Unable to LOAD DATA from %s " +
-              "because Impala does not have READ and WRITE permissions on this directory",
-              source));
-        }
-      } else {
-        // INPATH names a file.
-        if (FileSystemUtil.isHiddenFile(source.getName())) {
-          throw new AnalysisException(String.format(
-              "INPATH location '%s' points to a hidden file.", source));
-        }
-
-        if (!checker.getPermissions(fs, source.getParent()).checkPermissions(
-            FsAction.WRITE)) {
-          throw new AnalysisException(String.format("Unable to LOAD DATA from %s " +
-              "because Impala does not have WRITE permissions on its parent " +
-              "directory %s", source, source.getParent()));
-        }
-
-        if (!checker.getPermissions(fs, source).checkPermissions(
-            FsAction.READ)) {
-          throw new AnalysisException(String.format("Unable to LOAD DATA from %s " +
-              "because Impala does not have READ permissions on this file", source));
-        }
-      }
-
-      String noWriteAccessErrorMsg = String.format("Unable to LOAD DATA into " +
-          "target table (%s) because Impala does not have WRITE access to HDFS " +
-          "location: ", hdfsTable.getFullName());
-
-      HdfsPartition partition;
-      String location;
-      if (partitionSpec_ != null) {
-        partition = hdfsTable.getPartition(partitionSpec_.getPartitionSpecKeyValues());
-        location = partition.getLocation();
-        if (!TAccessLevelUtil.impliesWriteAccess(partition.getAccessLevel())) {
-          throw new AnalysisException(noWriteAccessErrorMsg + location);
-        }
-      } else {
-        // "default" partition
-        partition = hdfsTable.getPartitionMap().get(
-            ImpalaInternalServiceConstants.DEFAULT_PARTITION_ID);
-        location = hdfsTable.getLocation();
-        if (!hdfsTable.hasWriteAccess()) {
-          throw new AnalysisException(noWriteAccessErrorMsg + hdfsTable.getLocation());
-        }
-      }
-      Preconditions.checkNotNull(partition);
-
-      // Verify the files being loaded are supported.
-      for (FileStatus fStatus: fs.listStatus(source)) {
-        if (fs.isDirectory(fStatus.getPath())) continue;
-        StringBuilder errorMsg = new StringBuilder();
-        HdfsFileFormat fileFormat = partition.getInputFormatDescriptor().getFileFormat();
-        if (!fileFormat.isFileCompressionTypeSupported(fStatus.getPath().toString(),
-            errorMsg)) {
-          throw new AnalysisException(errorMsg.toString());
-        }
-      }
-    } catch (FileNotFoundException e) {
-      throw new AnalysisException("File not found: " + e.getMessage(), e);
-    } catch (IOException e) {
-      throw new AnalysisException("Error accessing filesystem: " + e.getMessage(), e);
-    }
-  }
-
-  public TLoadDataReq toThrift() {
-    TLoadDataReq loadDataReq = new TLoadDataReq();
-    loadDataReq.setTable_name(new TTableName(getDb(), getTbl()));
-    loadDataReq.setSource_path(sourceDataPath_.toString());
-    loadDataReq.setOverwrite(overwrite_);
-    if (partitionSpec_ != null) {
-      loadDataReq.setPartition_spec(partitionSpec_.toThrift());
-    }
-    return loadDataReq;
-  }
-}
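
analyzePaths() above encodes a small permission matrix for the INPATH source:
a directory needs READ and WRITE on itself, while a single file needs READ on
the file plus WRITE on its parent directory so the file can be removed as part
of the move. A condensed sketch of that matrix, assuming Hadoop's FsAction
enum is on the classpath (helper names are illustrative, not Impala code):

    import org.apache.hadoop.fs.permission.FsAction;

    public class LoadDataPermsSketch {
      // Permission needed on the source path itself.
      static FsAction neededOnSource(boolean isDirectory) {
        return isDirectory ? FsAction.READ_WRITE : FsAction.READ;
      }

      // Permission additionally needed on the source's parent directory.
      static FsAction neededOnParent(boolean isDirectory) {
        return isDirectory ? FsAction.NONE : FsAction.WRITE;
      }
    }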