Posted to commits@impala.apache.org by kw...@apache.org on 2016/09/30 02:14:33 UTC

[16/61] [partial] incubator-impala git commit: IMPALA-3786: Replace "cloudera" with "apache" (part 1)

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/planner/SortNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/planner/SortNode.java b/fe/src/main/java/com/cloudera/impala/planner/SortNode.java
deleted file mode 100644
index 02f0df6..0000000
--- a/fe/src/main/java/com/cloudera/impala/planner/SortNode.java
+++ /dev/null
@@ -1,242 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.planner;
-
-import java.util.List;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.analysis.ExprSubstitutionMap;
-import com.cloudera.impala.analysis.SlotDescriptor;
-import com.cloudera.impala.analysis.SlotRef;
-import com.cloudera.impala.analysis.SortInfo;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.service.BackendConfig;
-import com.cloudera.impala.thrift.TExplainLevel;
-import com.cloudera.impala.thrift.TPlanNode;
-import com.cloudera.impala.thrift.TPlanNodeType;
-import com.cloudera.impala.thrift.TQueryOptions;
-import com.cloudera.impala.thrift.TSortInfo;
-import com.cloudera.impala.thrift.TSortNode;
-import com.google.common.base.Joiner;
-import com.google.common.base.Objects;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-/**
- * Node that implements a sort with or without a limit. useTopN_ is true for sorts
- * with limits that are implemented by a TopNNode in the backend. SortNode is used
- * otherwise.
- * Will always materialize the new tuple info_.sortTupleDesc_.
- */
-public class SortNode extends PlanNode {
-  private final static Logger LOG = LoggerFactory.getLogger(SortNode.class);
-
-  private final SortInfo info_;
-
-  // if set, this SortNode requires its input to have this data partition
-  private DataPartition inputPartition_;
-
-  // if true, the output of this node feeds an AnalyticNode
-  private boolean isAnalyticSort_;
-
-  // info_.sortTupleSlotExprs_ substituted with the outputSmap_ for materialized slots
-  // in init().
-  private List<Expr> resolvedTupleExprs_;
-  private final boolean useTopN_;
-  // The offset of the first row to return.
-  protected long offset_;
-
-  public SortNode(PlanNodeId id, PlanNode input, SortInfo info, boolean useTopN,
-      long offset) {
-    super(id, info.getSortTupleDescriptor().getId().asList(),
-        getDisplayName(useTopN, false));
-    info_ = info;
-    useTopN_ = useTopN;
-    children_.add(input);
-    offset_ = offset;
-  }
-
-  public long getOffset() { return offset_; }
-  public void setOffset(long offset) { offset_ = offset; }
-  public boolean hasOffset() { return offset_ > 0; }
-  public boolean useTopN() { return useTopN_; }
-  public SortInfo getSortInfo() { return info_; }
-  public void setInputPartition(DataPartition inputPartition) {
-    inputPartition_ = inputPartition;
-  }
-  public DataPartition getInputPartition() { return inputPartition_; }
-  public boolean isAnalyticSort() { return isAnalyticSort_; }
-  public void setIsAnalyticSort(boolean v) { isAnalyticSort_ = v; }
-
-  @Override
-  public boolean isBlockingNode() { return true; }
-
-  @Override
-  public void init(Analyzer analyzer) throws InternalException {
-    // Do not assignConjuncts() here, so that conjuncts bound by this SortNode's tuple id
-    // can be placed in a downstream SelectNode. A SortNode cannot evaluate conjuncts.
-    Preconditions.checkState(conjuncts_.isEmpty());
-    // Compute the memory layout for the generated tuple.
-    computeMemLayout(analyzer);
-    computeStats(analyzer);
-
-    // populate resolvedTupleExprs_ and outputSmap_
-    List<SlotDescriptor> sortTupleSlots = info_.getSortTupleDescriptor().getSlots();
-    List<Expr> slotExprs = info_.getSortTupleSlotExprs();
-    Preconditions.checkState(sortTupleSlots.size() == slotExprs.size());
-    resolvedTupleExprs_ = Lists.newArrayList();
-    outputSmap_ = new ExprSubstitutionMap();
-    for (int i = 0; i < slotExprs.size(); ++i) {
-      if (!sortTupleSlots.get(i).isMaterialized()) continue;
-      resolvedTupleExprs_.add(slotExprs.get(i));
-      outputSmap_.put(slotExprs.get(i), new SlotRef(sortTupleSlots.get(i)));
-    }
-    ExprSubstitutionMap childSmap = getCombinedChildSmap();
-    resolvedTupleExprs_ =
-        Expr.substituteList(resolvedTupleExprs_, childSmap, analyzer, false);
-
-    // Remap the ordering exprs to the tuple materialized by this sort node. The mapping
-    // is a composition of the childSmap and the outputSmap_ because the child node may
-    // have also remapped its input (e.g., as in a series of (sort->analytic)* nodes).
-    // Parent nodes have to do the same, so set the composition as the outputSmap_.
-    outputSmap_ = ExprSubstitutionMap.compose(childSmap, outputSmap_, analyzer);
-
-    info_.substituteOrderingExprs(outputSmap_, analyzer);
-    info_.checkConsistency();
-
-    LOG.trace("sort id " + tupleIds_.get(0).toString() + " smap: "
-        + outputSmap_.debugString());
-    LOG.trace("sort input exprs: " + Expr.debugString(resolvedTupleExprs_));
-  }
-
-  @Override
-  protected void computeStats(Analyzer analyzer) {
-    super.computeStats(analyzer);
-    cardinality_ = capAtLimit(getChild(0).cardinality_);
-    LOG.debug("stats Sort: cardinality=" + Long.toString(cardinality_));
-  }
-
-  @Override
-  protected String debugString() {
-    List<String> strings = Lists.newArrayList();
-    for (Boolean isAsc : info_.getIsAscOrder()) {
-      strings.add(isAsc ? "a" : "d");
-    }
-    return Objects.toStringHelper(this)
-        .add("ordering_exprs", Expr.debugString(info_.getOrderingExprs()))
-        .add("is_asc", "[" + Joiner.on(" ").join(strings) + "]")
-        .add("nulls_first", "[" + Joiner.on(" ").join(info_.getNullsFirst()) + "]")
-        .add("offset_", offset_)
-        .addValue(super.debugString())
-        .toString();
-  }
-
-  @Override
-  protected void toThrift(TPlanNode msg) {
-    msg.node_type = TPlanNodeType.SORT_NODE;
-    TSortInfo sort_info = new TSortInfo(Expr.treesToThrift(info_.getOrderingExprs()),
-        info_.getIsAscOrder(), info_.getNullsFirst());
-    Preconditions.checkState(tupleIds_.size() == 1,
-        "Incorrect size for tupleIds_ in SortNode");
-    sort_info.sort_tuple_slot_exprs = Expr.treesToThrift(resolvedTupleExprs_);
-    TSortNode sort_node = new TSortNode(sort_info, useTopN_);
-    sort_node.setOffset(offset_);
-    msg.sort_node = sort_node;
-  }
-
-  @Override
-  protected String getNodeExplainString(String prefix, String detailPrefix,
-      TExplainLevel detailLevel) {
-    StringBuilder output = new StringBuilder();
-    output.append(String.format("%s%s:%s%s\n", prefix, id_.toString(),
-        displayName_, getNodeExplainDetail(detailLevel)));
-    if (detailLevel.ordinal() >= TExplainLevel.STANDARD.ordinal()) {
-      output.append(detailPrefix + "order by: ");
-      for (int i = 0; i < info_.getOrderingExprs().size(); ++i) {
-        if (i > 0) output.append(", ");
-        output.append(info_.getOrderingExprs().get(i).toSql() + " ");
-        output.append(info_.getIsAscOrder().get(i) ? "ASC" : "DESC");
-
-        Boolean nullsFirstParam = info_.getNullsFirstParams().get(i);
-        if (nullsFirstParam != null) {
-          output.append(nullsFirstParam ? " NULLS FIRST" : " NULLS LAST");
-        }
-      }
-      output.append("\n");
-    }
-    return output.toString();
-  }
-
-  private String getNodeExplainDetail(TExplainLevel detailLevel) {
-    if (!hasLimit()) return "";
-    if (hasOffset()) {
-      return String.format(" [LIMIT=%s OFFSET=%s]", limit_, offset_);
-    } else {
-      return String.format(" [LIMIT=%s]", limit_);
-    }
-  }
-
-  @Override
-  protected String getOffsetExplainString(String prefix) {
-    return offset_ != 0 ? prefix + "offset: " + Long.toString(offset_) + "\n" : "";
-  }
-
-  @Override
-  public void computeCosts(TQueryOptions queryOptions) {
-    Preconditions.checkState(hasValidStats());
-    if (useTopN_) {
-      perHostMemCost_ = (long) Math.ceil((cardinality_ + offset_) * avgRowSize_);
-      return;
-    }
-
-    // For an external sort, set the memory cost to be what is required for a 2-phase
-    // sort. If the input to be sorted would take up N blocks in memory, then the
-    // memory required for a 2-phase sort is sqrt(N) blocks. A single run would be of
-    // size sqrt(N) blocks, and we could merge sqrt(N) such runs with sqrt(N) blocks
-    // of memory.
-    double fullInputSize = getChild(0).cardinality_ * avgRowSize_;
-    boolean hasVarLenSlots = false;
-    for (SlotDescriptor slotDesc: info_.getSortTupleDescriptor().getSlots()) {
-      if (slotDesc.isMaterialized() && !slotDesc.getType().isFixedLengthType()) {
-        hasVarLenSlots = true;
-        break;
-      }
-    }
-
-    // The block size used by the sorter is the same as the configured I/O read size.
-    long blockSize = BackendConfig.INSTANCE.getReadSize();
-    // The external sorter writes fixed-len and var-len data in separate sequences of
-    // blocks on disk and reads from both sequences when merging. This effectively
-    // doubles the block size when there are var-len columns present.
-    if (hasVarLenSlots) blockSize *= 2;
-    double numInputBlocks = Math.ceil(fullInputSize / blockSize);
-    perHostMemCost_ = blockSize * (long) Math.ceil(Math.sqrt(numInputBlocks));
-  }
-
-  private static String getDisplayName(boolean isTopN, boolean isMergeOnly) {
-    if (isTopN) {
-      return "TOP-N";
-    } else {
-      return "SORT";
-    }
-  }
-}
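
A worked example of the external-sort estimate in computeCosts() above (the
numbers are illustrative, not from a real plan; the 8MB block size is the
backend default exposed by BackendConfig, whose file appears later in this
commit):

    // Standalone sketch of SortNode.computeCosts()'s 2-phase sort estimate.
    long blockSize = 8L * 1024 * 1024;        // BackendConfig.getReadSize()
    boolean hasVarLenSlots = true;            // sort tuple has a var-len slot
    if (hasVarLenSlots) blockSize *= 2;       // separate fixed/var-len streams
    double fullInputSize = 1024L * 1024 * 1024;                // 1GB of input
    double numInputBlocks = Math.ceil(fullInputSize / blockSize); // 64 blocks
    long perHostMemCost = blockSize * (long) Math.ceil(Math.sqrt(numInputBlocks));
    // sqrt(64) = 8 blocks per run and merge fan-in, so the estimate is
    // 16MB * 8 = 128MB.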

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/planner/SubplanNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/planner/SubplanNode.java b/fe/src/main/java/com/cloudera/impala/planner/SubplanNode.java
deleted file mode 100644
index 4512268..0000000
--- a/fe/src/main/java/com/cloudera/impala/planner/SubplanNode.java
+++ /dev/null
@@ -1,109 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.planner;
-
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.thrift.TExplainLevel;
-import com.cloudera.impala.thrift.TPlanNode;
-import com.cloudera.impala.thrift.TPlanNodeType;
-import com.google.common.base.Preconditions;
-
-/**
- * A SubplanNode evaluates its right child plan tree for every row from its left child,
- * and returns those rows produced by the right child. The right child is called the
- * 'subplan tree' and the left child the 'input'. A SubplanNode is similar to a join,
- * but different in the following respects. First, a SubplanNode does not do any real
- * work itself. It only returns rows produced by the right child plan tree, which
- * typically has a dependency on the current input row (see SingularRowSrcNode and
- * UnnestNode). Second, no join predicates are required. A SubplanNode does not
- * evaluate any conjuncts.
- */
-public class SubplanNode extends PlanNode {
-  private PlanNode subplan_;
-
-  public SubplanNode(PlanNode input) {
-    super("SUBPLAN");
-    children_.add(input);
-  }
-
-  /**
-   * Sets the subplan of this SubplanNode. Dependent plan nodes such as UnnestNodes
-   * and SingularRowSrcNodes need to know their SubplanNode parent; therefore, setting
-   * the subplan in this SubplanNode is deferred until the subplan tree has been
-   * constructed (which requires the parent SubplanNode to have been constructed).
-   */
-  public void setSubplan(PlanNode subplan) {
-    Preconditions.checkState(children_.size() == 1);
-    subplan_ = subplan;
-    children_.add(subplan);
-    computeTupleIds();
-  }
-
-  @Override
-  public void computeTupleIds() {
-    Preconditions.checkNotNull(subplan_);
-    clearTupleIds();
-    tblRefIds_.addAll(subplan_.getTblRefIds());
-    tupleIds_.addAll(subplan_.getTupleIds());
-    nullableTupleIds_.addAll(subplan_.getNullableTupleIds());
-  }
-
-  @Override
-  public void init(Analyzer analyzer) throws InternalException {
-    // Subplan root must have been set.
-    Preconditions.checkState(children_.size() == 2);
-    // Check that there are no unassigned conjuncts that can be evaluated by this node.
-    // All such conjuncts should have already been assigned in the right child.
-    assignConjuncts(analyzer);
-    Preconditions.checkState(conjuncts_.isEmpty());
-    computeStats(analyzer);
-    outputSmap_ = getChild(1).getOutputSmap();
-    // Save state of assigned conjuncts for join-ordering attempts (see member comment).
-    assignedConjuncts_ = analyzer.getAssignedConjuncts();
-  }
-
-  @Override
-  protected void computeStats(Analyzer analyzer) {
-    super.computeStats(analyzer);
-    if (getChild(0).cardinality_ != -1 && getChild(1).cardinality_ != -1) {
-      cardinality_ =
-          multiplyCardinalities(getChild(0).cardinality_, getChild(1).cardinality_);
-    } else {
-      cardinality_ = -1;
-    }
-    cardinality_ = capAtLimit(cardinality_);
-  }
-
-  @Override
-  protected String getNodeExplainString(String prefix, String detailPrefix,
-      TExplainLevel detailLevel) {
-    StringBuilder output = new StringBuilder();
-    output.append(String.format("%s%s\n", prefix, getDisplayLabel()));
-    if (detailLevel.ordinal() >= TExplainLevel.STANDARD.ordinal()) {
-      if (!conjuncts_.isEmpty()) {
-        output.append(detailPrefix + "predicates: " +
-            getExplainString(conjuncts_) + "\n");
-      }
-    }
-    return output.toString();
-  }
-
-  @Override
-  protected void toThrift(TPlanNode msg) { msg.node_type = TPlanNodeType.SUBPLAN_NODE; }
-}
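
The cardinality estimate above multiplies the two child cardinalities via
multiplyCardinalities(), which is defined in PlanNode and not part of this
diff. A minimal sketch of what such an overflow-safe multiply looks like
(a hypothetical stand-in, not the actual implementation):

    // Multiply two row-count estimates, saturating at Long.MAX_VALUE
    // instead of silently overflowing.
    static long multiplyCardinalities(long a, long b) {
      long result = a * b;
      if (a != 0 && result / a != b) return Long.MAX_VALUE;  // overflow
      return result;
    }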

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/planner/TableSink.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/planner/TableSink.java b/fe/src/main/java/com/cloudera/impala/planner/TableSink.java
deleted file mode 100644
index ae73b54..0000000
--- a/fe/src/main/java/com/cloudera/impala/planner/TableSink.java
+++ /dev/null
@@ -1,114 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.planner;
-
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.catalog.HBaseTable;
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.catalog.KuduTable;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.thrift.TSinkAction;
-import com.google.common.base.Preconditions;
-
-import java.util.List;
-
-/**
- * A DataSink that writes into a table.
- */
-public abstract class TableSink extends DataSink {
-
-  /**
-   * Enum to specify the sink operation type.
-   */
-  public enum Op {
-    INSERT {
-      @Override
-      public String toExplainString() { return "INSERT INTO"; }
-
-      @Override
-      public TSinkAction toThrift() { return TSinkAction.INSERT; }
-    },
-    UPDATE {
-      @Override
-      public String toExplainString() { return "UPDATE"; }
-
-      @Override
-      public TSinkAction toThrift() { return TSinkAction.UPDATE; }
-    },
-    DELETE {
-      @Override
-      public String toExplainString() { return "DELETE FROM"; }
-
-      @Override
-      public TSinkAction toThrift() { return TSinkAction.DELETE; }
-    };
-
-    public abstract String toExplainString();
-
-    public abstract TSinkAction toThrift();
-  }
-
-  // Table which is to be populated by this sink.
-  protected final Table targetTable_;
-  // The type of operation to be performed by this sink.
-  protected final Op sinkOp_;
-
-  public TableSink(Table targetTable, Op sinkAction) {
-    targetTable_ = targetTable;
-    sinkOp_ = sinkAction;
-  }
-
-  /**
-   * Returns an output sink appropriate for writing to the given table.
-   * Not all Ops are supported for all tables.
-   * All parameters must be non-null; in particular, the lists must be empty if they
-   * don't make sense for a given table type.
-   */
-  public static TableSink create(Table table, Op sinkAction,
-      List<Expr> partitionKeyExprs,  List<Integer> referencedColumns,
-      boolean overwrite, boolean ignoreDuplicates) {
-    if (table instanceof HdfsTable) {
-      // Hdfs only supports inserts.
-      Preconditions.checkState(sinkAction == Op.INSERT);
-      // Referenced columns don't make sense for an Hdfs table.
-      Preconditions.checkState(referencedColumns.isEmpty());
-      return new HdfsTableSink(table, partitionKeyExprs, overwrite);
-    } else if (table instanceof HBaseTable) {
-      // HBase only supports inserts.
-      Preconditions.checkState(sinkAction == Op.INSERT);
-      // Partition clause doesn't make sense for an HBase table.
-      Preconditions.checkState(partitionKeyExprs.isEmpty());
-      // HBase doesn't have a way to perform INSERT OVERWRITE
-      Preconditions.checkState(overwrite == false);
-      // Referenced columns don't make sense for an HBase table.
-      Preconditions.checkState(referencedColumns.isEmpty());
-      // Create the HBaseTableSink and return it.
-      return new HBaseTableSink(table);
-    } else if (table instanceof KuduTable) {
-      // Kudu doesn't have a way to perform INSERT OVERWRITE.
-      Preconditions.checkState(overwrite == false);
-      // Partition clauses don't make sense for Kudu inserts.
-      Preconditions.checkState(partitionKeyExprs.isEmpty());
-      return new KuduTableSink(table, sinkAction, referencedColumns, ignoreDuplicates);
-    } else {
-      throw new UnsupportedOperationException(
-          "Cannot create data sink into table of type: " + table.getClass().getName());
-    }
-  }
-}
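
A hedged usage sketch of the create() factory above, for an INSERT into an
unpartitioned HDFS table ("hdfsTable" is an assumed, already-analyzed
HdfsTable instance; HdfsTableSink itself is not part of this diff):

    // For HDFS targets, referencedColumns must be empty and only
    // Op.INSERT is allowed, per the Preconditions checks above.
    List<Expr> partitionKeyExprs = Lists.newArrayList();
    List<Integer> referencedColumns = Lists.newArrayList();
    TableSink sink = TableSink.create(hdfsTable, TableSink.Op.INSERT,
        partitionKeyExprs, referencedColumns, /*overwrite*/ false,
        /*ignoreDuplicates*/ false);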

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/planner/UnionNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/planner/UnionNode.java b/fe/src/main/java/com/cloudera/impala/planner/UnionNode.java
deleted file mode 100644
index ef67277..0000000
--- a/fe/src/main/java/com/cloudera/impala/planner/UnionNode.java
+++ /dev/null
@@ -1,231 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.planner;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.List;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.analysis.SlotDescriptor;
-import com.cloudera.impala.analysis.TupleId;
-import com.cloudera.impala.common.Pair;
-import com.cloudera.impala.thrift.TExplainLevel;
-import com.cloudera.impala.thrift.TExpr;
-import com.cloudera.impala.thrift.TPlanNode;
-import com.cloudera.impala.thrift.TPlanNodeType;
-import com.cloudera.impala.thrift.TUnionNode;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-/**
- * Node that merges the results of its child plans by materializing
- * the corresponding result exprs into a new tuple.
- */
-public class UnionNode extends PlanNode {
-  private final static Logger LOG = LoggerFactory.getLogger(UnionNode.class);
-
-  // Expr lists corresponding to the input query stmts.
-  // The ith resultExprList belongs to the ith child.
-  // All exprs are resolved to base tables.
-  protected List<List<Expr>> resultExprLists_ = Lists.newArrayList();
-
-  // Expr lists that originate from constant select stmts.
-  // We keep them separate from the regular expr lists to avoid null children.
-  protected List<List<Expr>> constExprLists_ = Lists.newArrayList();
-
-  // Materialized result/const exprs corresponding to materialized slots.
-  // Set in init() and substituted against the corresponding child's output smap.
-  protected List<List<Expr>> materializedResultExprLists_ = Lists.newArrayList();
-  protected List<List<Expr>> materializedConstExprLists_ = Lists.newArrayList();
-
-  protected final TupleId tupleId_;
-
-  protected UnionNode(PlanNodeId id, TupleId tupleId) {
-    super(id, tupleId.asList(), "UNION");
-    tupleId_ = tupleId;
-  }
-
-  public void addConstExprList(List<Expr> exprs) { constExprLists_.add(exprs); }
-
-  /**
-   * Returns true if this UnionNode has only constant exprs.
-   */
-  public boolean isConstantUnion() { return resultExprLists_.isEmpty(); }
-
-  /**
-   * Add a child tree plus its corresponding resolved resultExprs.
-   */
-  public void addChild(PlanNode node, List<Expr> baseTblResultExprs) {
-    super.addChild(node);
-    resultExprLists_.add(baseTblResultExprs);
-    if (baseTblResultExprs != null) {
-      // if we're materializing output, we can only do that into a single
-      // output tuple
-      Preconditions.checkState(tupleIds_.size() == 1, tupleIds_.size());
-    }
-  }
-
-  @Override
-  public void computeStats(Analyzer analyzer) {
-    super.computeStats(analyzer);
-    cardinality_ = constExprLists_.size();
-    for (PlanNode child: children_) {
-      // ignore missing child cardinality info in the hope it won't matter enough
-      // to change the planning outcome
-      if (child.cardinality_ > 0) {
-        cardinality_ = addCardinalities(cardinality_, child.cardinality_);
-      }
-    }
-    // The number of nodes of a union node is -1 (invalid) if all the referenced tables
-    // are inline views (e.g. select 1 FROM (VALUES(1 x, 1 y)) a FULL OUTER JOIN
-    // (VALUES(1 x, 1 y)) b ON (a.x = b.y)). We need to set the correct value.
-    if (numNodes_ == -1) numNodes_ = 1;
-    cardinality_ = capAtLimit(cardinality_);
-    LOG.debug("stats Union: cardinality=" + Long.toString(cardinality_));
-  }
-
-  /**
-   * Re-order the union's operands descending by their estimated per-host memory,
- * such that parent nodes can gauge the peak memory consumption of this UnionNode after
- * opening it during execution (a UnionNode opens its first operand in Open()).
-   * Scan nodes are always ordered last because they can dynamically scale down their
-   * memory usage, whereas many other nodes cannot (e.g., joins, aggregations).
-   * One goal is to decrease the likelihood of a SortNode parent claiming too much
-   * memory in its Open(), possibly causing the mem limit to be hit when subsequent
-   * union operands are executed.
-   * Can only be called on a fragmented plan because this function calls computeCosts()
-   * on this node's children.
-   * TODO: Come up with a good way of handing memory out to individual operators so that
-   * they don't trip each other up. Then remove this function.
-   */
-  public void reorderOperands(Analyzer analyzer) {
-    Preconditions.checkNotNull(fragment_,
-        "Operands can only be reordered on the fragmented plan.");
-
-    // List of estimated per-host memory consumption (first) by child index (second).
-    List<Pair<Long, Integer>> memByChildIdx = Lists.newArrayList();
-    for (int i = 0; i < children_.size(); ++i) {
-      PlanNode child = children_.get(i);
-      child.computeCosts(analyzer.getQueryCtx().request.getQuery_options());
-      memByChildIdx.add(new Pair<Long, Integer>(child.getPerHostMemCost(), i));
-    }
-
-    Collections.sort(memByChildIdx,
-        new Comparator<Pair<Long, Integer>>() {
-      public int compare(Pair<Long, Integer> a, Pair<Long, Integer> b) {
-        PlanNode aNode = children_.get(a.second);
-        PlanNode bNode = children_.get(b.second);
-        // Order scan nodes last because they can dynamically scale down their mem.
-        if (bNode instanceof ScanNode && !(aNode instanceof ScanNode)) return -1;
-        if (aNode instanceof ScanNode && !(bNode instanceof ScanNode)) return 1;
-        return Long.compare(b.first, a.first);
-      }
-    });
-
-    List<List<Expr>> newResultExprLists = Lists.newArrayList();
-    ArrayList<PlanNode> newChildren = Lists.newArrayList();
-    for (Pair<Long, Integer> p: memByChildIdx) {
-      newResultExprLists.add(resultExprLists_.get(p.second));
-      newChildren.add(children_.get(p.second));
-    }
-    resultExprLists_ = newResultExprLists;
-    children_ = newChildren;
-  }
-
-  /**
-   * Must be called after addChild()/addConstExprList(). Computes the materialized
-   * result/const expr lists based on the materialized slots of this UnionNode's
-   * produced tuple. The UnionNode doesn't need an smap: like a ScanNode, it
-   * materializes an original tuple.
-   * There is no need to call assignConjuncts() because all non-constant conjuncts
-   * have already been assigned to the union operands, and all constant conjuncts have
-   * been evaluated during registration to set analyzer.hasEmptyResultSet_.
-   */
-  @Override
-  public void init(Analyzer analyzer) {
-    Preconditions.checkState(conjuncts_.isEmpty());
-    computeMemLayout(analyzer);
-    computeStats(analyzer);
-
-    // drop resultExprs/constExprs that aren't getting materialized (= where the
-    // corresponding output slot isn't being materialized)
-    materializedResultExprLists_.clear();
-    Preconditions.checkState(resultExprLists_.size() == children_.size());
-    List<SlotDescriptor> slots = analyzer.getDescTbl().getTupleDesc(tupleId_).getSlots();
-    for (int i = 0; i < resultExprLists_.size(); ++i) {
-      List<Expr> exprList = resultExprLists_.get(i);
-      List<Expr> newExprList = Lists.newArrayList();
-      Preconditions.checkState(exprList.size() == slots.size());
-      for (int j = 0; j < exprList.size(); ++j) {
-        if (slots.get(j).isMaterialized()) newExprList.add(exprList.get(j));
-      }
-      materializedResultExprLists_.add(
-          Expr.substituteList(newExprList, getChild(i).getOutputSmap(), analyzer, true));
-    }
-    Preconditions.checkState(
-        materializedResultExprLists_.size() == getChildren().size());
-
-    materializedConstExprLists_.clear();
-    for (List<Expr> exprList: constExprLists_) {
-      Preconditions.checkState(exprList.size() == slots.size());
-      List<Expr> newExprList = Lists.newArrayList();
-      for (int i = 0; i < exprList.size(); ++i) {
-        if (slots.get(i).isMaterialized()) newExprList.add(exprList.get(i));
-      }
-      materializedConstExprLists_.add(newExprList);
-    }
-  }
-
-  @Override
-  protected void toThrift(TPlanNode msg) {
-    Preconditions.checkState(materializedResultExprLists_.size() == children_.size());
-    List<List<TExpr>> texprLists = Lists.newArrayList();
-    for (List<Expr> exprList: materializedResultExprLists_) {
-      texprLists.add(Expr.treesToThrift(exprList));
-    }
-    List<List<TExpr>> constTexprLists = Lists.newArrayList();
-    for (List<Expr> constTexprList: materializedConstExprLists_) {
-      constTexprLists.add(Expr.treesToThrift(constTexprList));
-    }
-    msg.union_node = new TUnionNode(tupleId_.asInt(), texprLists, constTexprLists);
-    msg.node_type = TPlanNodeType.UNION_NODE;
-  }
-
-  @Override
-  protected String getNodeExplainString(String prefix, String detailPrefix,
-      TExplainLevel detailLevel) {
-    StringBuilder output = new StringBuilder();
-    output.append(String.format("%s%s:%s\n", prefix, id_.toString(), displayName_));
-    // A UnionNode may have predicates if a union is used inside an inline view,
-    // and the enclosing select stmt has predicates referring to the inline view.
-    if (!conjuncts_.isEmpty()) {
-      output.append(detailPrefix + "predicates: " + getExplainString(conjuncts_) + "\n");
-    }
-    if (!constExprLists_.isEmpty()) {
-      output.append(detailPrefix + "constant-operands=" + constExprLists_.size() + "\n");
-    }
-    return output.toString();
-  }
-}
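
The comparator in reorderOperands() above sorts on two keys: scan nodes go
last (they can scale their memory down), and within each group the operand
with the higher per-host memory estimate comes first. A self-contained
illustration of the same ordering on plain data (Operand is a made-up
holder class, not an Impala type):

    static class Operand {
      final long perHostMem;
      final boolean isScan;
      Operand(long perHostMem, boolean isScan) {
        this.perHostMem = perHostMem; this.isScan = isScan;
      }
    }

    List<Operand> ops = Lists.newArrayList(
        new Operand(64, true),     // scan
        new Operand(512, false),   // e.g., an aggregation
        new Operand(128, false));  // e.g., a join
    Collections.sort(ops, new Comparator<Operand>() {
      public int compare(Operand a, Operand b) {
        if (b.isScan && !a.isScan) return -1;  // non-scans before scans
        if (a.isScan && !b.isScan) return 1;
        return Long.compare(b.perHostMem, a.perHostMem);  // descending mem
      }
    });
    // Resulting order: 512 (non-scan), 128 (non-scan), 64 (scan).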

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/planner/UnnestNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/planner/UnnestNode.java b/fe/src/main/java/com/cloudera/impala/planner/UnnestNode.java
deleted file mode 100644
index 3a45882..0000000
--- a/fe/src/main/java/com/cloudera/impala/planner/UnnestNode.java
+++ /dev/null
@@ -1,107 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.planner;
-
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.CollectionTableRef;
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.thrift.TExplainLevel;
-import com.cloudera.impala.thrift.TPlanNode;
-import com.cloudera.impala.thrift.TPlanNodeType;
-import com.cloudera.impala.thrift.TUnnestNode;
-import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
-
-/**
- * An UnnestNode scans over a collection materialized in memory, and returns
- * one row per item in the collection.
- * An UnnestNode can only appear in the plan tree of a SubplanNode.
- */
-public class UnnestNode extends PlanNode {
-  private final SubplanNode containingSubplanNode_;
-  private final CollectionTableRef tblRef_;
-  private final Expr collectionExpr_;
-
-  public UnnestNode(PlanNodeId id, SubplanNode containingSubplanNode,
-      CollectionTableRef tblRef) {
-    super(id, tblRef.getDesc().getId().asList(), "UNNEST");
-    containingSubplanNode_ = containingSubplanNode;
-    tblRef_ = tblRef;
-    collectionExpr_ = tblRef_.getCollectionExpr();
-    // Assume the collection expr has been fully resolved in analysis.
-    Preconditions.checkState(
-        collectionExpr_.isBoundByTupleIds(containingSubplanNode.getChild(0).tupleIds_));
-  }
-
-  @Override
-  public void init(Analyzer analyzer) throws ImpalaException {
-    // Do not assign binding predicates or predicates for enforcing slot equivalences
-    // because they must have been assigned in the scan node materializing the
-    // collection-typed slot.
-    super.init(analyzer);
-    conjuncts_ = orderConjunctsByCost(conjuncts_);
-
-    // Unnest is like a scan and must materialize the slots of its conjuncts.
-    analyzer.materializeSlots(conjuncts_);
-    computeMemLayout(analyzer);
-  }
-
-  @Override
-  public void computeStats(Analyzer analyzer) {
-    super.computeStats(analyzer);
-    cardinality_ = PlannerContext.AVG_COLLECTION_SIZE;
-    // The containing SubplanNode has not yet been initialized, so get the number
-    // of nodes from the SubplanNode's input.
-    numNodes_ = containingSubplanNode_.getChild(0).getNumNodes();
-    cardinality_ = capAtLimit(cardinality_);
-  }
-
-  @Override
-  protected String getNodeExplainString(String prefix, String detailPrefix,
-      TExplainLevel detailLevel) {
-    StringBuilder output = new StringBuilder();
-    output.append(String.format("%s%s [%s]\n", prefix, getDisplayLabel(),
-        getDisplayLabelDetail()));
-    if (detailLevel.ordinal() >= TExplainLevel.EXTENDED.ordinal()) {
-      output.append(String.format(
-          "%sparent-subplan=%s\n", detailPrefix, containingSubplanNode_.getId()));
-    }
-    if (detailLevel.ordinal() >= TExplainLevel.STANDARD.ordinal()) {
-      if (!conjuncts_.isEmpty()) {
-        output.append(
-            detailPrefix + "predicates: " + getExplainString(conjuncts_) + "\n");
-      }
-    }
-    return output.toString();
-  }
-
-  @Override
-  protected String getDisplayLabelDetail() {
-    StringBuilder strBuilder = new StringBuilder();
-    strBuilder.append(Joiner.on(".").join(tblRef_.getPath()));
-    if (tblRef_.hasExplicitAlias()) strBuilder.append(" " + tblRef_.getExplicitAlias());
-    return strBuilder.toString();
-  }
-
-  @Override
-  protected void toThrift(TPlanNode msg) {
-    msg.node_type = TPlanNodeType.UNNEST_NODE;
-    msg.setUnnest_node(new TUnnestNode(collectionExpr_.treeToThrift()));
-  }
-}
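
For illustration of the explain output built above: given an assumed
nested-types query such as SELECT ... FROM customers c, c.orders o, the
label from getDisplayLabelDetail() is the dot-joined path plus the explicit
alias, so the node's plan line would read roughly:

    05:UNNEST [c.orders o]
       parent-subplan=04    (shown at EXTENDED detail and higher)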

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/planner/ValueRange.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/planner/ValueRange.java b/fe/src/main/java/com/cloudera/impala/planner/ValueRange.java
deleted file mode 100644
index e76da9d..0000000
--- a/fe/src/main/java/com/cloudera/impala/planner/ValueRange.java
+++ /dev/null
@@ -1,121 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.planner;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.analysis.Analyzer;
-import com.cloudera.impala.analysis.BinaryPredicate;
-import com.cloudera.impala.analysis.CompoundPredicate;
-import com.cloudera.impala.analysis.Expr;
-import com.cloudera.impala.analysis.Predicate;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.service.FeSupport;
-import com.google.common.base.Preconditions;
-
-/**
- * Representation of a two-sided interval of values. Either one of the sides
- * is optional, and can be exclusive or inclusive. For a range representing a single
- * value, both bounds are set.
- */
-public class ValueRange {
-  private final static Logger LOG = LoggerFactory.getLogger(ValueRange.class);
-
-  private Expr lowerBound_;
-  private boolean lowerBoundInclusive_;
-  private Expr upperBound_;
-  private boolean upperBoundInclusive_;
-
-  Expr getLowerBound() { return lowerBound_; }
-  void setLowerBound(Expr e) { lowerBound_ = e; }
-  boolean getLowerBoundInclusive() { return lowerBoundInclusive_; }
-  void setLowerBoundInclusive(boolean b) { lowerBoundInclusive_ = b; }
-  Expr getUpperBound() { return upperBound_; }
-  void setUpperBound(Expr e) { upperBound_ = e; }
-  boolean getUpperBoundInclusive() { return upperBoundInclusive_; }
-  void setUpperBoundInclusive(boolean b) { upperBoundInclusive_ = b; }
-
-  static public ValueRange createEqRange(Expr valueExpr) {
-    ValueRange result = new ValueRange();
-    result.lowerBound_ = valueExpr;
-    result.lowerBoundInclusive_ = true;
-    result.upperBound_ = valueExpr;
-    result.upperBoundInclusive_ = true;
-    return result;
-  }
-
-  public boolean isEqRange() {
-    return lowerBound_ == upperBound_ && lowerBoundInclusive_ && upperBoundInclusive_;
-  }
-
-  /**
-   * Determines whether a given constant expr is within the range.
-   * Does this by constructing predicate that represents the range,
-   * with the valueExpr inserted appropriately, and then calls the
-   * backend for evaluation.
-   */
-  public boolean isInRange(Analyzer analyzer, Expr valueExpr) throws
-      InternalException {
-    Preconditions.checkState(valueExpr.isConstant());
-    Preconditions.checkState(lowerBound_ != null || upperBound_ != null);
-
-    // construct predicate
-    Predicate p = null;
-    if (lowerBound_ != null && upperBound_ != null
-        && lowerBoundInclusive_ && upperBoundInclusive_
-        && lowerBound_ == upperBound_) {
-      // construct "=" predicate
-      p = new BinaryPredicate(BinaryPredicate.Operator.EQ, valueExpr, lowerBound_);
-    } else {
-      // construct range predicate
-      if (lowerBound_ != null) {
-        p = new BinaryPredicate(
-            lowerBoundInclusive_
-              ? BinaryPredicate.Operator.GE : BinaryPredicate.Operator.GT,
-            valueExpr, lowerBound_);
-      }
-      if (upperBound_ != null) {
-        Predicate p2 = new BinaryPredicate(
-            upperBoundInclusive_
-              ? BinaryPredicate.Operator.GE : BinaryPredicate.Operator.GT,
-            upperBound_, valueExpr);
-        if (p != null) {
-          p = new CompoundPredicate(CompoundPredicate.Operator.AND, p, p2);
-        } else {
-          p = p2;
-        }
-      }
-    }
-
-    Preconditions.checkState(p.isConstant());
-    // analyze to insert casts, etc.
-    try {
-      p.analyze(analyzer);
-    } catch (AnalysisException e) {
-      // this should never happen
-      throw new InternalException(
-          "couldn't analyze predicate " + p.toSql() + "\n" + e.toString());
-    }
-
-    // call backend
-    return FeSupport.EvalPredicate(p, analyzer.getQueryCtx());
-  }
-
-}
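
A worked example of the predicate construction in isInRange() above: for a
range with exclusive lower bound 10 and inclusive upper bound 20, testing
the constant value 15 builds the following (with "value", "ten", and
"twenty" standing in for analyzed constant Exprs):

    // 15 > 10 AND 20 >= 15; note the operand order for the upper bound.
    Predicate lower =
        new BinaryPredicate(BinaryPredicate.Operator.GT, value, ten);
    Predicate upper =
        new BinaryPredicate(BinaryPredicate.Operator.GE, twenty, value);
    Predicate p = new CompoundPredicate(CompoundPredicate.Operator.AND,
        lower, upper);
    // After p.analyze(analyzer), FeSupport.EvalPredicate(p, queryCtx)
    // evaluates the constant predicate in the backend and returns true.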

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/service/BackendConfig.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/service/BackendConfig.java b/fe/src/main/java/com/cloudera/impala/service/BackendConfig.java
deleted file mode 100644
index e98ab571..0000000
--- a/fe/src/main/java/com/cloudera/impala/service/BackendConfig.java
+++ /dev/null
@@ -1,49 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.service;
-
-/**
- * This class is meant to provide the FE with impalad backend configuration parameters,
- * including command line arguments.
- * TODO: Remove this class and either
- * a) Figure out if there's a standard way to access flags from Java
- * b) Create a util/gflags.java that lets us access the BE flags
- */
-public class BackendConfig {
-  public static BackendConfig INSTANCE = new BackendConfig();
-
-  // Default read block size (in bytes). This is the same as
-  // the default FLAGS_read_size used by the IO manager in the backend.
-  private final long READ_SIZE;
-
-  // This is overridden by JniFrontend/JniCatalog classes with user-set configuration.
-  // TODO: Read this from backend instead of using static variables.
-  private static boolean allowAuthToLocalRules_ = false;
-
-  private BackendConfig() {
-    // TODO: Populate these by making calls to the backend instead of default constants.
-    READ_SIZE = 8 * 1024 * 1024L;
-  }
-
-  public long getReadSize() { return READ_SIZE; }
-
-  public static boolean isAuthToLocalEnabled() { return allowAuthToLocalRules_; }
-  public static void setAuthToLocal(boolean authToLocal) {
-    allowAuthToLocalRules_ = authToLocal;
-  }
-}