Posted to commits@impala.apache.org by jo...@apache.org on 2019/02/06 22:07:15 UTC

[impala] branch master updated: IMPALA-8148: Misc. FE code cleanup

This is an automated email from the ASF dual-hosted git repository.

joemcdonnell pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git


The following commit(s) were added to refs/heads/master by this push:
     new 69652fa  IMPALA-8148: Misc. FE code cleanup
69652fa is described below

commit 69652fa0d6f92f3e23a7de1f0137ffd321635824
Author: Paul Rogers <pr...@cloudera.com>
AuthorDate: Wed Jan 30 13:31:20 2019 -0800

    IMPALA-8148: Misc. FE code cleanup
    
    Roll-up of a bunch of minor code cleanup and refactoring. Pulled out of
    other patches to keep those patches focused on a single change.
    
    Tests: Reran all FE tests to verify no regressions.
    
    Change-Id: I4e2f2e79f36e804fd592b6fe77f0554c5ca803eb
    Reviewed-on: http://gerrit.cloudera.org:8080/12317
    Reviewed-by: Impala Public Jenkins <im...@cloudera.com>
    Tested-by: Impala Public Jenkins <im...@cloudera.com>
---
 .../java/org/apache/impala/analysis/Analyzer.java  |   3 +-
 .../java/org/apache/impala/analysis/CastExpr.java  |   6 +
 .../java/org/apache/impala/analysis/ColumnDef.java |   4 +-
 .../apache/impala/analysis/ComputeStatsStmt.java   |  24 +-
 .../main/java/org/apache/impala/analysis/Expr.java | 113 +++++-----
 .../apache/impala/analysis/FunctionCallExpr.java   |  11 +-
 .../org/apache/impala/analysis/InlineViewRef.java  |  13 +-
 .../apache/impala/analysis/PartitionKeyValue.java  |   2 +-
 .../java/org/apache/impala/analysis/QueryStmt.java |   2 +-
 .../org/apache/impala/analysis/RangePartition.java |   2 +-
 .../org/apache/impala/analysis/SelectStmt.java     |  24 +-
 .../java/org/apache/impala/analysis/SlotRef.java   |  23 +-
 .../java/org/apache/impala/analysis/TableRef.java  |   2 +-
 .../org/apache/impala/analysis/ToSqlUtils.java     |   5 +
 .../apache/impala/analysis/TupleDescriptor.java    |   3 +
 .../org/apache/impala/catalog/FeCatalogUtils.java  |  10 +-
 .../org/apache/impala/catalog/HdfsPartition.java   | 242 +++++++++++----------
 .../impala/common/InvalidValueException.java       |   2 +-
 .../java/org/apache/impala/common/TreeNode.java    |   1 +
 .../apache/impala/planner/HdfsPartitionPruner.java |  12 +-
 .../java/org/apache/impala/planner/PlanNode.java   |   5 +-
 .../impala/rewrite/SimplifyConditionalsRule.java   |   4 +-
 .../apache/impala/service/CatalogOpExecutor.java   |  36 +--
 .../impala/util/MaxRowsProcessedVisitor.java       |   9 +-
 .../apache/impala/analysis/AnalyzeExprsTest.java   |   6 +-
 .../apache/impala/catalog/HdfsPartitionTest.java   |   3 +
 .../impala/catalog/PartialCatalogInfoTest.java     |   6 +-
 .../events/MetastoreEventsProcessorTest.java       |   9 +-
 tests/common/skip.py                               |  18 +-
 tests/util/test_file_parser.py                     |   2 +-
 30 files changed, 316 insertions(+), 286 deletions(-)

diff --git a/fe/src/main/java/org/apache/impala/analysis/Analyzer.java b/fe/src/main/java/org/apache/impala/analysis/Analyzer.java
index d6529ad..91bc75e 100644
--- a/fe/src/main/java/org/apache/impala/analysis/Analyzer.java
+++ b/fe/src/main/java/org/apache/impala/analysis/Analyzer.java
@@ -1755,6 +1755,7 @@ public class Analyzer {
    * among slots in ignoreSlots are assumed to have already been enforced.
    * TODO: Consider optimizing for the cheapest minimum set of predicates.
    */
+  @SuppressWarnings("unchecked")
   public <T extends Expr> void createEquivConjuncts(TupleId tid, List<T> conjuncts,
       Set<SlotId> ignoreSlots) {
     // Maps from a slot id to its set of equivalent slots. Used to track equivalences
@@ -1929,7 +1930,7 @@ public class Analyzer {
   }
 
   public void registerValueTransfer(SlotId id1, SlotId id2) {
-    globalState_.registeredValueTransfers.add(new Pair(id1, id2));
+    globalState_.registeredValueTransfers.add(new Pair<SlotId, SlotId>(id1, id2));
   }
 
   public boolean isOuterJoined(TupleId tid) {
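
For context on the Pair change above: supplying explicit type arguments lets the
compiler verify the element types instead of falling back to the raw type and its
unchecked warning. A minimal sketch (Pair is org.apache.impala.common.Pair; the
ids are hypothetical):

    // Raw type: compiles with an unchecked warning, elements unverified.
    Pair raw = new Pair(id1, id2);
    // Parameterized: both elements statically checked to be SlotIds.
    Pair<SlotId, SlotId> typed = new Pair<SlotId, SlotId>(id1, id2);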
diff --git a/fe/src/main/java/org/apache/impala/analysis/CastExpr.java b/fe/src/main/java/org/apache/impala/analysis/CastExpr.java
index cd8bd36..4c010d8 100644
--- a/fe/src/main/java/org/apache/impala/analysis/CastExpr.java
+++ b/fe/src/main/java/org/apache/impala/analysis/CastExpr.java
@@ -29,6 +29,7 @@ import org.apache.impala.common.AnalysisException;
 import org.apache.impala.thrift.TExpr;
 import org.apache.impala.thrift.TExprNode;
 import org.apache.impala.thrift.TExprNodeType;
+
 import com.google.common.base.Objects;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
@@ -310,6 +311,11 @@ public class CastExpr extends Expr {
   }
 
   @Override
+  public boolean isImplicitCast() {
+    return isImplicit();
+  }
+
+  @Override
   public boolean localEquals(Expr that) {
     if (!super.localEquals(that)) return false;
     CastExpr other = (CastExpr) that;
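
The new isImplicitCast() override pairs with a change to Expr.java further down
in this patch, which replaces instanceof checks with virtual dispatch. A
condensed sketch of the resulting pattern (CastExpr presumably also overrides
ignoreImplicitCast() to recurse into its child; that hunk is not shown here):

    // Expr (base class): defaults cover the non-cast case.
    public boolean isImplicitCast() { return false; }
    public Expr ignoreImplicitCast() { return this; }

    // CastExpr: report implicit casts via the existing isImplicit() flag.
    @Override
    public boolean isImplicitCast() { return isImplicit(); }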
diff --git a/fe/src/main/java/org/apache/impala/analysis/ColumnDef.java b/fe/src/main/java/org/apache/impala/analysis/ColumnDef.java
index 401e576..c966f8bb 100644
--- a/fe/src/main/java/org/apache/impala/analysis/ColumnDef.java
+++ b/fe/src/main/java/org/apache/impala/analysis/ColumnDef.java
@@ -257,8 +257,8 @@ public class ColumnDef {
         throw new AnalysisException(String.format("Only constant values are allowed " +
             "for default values: %s", defaultValue_.toSql()));
       }
-      if (defaultValLiteral.getType().isNull() && ((isNullable_ != null && !isNullable_)
-          || isPrimaryKey_)) {
+      if (Expr.IS_NULL_VALUE.apply(defaultValLiteral) &&
+          ((isNullable_ != null && !isNullable_) || isPrimaryKey_)) {
         throw new AnalysisException(String.format("Default value of NULL not allowed " +
             "on non-nullable column: '%s'", getColName()));
       }
diff --git a/fe/src/main/java/org/apache/impala/analysis/ComputeStatsStmt.java b/fe/src/main/java/org/apache/impala/analysis/ComputeStatsStmt.java
index 80f2258..3e743fa 100644
--- a/fe/src/main/java/org/apache/impala/analysis/ComputeStatsStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/ComputeStatsStmt.java
@@ -131,6 +131,13 @@ public class ComputeStatsStmt extends StatementBase {
   private static final String STATS_FETCH_NUM_PARTITIONS_WITH_STATS =
       STATS_FETCH_PREFIX + ".NumPartitionsWithStats";
 
+  // The maximum number of partitions that may be explicitly selected by filter
+  // predicates. Any query that selects more than this automatically drops back to a full
+  // incremental stats recomputation.
+  // TODO: We can probably do better than this, e.g. running several queries, each of
+  // which selects up to MAX_INCREMENTAL_PARTITIONS partitions.
+  private static final int MAX_INCREMENTAL_PARTITIONS = 1000;
+
   protected final TableName tableName_;
   protected final TableSampleClause sampleParams_;
 
@@ -151,14 +158,14 @@ public class ComputeStatsStmt extends StatementBase {
   protected String columnStatsQueryStr_;
 
   // If true, stats will be gathered incrementally per-partition.
-  private boolean isIncremental_ = false;
+  private boolean isIncremental_;
 
   // If true, expect the compute stats process to produce output for all partitions in the
   // target table. In that case, 'expectedPartitions_' will be empty. The point of this
   // flag is to optimize the case where all partitions are targeted.
   // False for unpartitioned HDFS tables, non-HDFS tables or when stats extrapolation
   // is enabled.
-  private boolean expectAllPartitions_ = false;
+  private boolean expectAllPartitions_;
 
   // The list of valid partition statistics that can be used in an incremental computation
   // without themselves being recomputed. Populated in analyze().
@@ -173,23 +180,16 @@ public class ComputeStatsStmt extends StatementBase {
 
   // If non-null, partitions that an incremental computation might apply to. Must be
   // null if this is a non-incremental computation.
-  private PartitionSet partitionSet_ = null;
+  private PartitionSet partitionSet_;
 
   // If non-null, represents the user-specified list of columns for computing statistics.
   // Not supported for incremental statistics.
-  private List<String> columnWhitelist_ = null;
+  private List<String> columnWhitelist_;
 
   // The set of columns to be analyzed. Each column is valid: it must exist in the table
   // schema, it must be of a type that can be analyzed, and cannot refer to a partitioning
   // column for HDFS tables. If the set is null, no columns are restricted.
-  private Set<Column> validatedColumnWhitelist_ = null;
-
-  // The maximum number of partitions that may be explicitly selected by filter
-  // predicates. Any query that selects more than this automatically drops back to a full
-  // incremental stats recomputation.
-  // TODO: We can probably do better than this, e.g. running several queries, each of
-  // which selects up to MAX_INCREMENTAL_PARTITIONS partitions.
-  private static final int MAX_INCREMENTAL_PARTITIONS = 1000;
+  private Set<Column> validatedColumnWhitelist_;
 
   /**
    * Should only be constructed via static creation functions.
diff --git a/fe/src/main/java/org/apache/impala/analysis/Expr.java b/fe/src/main/java/org/apache/impala/analysis/Expr.java
index 6987d66..53a0081 100644
--- a/fe/src/main/java/org/apache/impala/analysis/Expr.java
+++ b/fe/src/main/java/org/apache/impala/analysis/Expr.java
@@ -56,49 +56,51 @@ import com.google.common.collect.Lists;
 
 /**
  * Root of the expr node hierarchy.
- *
  */
 abstract public class Expr extends TreeNode<Expr> implements ParseNode, Cloneable {
-  private final static Logger LOG = LoggerFactory.getLogger(Expr.class);
+  private static final Logger LOG = LoggerFactory.getLogger(Expr.class);
 
   // Limits on the number of expr children and the depth of an expr tree. These maximum
   // values guard against crashes due to stack overflows (IMPALA-432) and were
   // experimentally determined to be safe.
-  public final static int EXPR_CHILDREN_LIMIT = 10000;
+  public static final int EXPR_CHILDREN_LIMIT = 10000;
   // The expr depth limit is mostly due to our recursive implementation of clone().
-  public final static int EXPR_DEPTH_LIMIT = 1000;
+  public static final int EXPR_DEPTH_LIMIT = 1000;
 
   // Name of the function that needs to be implemented by every Expr that
   // supports negation.
-  private final static String NEGATE_FN = "negate";
+  private static final String NEGATE_FN = "negate";
 
   // To be used where we cannot come up with a better estimate (selectivity_ is -1).
-  public static double DEFAULT_SELECTIVITY = 0.1;
+  public static final double DEFAULT_SELECTIVITY = 0.1;
 
   // The relative costs of different Exprs. These numbers are not intended as a precise
   // reflection of running times, but as simple heuristics for ordering Exprs from cheap
   // to expensive.
   // TODO(tmwarshall): Get these costs in a more principled way, eg. with a benchmark.
-  public final static float ARITHMETIC_OP_COST = 1;
-  public final static float BINARY_PREDICATE_COST = 1;
-  public final static float VAR_LEN_BINARY_PREDICATE_COST = 5;
-  public final static float CAST_COST = 1;
-  public final static float COMPOUND_PREDICATE_COST = 1;
-  public final static float FUNCTION_CALL_COST = 10;
-  public final static float IS_NOT_EMPTY_COST = 1;
-  public final static float IS_NULL_COST = 1;
-  public final static float LIKE_COST = 10;
-  public final static float LITERAL_COST = 1;
-  public final static float SLOT_REF_COST = 1;
-  public final static float TIMESTAMP_ARITHMETIC_COST = 5;
-  public final static float UNKNOWN_COST = -1;
+  public static final float ARITHMETIC_OP_COST = 1;
+  public static final float BINARY_PREDICATE_COST = 1;
+  public static final float VAR_LEN_BINARY_PREDICATE_COST = 5;
+  public static final float CAST_COST = 1;
+  public static final float COMPOUND_PREDICATE_COST = 1;
+  public static final float FUNCTION_CALL_COST = 10;
+  public static final float IS_NOT_EMPTY_COST = 1;
+  public static final float IS_NULL_COST = 1;
+  public static final float LIKE_COST = 10;
+  public static final float LITERAL_COST = 1;
+  public static final float SLOT_REF_COST = 1;
+  public static final float TIMESTAMP_ARITHMETIC_COST = 5;
+  public static final float UNKNOWN_COST = -1;
+
+  // Arbitrary max exprs considered for constant propagation due to O(n^2) complexity.
+  private static final int CONST_PROPAGATION_EXPR_LIMIT = 200;
 
   // To be used when estimating the cost of Exprs of type string where we don't otherwise
   // have an estimate of how long the strings produced by that Expr are.
-  public final static int DEFAULT_AVG_STRING_LENGTH = 5;
+  public static final int DEFAULT_AVG_STRING_LENGTH = 5;
 
   // returns true if an Expr is a non-analytic aggregate.
-  private final static com.google.common.base.Predicate<Expr> isAggregatePredicate_ =
+  public static final com.google.common.base.Predicate<Expr> IS_AGGREGATE =
       new com.google.common.base.Predicate<Expr>() {
         @Override
         public boolean apply(Expr arg) {
@@ -108,7 +110,7 @@ abstract public class Expr extends TreeNode<Expr> implements ParseNode, Cloneabl
       };
 
   // Returns true if an Expr is a NOT CompoundPredicate.
-  public final static com.google.common.base.Predicate<Expr> IS_NOT_PREDICATE =
+  public static final com.google.common.base.Predicate<Expr> IS_NOT_PREDICATE =
       new com.google.common.base.Predicate<Expr>() {
         @Override
         public boolean apply(Expr arg) {
@@ -118,7 +120,7 @@ abstract public class Expr extends TreeNode<Expr> implements ParseNode, Cloneabl
       };
 
   // Returns true if an Expr is an OR CompoundPredicate.
-  public final static com.google.common.base.Predicate<Expr> IS_OR_PREDICATE =
+  public static final com.google.common.base.Predicate<Expr> IS_OR_PREDICATE =
       new com.google.common.base.Predicate<Expr>() {
         @Override
         public boolean apply(Expr arg) {
@@ -128,7 +130,7 @@ abstract public class Expr extends TreeNode<Expr> implements ParseNode, Cloneabl
       };
 
   // Returns true if an Expr is a scalar subquery
-  public final static com.google.common.base.Predicate<Expr> IS_SCALAR_SUBQUERY =
+  public static final com.google.common.base.Predicate<Expr> IS_SCALAR_SUBQUERY =
       new com.google.common.base.Predicate<Expr>() {
         @Override
         public boolean apply(Expr arg) {
@@ -138,7 +140,7 @@ abstract public class Expr extends TreeNode<Expr> implements ParseNode, Cloneabl
 
   // Returns true if an Expr is an aggregate function that returns non-null on
   // an empty set (e.g. count).
-  public final static com.google.common.base.Predicate<Expr>
+  public static final com.google.common.base.Predicate<Expr>
       NON_NULL_EMPTY_AGG = new com.google.common.base.Predicate<Expr>() {
         @Override
         public boolean apply(Expr arg) {
@@ -148,7 +150,7 @@ abstract public class Expr extends TreeNode<Expr> implements ParseNode, Cloneabl
       };
 
   // Returns true if an Expr is a builtin aggregate function.
-  public final static com.google.common.base.Predicate<Expr> IS_BUILTIN_AGG_FN =
+  public static final com.google.common.base.Predicate<Expr> IS_BUILTIN_AGG_FN =
       new com.google.common.base.Predicate<Expr>() {
         @Override
         public boolean apply(Expr arg) {
@@ -158,16 +160,16 @@ abstract public class Expr extends TreeNode<Expr> implements ParseNode, Cloneabl
       };
 
   // Returns true if an Expr is a user-defined aggregate function.
-  public final static com.google.common.base.Predicate<Expr> IS_UDA_FN =
+  public static final com.google.common.base.Predicate<Expr> IS_UDA_FN =
       new com.google.common.base.Predicate<Expr>() {
         @Override
         public boolean apply(Expr arg) {
-          return isAggregatePredicate_.apply(arg) &&
+          return IS_AGGREGATE.apply(arg) &&
               !((FunctionCallExpr)arg).getFnName().isBuiltin();
         }
       };
 
-  public final static com.google.common.base.Predicate<Expr> IS_TRUE_LITERAL =
+  public static final com.google.common.base.Predicate<Expr> IS_TRUE_LITERAL =
       new com.google.common.base.Predicate<Expr>() {
         @Override
         public boolean apply(Expr arg) {
@@ -175,7 +177,7 @@ abstract public class Expr extends TreeNode<Expr> implements ParseNode, Cloneabl
         }
       };
 
-  public final static com.google.common.base.Predicate<Expr> IS_FALSE_LITERAL =
+  public static final com.google.common.base.Predicate<Expr> IS_FALSE_LITERAL =
       new com.google.common.base.Predicate<Expr>() {
         @Override
         public boolean apply(Expr arg) {
@@ -183,13 +185,13 @@ abstract public class Expr extends TreeNode<Expr> implements ParseNode, Cloneabl
         }
       };
 
-  public final static com.google.common.base.Predicate<Expr> IS_EQ_BINARY_PREDICATE =
+  public static final com.google.common.base.Predicate<Expr> IS_EQ_BINARY_PREDICATE =
       new com.google.common.base.Predicate<Expr>() {
         @Override
         public boolean apply(Expr arg) { return BinaryPredicate.getEqSlots(arg) != null; }
       };
 
-  public final static com.google.common.base.Predicate<Expr> IS_NOT_EQ_BINARY_PREDICATE =
+  public static final com.google.common.base.Predicate<Expr> IS_NOT_EQ_BINARY_PREDICATE =
       new com.google.common.base.Predicate<Expr>() {
         @Override
         public boolean apply(Expr arg) {
@@ -199,14 +201,14 @@ abstract public class Expr extends TreeNode<Expr> implements ParseNode, Cloneabl
         }
       };
 
-  public final static com.google.common.base.Predicate<Expr> IS_BINARY_PREDICATE =
+  public static final com.google.common.base.Predicate<Expr> IS_BINARY_PREDICATE =
       new com.google.common.base.Predicate<Expr>() {
         @Override
         public boolean apply(Expr arg) { return arg instanceof BinaryPredicate; }
       };
 
-  public final static com.google.common.base.Predicate<Expr> IS_EXPR_EQ_LITERAL_PREDICATE =
-      new com.google.common.base.Predicate<Expr>() {
+  public static final com.google.common.base.Predicate<Expr>
+    IS_EXPR_EQ_LITERAL_PREDICATE = new com.google.common.base.Predicate<Expr>() {
     @Override
     public boolean apply(Expr arg) {
       return arg instanceof BinaryPredicate
@@ -215,7 +217,7 @@ abstract public class Expr extends TreeNode<Expr> implements ParseNode, Cloneabl
     }
   };
 
-  public final static com.google.common.base.Predicate<Expr>
+  public static final com.google.common.base.Predicate<Expr>
       IS_NONDETERMINISTIC_BUILTIN_FN_PREDICATE =
       new com.google.common.base.Predicate<Expr>() {
         @Override
@@ -225,7 +227,7 @@ abstract public class Expr extends TreeNode<Expr> implements ParseNode, Cloneabl
         }
       };
 
-  public final static com.google.common.base.Predicate<Expr> IS_UDF_PREDICATE =
+  public static final com.google.common.base.Predicate<Expr> IS_UDF_PREDICATE =
       new com.google.common.base.Predicate<Expr>() {
         @Override
         public boolean apply(Expr arg) {
@@ -237,7 +239,7 @@ abstract public class Expr extends TreeNode<Expr> implements ParseNode, Cloneabl
   /**
    * @return true if the expression is a literal.
    */
-  public final static com.google.common.base.Predicate<Expr> IS_LITERAL =
+  public static final com.google.common.base.Predicate<Expr> IS_LITERAL =
     new com.google.common.base.Predicate<Expr>() {
       @Override
       public boolean apply(Expr arg) {
@@ -248,7 +250,7 @@ abstract public class Expr extends TreeNode<Expr> implements ParseNode, Cloneabl
   /**
    * @return true if the expression is a null literal.
    */
-  public final static com.google.common.base.Predicate<Expr> IS_NULL_LITERAL =
+  public static final com.google.common.base.Predicate<Expr> IS_NULL_LITERAL =
     new com.google.common.base.Predicate<Expr>() {
       @Override
       public boolean apply(Expr arg) {
@@ -259,7 +261,7 @@ abstract public class Expr extends TreeNode<Expr> implements ParseNode, Cloneabl
   /**
    * @return true if the expression is a literal value other than NULL.
    */
-  public final static com.google.common.base.Predicate<Expr> IS_NON_NULL_LITERAL =
+  public static final com.google.common.base.Predicate<Expr> IS_NON_NULL_LITERAL =
     new com.google.common.base.Predicate<Expr>() {
       @Override
       public boolean apply(Expr arg) {
@@ -271,7 +273,7 @@ abstract public class Expr extends TreeNode<Expr> implements ParseNode, Cloneabl
    * @return true if the expression is a null literal, or a
    * cast of a null (as created by the ConstantFoldingRule.)
    */
-  public final static com.google.common.base.Predicate<Expr> IS_NULL_VALUE =
+  public static final com.google.common.base.Predicate<Expr> IS_NULL_VALUE =
     new com.google.common.base.Predicate<Expr>() {
       @Override
       public boolean apply(Expr arg) {
@@ -285,7 +287,7 @@ abstract public class Expr extends TreeNode<Expr> implements ParseNode, Cloneabl
    * @return true if the expression is a literal, or a
    * cast of a null (as created by the ConstantFoldingRule.)
    */
-  public final static com.google.common.base.Predicate<Expr> IS_LITERAL_VALUE =
+  public static final com.google.common.base.Predicate<Expr> IS_LITERAL_VALUE =
     new com.google.common.base.Predicate<Expr>() {
       @Override
       public boolean apply(Expr arg) {
@@ -293,7 +295,7 @@ abstract public class Expr extends TreeNode<Expr> implements ParseNode, Cloneabl
       }
     };
 
-  public final static com.google.common.base.Predicate<Expr> IS_INT_LITERAL =
+  public static final com.google.common.base.Predicate<Expr> IS_INT_LITERAL =
     new com.google.common.base.Predicate<Expr>() {
       @Override
       public boolean apply(Expr arg) {
@@ -346,7 +348,6 @@ abstract public class Expr extends TreeNode<Expr> implements ParseNode, Cloneabl
   private boolean isAnalyzed_ = false;
 
   protected Expr() {
-    super();
     type_ = Type.INVALID;
     selectivity_ = -1.0;
     evalCost_ = -1.0f;
@@ -662,8 +663,8 @@ abstract public class Expr extends TreeNode<Expr> implements ParseNode, Cloneabl
         this instanceof BinaryPredicate);
     // This heuristic conversion is not part of DECIMAL_V2.
     if (analyzer.getQueryOptions().isDecimal_v2()) return;
-    if (children_.size() == 1) return; // Do not attempt to convert for unary ops
-    Preconditions.checkState(children_.size() == 2);
+    if (getChildCount() == 1) return; // Do not attempt to convert for unary ops
+    Preconditions.checkState(getChildCount() == 2);
     Type t0 = getChild(0).getType();
     Type t1 = getChild(1).getType();
     boolean c0IsConstantDecimal = getChild(0).isConstant() && t0.isDecimal();
@@ -784,12 +785,8 @@ abstract public class Expr extends TreeNode<Expr> implements ParseNode, Cloneabl
     return result;
   }
 
-  public static com.google.common.base.Predicate<Expr> isAggregatePredicate() {
-    return isAggregatePredicate_;
-  }
-
   public boolean isAggregate() {
-    return isAggregatePredicate_.apply(this);
+    return IS_AGGREGATE.apply(this);
   }
 
   public List<String> childrenToSql(ToSqlOptions options) {
@@ -1115,9 +1112,6 @@ abstract public class Expr extends TreeNode<Expr> implements ParseNode, Cloneabl
     }
   }
 
-  // Arbitrary max exprs considered for constant propagation due to O(n^2) complexity.
-  private final static int CONST_PROPAGATION_EXPR_LIMIT = 200;
-
   /**
    * Propagates constant expressions of the form <slot ref> = <constant> to
    * other uses of slot ref in the given conjuncts; returns a BitSet with
@@ -1402,24 +1396,19 @@ abstract public class Expr extends TreeNode<Expr> implements ParseNode, Cloneabl
   /**
    * Returns child expr if this expr is an implicit cast, otherwise returns 'this'.
    */
-  public Expr ignoreImplicitCast() {
-    if (isImplicitCast()) return getChild(0).ignoreImplicitCast();
-    return this;
-  }
+  public Expr ignoreImplicitCast() { return this; }
 
   /**
    * Returns true if 'this' is an implicit cast expr.
    */
-  public boolean isImplicitCast() {
-    return this instanceof CastExpr && ((CastExpr) this).isImplicit();
-  }
+  public boolean isImplicitCast() { return false; }
 
   @Override
   public String toString() {
     return Objects.toStringHelper(this.getClass())
         .add("id", id_)
         .add("type", type_)
-        .add("toSql", toSql())
+        .add("toSql", toSql(ToSqlOptions.SHOW_IMPLICIT_CASTS))
         .add("sel", selectivity_)
         .add("evalCost", evalCost_)
         .add("#distinct", numDistinctValues_)
diff --git a/fe/src/main/java/org/apache/impala/analysis/FunctionCallExpr.java b/fe/src/main/java/org/apache/impala/analysis/FunctionCallExpr.java
index 8047b29..ccc3c3e 100644
--- a/fe/src/main/java/org/apache/impala/analysis/FunctionCallExpr.java
+++ b/fe/src/main/java/org/apache/impala/analysis/FunctionCallExpr.java
@@ -232,7 +232,6 @@ public class FunctionCallExpr extends Expr {
         .toString();
   }
 
-  public FunctionParams getParams() { return params_; }
   public boolean isScalarFunction() {
     Preconditions.checkNotNull(fn_);
     return fn_ instanceof ScalarFunction;
@@ -271,16 +270,14 @@ public class FunctionCallExpr extends Expr {
     return ((AggregateFunction)fn_).ignoresDistinct();
   }
 
+  public FunctionParams getParams() { return params_; }
   public FunctionName getFnName() { return fnName_; }
   public void setIsAnalyticFnCall(boolean v) { isAnalyticFnCall_ = v; }
   public void setIsInternalFnCall(boolean v) { isInternalFnCall_ = v; }
 
   static boolean isNondeterministicBuiltinFnName(String fnName) {
-    if (fnName.equalsIgnoreCase("rand") || fnName.equalsIgnoreCase("random")
-        || fnName.equalsIgnoreCase("uuid")) {
-      return true;
-    }
-    return false;
+    return fnName.equalsIgnoreCase("rand") || fnName.equalsIgnoreCase("random") ||
+           fnName.equalsIgnoreCase("uuid");
   }
 
   /**
@@ -559,7 +556,7 @@ public class FunctionCallExpr extends Expr {
 
     if (isAggregateFunction()) {
       // subexprs must not contain aggregates
-      if (TreeNode.contains(children_, Expr.isAggregatePredicate())) {
+      if (TreeNode.contains(children_, Expr.IS_AGGREGATE)) {
         throw new AnalysisException(
             "aggregate function must not contain aggregate parameters: " + this.toSql());
       }
diff --git a/fe/src/main/java/org/apache/impala/analysis/InlineViewRef.java b/fe/src/main/java/org/apache/impala/analysis/InlineViewRef.java
index 5993235..5c9503f 100644
--- a/fe/src/main/java/org/apache/impala/analysis/InlineViewRef.java
+++ b/fe/src/main/java/org/apache/impala/analysis/InlineViewRef.java
@@ -326,18 +326,15 @@ public class InlineViewRef extends TableRef {
   protected String tableRefToSql(ToSqlOptions options) {
     // Enclose the alias in quotes if Hive cannot parse it without quotes.
     // This is needed for view compatibility between Impala and Hive.
-    String aliasSql = null;
     String alias = getExplicitAlias();
-    if (alias != null) aliasSql = ToSqlUtils.getIdentSql(alias);
     if (view_ != null) {
-      return view_.getTableName().toSql() + (aliasSql == null ? "" : " " + aliasSql);
+      return view_.getTableName().toSql() + ToSqlUtils.formatAlias(alias);
     }
-    Preconditions.checkNotNull(aliasSql);
     StringBuilder sql = new StringBuilder()
-                            .append("(")
-                            .append(queryStmt_.toSql(options))
-                            .append(") ")
-                            .append(aliasSql);
+        .append("(")
+        .append(queryStmt_.toSql(options))
+        .append(")")
+        .append(ToSqlUtils.formatAlias(alias));
     // Add explicit col labels for debugging even though this syntax isn't supported.
     if (explicitColLabels_ != null) {
       sql.append(" (");
diff --git a/fe/src/main/java/org/apache/impala/analysis/PartitionKeyValue.java b/fe/src/main/java/org/apache/impala/analysis/PartitionKeyValue.java
index 1add747..1aebcbe 100644
--- a/fe/src/main/java/org/apache/impala/analysis/PartitionKeyValue.java
+++ b/fe/src/main/java/org/apache/impala/analysis/PartitionKeyValue.java
@@ -43,12 +43,12 @@ public class PartitionKeyValue {
   }
 
   public void analyze(Analyzer analyzer) throws AnalysisException {
+    if (value_ == null) return;
     if (isStatic() && !value_.isConstant()) {
       throw new AnalysisException(
           String.format("Non-constant expressions are not supported " +
               "as static partition-key values in '%s'.", toString()));
     }
-    if (value_ == null) return;
     value_.analyze(analyzer);
     literalValue_ = LiteralExpr.create(value_, analyzer.getQueryCtx());
   }
diff --git a/fe/src/main/java/org/apache/impala/analysis/QueryStmt.java b/fe/src/main/java/org/apache/impala/analysis/QueryStmt.java
index a80608b..f8cc7dc 100644
--- a/fe/src/main/java/org/apache/impala/analysis/QueryStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/QueryStmt.java
@@ -393,7 +393,7 @@ public abstract class QueryStmt extends StatementBase {
       return resultExprs_.get((int) pos - 1).clone();
     }
 
-    // Ordinary expression..
+    // Ordinary expression.
     return expr;
   }
 
diff --git a/fe/src/main/java/org/apache/impala/analysis/RangePartition.java b/fe/src/main/java/org/apache/impala/analysis/RangePartition.java
index c1dbbcc..c3abf50 100644
--- a/fe/src/main/java/org/apache/impala/analysis/RangePartition.java
+++ b/fe/src/main/java/org/apache/impala/analysis/RangePartition.java
@@ -169,7 +169,7 @@ public class RangePartition extends StmtNode {
       throw new AnalysisException(String.format("Only constant values are allowed " +
           "for range-partition bounds: %s", value.toSql()));
     }
-    if (literal.getType().isNull()) {
+    if (Expr.IS_NULL_VALUE.apply(literal)) {
       throw new AnalysisException(String.format("Range partition values cannot be " +
           "NULL. Range partition: '%s'", toSql()));
     }
diff --git a/fe/src/main/java/org/apache/impala/analysis/SelectStmt.java b/fe/src/main/java/org/apache/impala/analysis/SelectStmt.java
index ff45a56..b517e3f 100644
--- a/fe/src/main/java/org/apache/impala/analysis/SelectStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/SelectStmt.java
@@ -344,7 +344,7 @@ public class SelectStmt extends QueryStmt {
     private void analyzeWhereClause() throws AnalysisException {
       if (whereClause_ == null) return;
       whereClause_.analyze(analyzer_);
-      if (whereClause_.contains(Expr.isAggregatePredicate())) {
+      if (whereClause_.contains(Expr.IS_AGGREGATE)) {
         throw new AnalysisException(
             "aggregate function not allowed in WHERE clause");
       }
@@ -586,12 +586,12 @@ public class SelectStmt extends QueryStmt {
 
     private boolean checkForAggregates() throws AnalysisException {
       if (groupingExprs_ == null && !selectList_.isDistinct()
-          && !TreeNode.contains(resultExprs_, Expr.isAggregatePredicate())
+          && !TreeNode.contains(resultExprs_, Expr.IS_AGGREGATE)
           && (havingPred_ == null
-              || !havingPred_.contains(Expr.isAggregatePredicate()))
+              || !havingPred_.contains(Expr.IS_AGGREGATE))
           && (sortInfo_ == null
               || !TreeNode.contains(sortInfo_.getSortExprs(),
-                                    Expr.isAggregatePredicate()))) {
+                                    Expr.IS_AGGREGATE))) {
         // We're not computing aggregates but we still need to register the HAVING
         // clause which could, e.g., contain a constant expression evaluating to false.
         if (havingPred_ != null) analyzer_.registerConjuncts(havingPred_, true);
@@ -609,9 +609,9 @@ public class SelectStmt extends QueryStmt {
 
       if (selectList_.isDistinct()
           && (groupingExprs_ != null
-              || TreeNode.contains(resultExprs_, Expr.isAggregatePredicate())
+              || TreeNode.contains(resultExprs_, Expr.IS_AGGREGATE)
               || (havingPred_ != null
-                  && havingPred_.contains(Expr.isAggregatePredicate())))) {
+                  && havingPred_.contains(Expr.IS_AGGREGATE)))) {
         throw new AnalysisException(
           "cannot combine SELECT DISTINCT with aggregate functions or GROUP BY");
       }
@@ -620,7 +620,7 @@ public class SelectStmt extends QueryStmt {
       // '*', and if you need to name all star-expanded cols in the group by clause you
       // might as well do it in the select list).
       if (groupingExprs_ != null ||
-          TreeNode.contains(resultExprs_, Expr.isAggregatePredicate())) {
+          TreeNode.contains(resultExprs_, Expr.IS_AGGREGATE)) {
         for (SelectListItem item : selectList_.getItems()) {
           if (item.isStar()) {
             throw new AnalysisException(
@@ -652,7 +652,7 @@ public class SelectStmt extends QueryStmt {
 
       for (int i = 0; i < groupingExprsCopy_.size(); ++i) {
         groupingExprsCopy_.get(i).analyze(analyzer_);
-        if (groupingExprsCopy_.get(i).contains(Expr.isAggregatePredicate())) {
+        if (groupingExprsCopy_.get(i).contains(Expr.IS_AGGREGATE)) {
           // reference the original expr in the error msg
           throw new AnalysisException(
               "GROUP BY expression must not contain aggregate functions: "
@@ -671,13 +671,13 @@ public class SelectStmt extends QueryStmt {
       // Collect the aggregate expressions from the SELECT, HAVING and ORDER BY clauses
       // of this statement.
       aggExprs_ = new ArrayList<>();
-      TreeNode.collect(resultExprs_, Expr.isAggregatePredicate(), aggExprs_);
+      TreeNode.collect(resultExprs_, Expr.IS_AGGREGATE, aggExprs_);
       if (havingPred_ != null) {
-        havingPred_.collect(Expr.isAggregatePredicate(), aggExprs_);
+        havingPred_.collect(Expr.IS_AGGREGATE, aggExprs_);
       }
       if (sortInfo_ != null) {
         // TODO: Avoid evaluating aggs in ignored order-bys
-        TreeNode.collect(sortInfo_.getSortExprs(), Expr.isAggregatePredicate(),
+        TreeNode.collect(sortInfo_.getSortExprs(), Expr.IS_AGGREGATE,
             aggExprs_);
       }
     }
@@ -727,7 +727,7 @@ public class SelectStmt extends QueryStmt {
       List<Expr> substitutedAggs =
           Expr.substituteList(aggExprs_, countAllMap_, analyzer_, false);
       aggExprs_.clear();
-      TreeNode.collect(substitutedAggs, Expr.isAggregatePredicate(), aggExprs_);
+      TreeNode.collect(substitutedAggs, Expr.IS_AGGREGATE, aggExprs_);
 
       List<Expr> groupingExprs = groupingExprsCopy_;
       if (selectList_.isDistinct()) {
diff --git a/fe/src/main/java/org/apache/impala/analysis/SlotRef.java b/fe/src/main/java/org/apache/impala/analysis/SlotRef.java
index bd3d543..b329b51 100644
--- a/fe/src/main/java/org/apache/impala/analysis/SlotRef.java
+++ b/fe/src/main/java/org/apache/impala/analysis/SlotRef.java
@@ -25,6 +25,7 @@ import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.TableLoadingException;
 import org.apache.impala.catalog.Type;
 import org.apache.impala.common.AnalysisException;
+import org.apache.impala.common.UnsupportedFeatureException;
 import org.apache.impala.thrift.TExprNode;
 import org.apache.impala.thrift.TExprNodeType;
 import org.apache.impala.thrift.TSlotRef;
@@ -102,14 +103,14 @@ public class SlotRef extends Expr {
     desc_ = analyzer.registerSlotRef(resolvedPath);
     type_ = desc_.getType();
     if (!type_.isSupported()) {
-      throw new AnalysisException("Unsupported type '"
+      throw new UnsupportedFeatureException("Unsupported type '"
           + type_.toSql() + "' in '" + toSql() + "'.");
     }
     if (type_.isInvalid()) {
       // In this case, the metastore contained a string we can't parse at all
       // e.g. map. We could report a better error if we stored the original
       // HMS string.
-      throw new AnalysisException("Unsupported type in '" + toSql() + "'.");
+      throw new UnsupportedFeatureException("Unsupported type in '" + toSql() + "'.");
     }
 
     numDistinctValues_ = desc_.getStats().getNumDistinctValues();
@@ -234,10 +235,24 @@ public class SlotRef extends Expr {
 
   @Override
   public String toString() {
+    StringBuilder buf = new StringBuilder();
+    if (rawPath_ != null) {
+      buf.append(String.join(".", rawPath_));
+    } else if (label_ != null) {
+      buf.append(label_);
+    }
+    boolean closeParen = buf.length() > 0;
+    if (closeParen) buf.append(" (");
     if (desc_ != null) {
-      return "tid=" + desc_.getParent().getId() + " sid=" + desc_.getId();
+      buf.append("tid=")
+        .append(desc_.getParent().getId())
+        .append(" sid=")
+        .append(desc_.getId());
+    } else {
+      buf.append("no desc set");
     }
-    return "no desc set";
+    if (closeParen) buf.append(")");
+    return buf.toString();
   }
 
   @Override
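
The rewritten toString() prefixes the raw path (or label) to the descriptor ids.
Reading the code above, the possible output shapes are (values hypothetical):

    // Resolved, path known:     "t.id (tid=0 sid=2)"
    // Unresolved, path known:   "t.id (no desc set)"
    // Neither path nor label:   "tid=0 sid=2"  or  "no desc set"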
diff --git a/fe/src/main/java/org/apache/impala/analysis/TableRef.java b/fe/src/main/java/org/apache/impala/analysis/TableRef.java
index f0acc41..f86236b 100644
--- a/fe/src/main/java/org/apache/impala/analysis/TableRef.java
+++ b/fe/src/main/java/org/apache/impala/analysis/TableRef.java
@@ -534,7 +534,7 @@ public class TableRef extends StmtNode {
       onClause_.analyze(analyzer);
       analyzer.setVisibleSemiJoinedTuple(null);
       onClause_.checkReturnsBool("ON clause", true);
-        if (onClause_.contains(Expr.isAggregatePredicate())) {
+        if (onClause_.contains(Expr.IS_AGGREGATE)) {
           throw new AnalysisException(
               "aggregate function not allowed in ON clause: " + toSql());
       }
diff --git a/fe/src/main/java/org/apache/impala/analysis/ToSqlUtils.java b/fe/src/main/java/org/apache/impala/analysis/ToSqlUtils.java
index 3909eb8..0951c40 100644
--- a/fe/src/main/java/org/apache/impala/analysis/ToSqlUtils.java
+++ b/fe/src/main/java/org/apache/impala/analysis/ToSqlUtils.java
@@ -548,4 +548,9 @@ public class ToSqlUtils {
     }
     return sb.toString();
   }
+
+  public static String formatAlias(String alias) {
+    if (alias == null) return "";
+    return " " + getIdentSql(alias);
+  }
 }
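
formatAlias() centralizes the optional-alias formatting that InlineViewRef
(above) previously open-coded: null yields an empty string, anything else a
leading space plus the identifier as returned by getIdentSql(), which quotes it
only when Hive cannot parse it unquoted. A usage sketch with hypothetical values:

    ToSqlUtils.formatAlias(null);  // -> ""
    ToSqlUtils.formatAlias("t1");  // -> " t1"
    // An alias Hive cannot parse unquoted comes back quoted, e.g. " `select`"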
diff --git a/fe/src/main/java/org/apache/impala/analysis/TupleDescriptor.java b/fe/src/main/java/org/apache/impala/analysis/TupleDescriptor.java
index 83c6c30..87b8e5f 100644
--- a/fe/src/main/java/org/apache/impala/analysis/TupleDescriptor.java
+++ b/fe/src/main/java/org/apache/impala/analysis/TupleDescriptor.java
@@ -206,6 +206,9 @@ public class TupleDescriptor {
         .toString();
   }
 
+  @Override
+  public String toString() { return debugString(); }
+
   /**
    * Checks that this tuple is materialized and has a mem layout. Throws if this tuple
    * is not executable, i.e., if one of those conditions is not met.
diff --git a/fe/src/main/java/org/apache/impala/catalog/FeCatalogUtils.java b/fe/src/main/java/org/apache/impala/catalog/FeCatalogUtils.java
index 9840a2c..d10dadc 100644
--- a/fe/src/main/java/org/apache/impala/catalog/FeCatalogUtils.java
+++ b/fe/src/main/java/org/apache/impala/catalog/FeCatalogUtils.java
@@ -222,12 +222,12 @@ public abstract class FeCatalogUtils {
       List<String> hmsPartitionValues) throws CatalogException {
     Preconditions.checkArgument(
         hmsPartitionValues.size() == table.getNumClusteringCols(),
-        "Cannot parse partition values %s for table %s: " +
-        "expected %s values but got %s",
+        "Cannot parse partition values '%s' for table %s: " +
+        "expected %d values but got %d",
         hmsPartitionValues, table.getFullName(),
         table.getNumClusteringCols(), hmsPartitionValues.size());
     List<LiteralExpr> keyValues = new ArrayList<>();
-    for (String partitionKey: hmsPartitionValues) {
+    for (String partitionKey : hmsPartitionValues) {
       Type type = table.getColumns().get(keyValues.size()).getType();
       // Deal with Hive's special NULL partition key.
       if (partitionKey.equals(table.getNullPartitionKeyValue())) {
@@ -236,7 +236,9 @@ public abstract class FeCatalogUtils {
         try {
           keyValues.add(LiteralExpr.create(partitionKey, type));
         } catch (Exception ex) {
-          LOG.warn("Failed to create literal expression of type: " + type, ex);
+          LOG.warn(String.format(
+              "Failed to create literal expression: type: %s, value: '%s'",
+              type.toSql(), partitionKey), ex);
           throw new CatalogException("Invalid partition key value of type: " + type,
               ex);
         }
diff --git a/fe/src/main/java/org/apache/impala/catalog/HdfsPartition.java b/fe/src/main/java/org/apache/impala/catalog/HdfsPartition.java
index 8c5db82..b9e5c55 100644
--- a/fe/src/main/java/org/apache/impala/catalog/HdfsPartition.java
+++ b/fe/src/main/java/org/apache/impala/catalog/HdfsPartition.java
@@ -481,6 +481,76 @@ public class HdfsPartition implements FeFsPartition, PrunablePartition {
     }
   }
 
+  // Struct-style class for caching all the information we need to reconstruct an
+  // HMS-compatible Partition object, for use in RPCs to the metastore. We do this rather
+  // than cache the Thrift partition object itself as the latter can be large - thanks
+  // mostly to the inclusion of the full FieldSchema list. This class is read-only - if
+  // any field can be mutated by Impala it should belong to HdfsPartition itself (see
+  // HdfsPartition.location_ for an example).
+  //
+  // TODO: Cache this descriptor in HdfsTable so that identical descriptors are shared
+  // between HdfsPartition instances.
+  // TODO: sdInputFormat and sdOutputFormat can be mutated by Impala when the file format
+  // of a partition changes; move these fields to HdfsPartition.
+  private static class CachedHmsPartitionDescriptor {
+    public String sdInputFormat;
+    public String sdOutputFormat;
+    public final boolean sdCompressed;
+    public final int sdNumBuckets;
+    public final org.apache.hadoop.hive.metastore.api.SerDeInfo sdSerdeInfo;
+    public final List<String> sdBucketCols;
+    public final List<org.apache.hadoop.hive.metastore.api.Order> sdSortCols;
+    public final Map<String, String> sdParameters;
+    public final int msCreateTime;
+    public final int msLastAccessTime;
+
+    public CachedHmsPartitionDescriptor(
+        org.apache.hadoop.hive.metastore.api.Partition msPartition) {
+      org.apache.hadoop.hive.metastore.api.StorageDescriptor sd = null;
+      if (msPartition != null) {
+        sd = msPartition.getSd();
+        CatalogInterners.internFieldsInPlace(sd);
+        msCreateTime = msPartition.getCreateTime();
+        msLastAccessTime = msPartition.getLastAccessTime();
+      } else {
+        msCreateTime = msLastAccessTime = 0;
+      }
+      if (sd != null) {
+        sdInputFormat = sd.getInputFormat();
+        sdOutputFormat = sd.getOutputFormat();
+        sdCompressed = sd.isCompressed();
+        sdNumBuckets = sd.getNumBuckets();
+        sdSerdeInfo = sd.getSerdeInfo();
+        sdBucketCols = ImmutableList.copyOf(sd.getBucketCols());
+        sdSortCols = ImmutableList.copyOf(sd.getSortCols());
+        sdParameters = ImmutableMap.copyOf(CatalogInterners.internParameters(
+            sd.getParameters()));
+      } else {
+        sdInputFormat = "";
+        sdOutputFormat = "";
+        sdCompressed = false;
+        sdNumBuckets = 0;
+        sdSerdeInfo = null;
+        sdBucketCols = ImmutableList.of();
+        sdSortCols = ImmutableList.of();
+        sdParameters = ImmutableMap.of();
+      }
+    }
+  }
+
+  private static final Logger LOG = LoggerFactory.getLogger(HdfsPartition.class);
+
+  // A predicate for checking if a given string is a key used for serializing
+  // TPartitionStats to HMS parameters.
+  private static final Predicate<String> IS_INCREMENTAL_STATS_KEY =
+      new Predicate<String>() {
+        @Override
+        public boolean apply(String key) {
+          return key.startsWith(PartitionStatsUtil.INCREMENTAL_STATS_NUM_CHUNKS)
+              || key.startsWith(PartitionStatsUtil.INCREMENTAL_STATS_CHUNK_PREFIX);
+        }
+      };
+
   private final HdfsTable table_;
   private final List<LiteralExpr> partitionKeyValues_;
   // estimated number of rows in partition; -1: unknown
@@ -514,11 +584,10 @@ public class HdfsPartition implements FeFsPartition, PrunablePartition {
   @Nonnull
   private ImmutableList<byte[]> encodedFileDescriptors_;
   private HdfsPartitionLocationCompressor.Location location_;
-  private final static Logger LOG = LoggerFactory.getLogger(HdfsPartition.class);
-  private boolean isDirty_ = false;
+  private boolean isDirty_;
   // True if this partition is marked as cached. Does not necessarily mean the data is
   // cached.
-  private boolean isMarkedCached_ = false;
+  private boolean isMarkedCached_;
   private final TAccessLevel accessLevel_;
 
   // (k,v) pairs of parameters for this partition, stored in the HMS.
@@ -526,21 +595,54 @@ public class HdfsPartition implements FeFsPartition, PrunablePartition {
 
   // Binary representation of the TPartitionStats for this partition. Populated
   // when the partition is loaded and updated using setPartitionStatsBytes().
-  private byte[] partitionStats_ = null;
+  private byte[] partitionStats_;
 
   // True if partitionStats_ has intermediate_col_stats populated.
-  private boolean hasIncrementalStats_ = false;
+  private boolean hasIncrementalStats_;
 
-  // A predicate for checking if a given string is a key used for serializing
-  // TPartitionStats to HMS parameters.
-  private static Predicate<String> IS_INCREMENTAL_STATS_KEY =
-      new Predicate<String>() {
-        @Override
-        public boolean apply(String key) {
-          return key.startsWith(PartitionStatsUtil.INCREMENTAL_STATS_NUM_CHUNKS)
-              || key.startsWith(PartitionStatsUtil.INCREMENTAL_STATS_CHUNK_PREFIX);
-        }
-      };
+  private HdfsPartition(HdfsTable table,
+      org.apache.hadoop.hive.metastore.api.Partition msPartition,
+      List<LiteralExpr> partitionKeyValues,
+      HdfsStorageDescriptor fileFormatDescriptor,
+      List<HdfsPartition.FileDescriptor> fileDescriptors, long id,
+      HdfsPartitionLocationCompressor.Location location, TAccessLevel accessLevel) {
+    table_ = table;
+    if (msPartition == null) {
+      cachedMsPartitionDescriptor_ = null;
+    } else {
+      cachedMsPartitionDescriptor_ = new CachedHmsPartitionDescriptor(msPartition);
+    }
+    location_ = location;
+    partitionKeyValues_ = ImmutableList.copyOf(partitionKeyValues);
+    setFileDescriptors(fileDescriptors);
+    fileFormatDescriptor_ = fileFormatDescriptor;
+    id_ = id;
+    accessLevel_ = accessLevel;
+    if (msPartition != null && msPartition.getParameters() != null) {
+      isMarkedCached_ = HdfsCachingUtil.getCacheDirectiveId(
+          msPartition.getParameters()) != null;
+      hmsParameters_ = msPartition.getParameters();
+    } else {
+      hmsParameters_ = new HashMap<>();
+    }
+    extractAndCompressPartStats();
+    // Intern parameters after removing the incremental stats
+    hmsParameters_ = CatalogInterners.internParameters(hmsParameters_);
+  }
+
+  public HdfsPartition(HdfsTable table,
+      org.apache.hadoop.hive.metastore.api.Partition msPartition,
+      List<LiteralExpr> partitionKeyValues,
+      HdfsStorageDescriptor fileFormatDescriptor,
+      List<HdfsPartition.FileDescriptor> fileDescriptors,
+      TAccessLevel accessLevel) {
+    this(table, msPartition, partitionKeyValues, fileFormatDescriptor, fileDescriptors,
+        partitionIdCounter_.getAndIncrement(),
+        table.getPartitionLocationCompressor().new Location(msPartition != null
+                ? msPartition.getSd().getLocation()
+                : table.getLocation()),
+        accessLevel);
+  }
 
   @Override // FeFsPartition
   public HdfsStorageDescriptor getInputFormatDescriptor() {
@@ -613,6 +715,7 @@ public class HdfsPartition implements FeFsPartition, PrunablePartition {
   @Override
   public boolean isMarkedCached() { return isMarkedCached_; }
   void markCached() { isMarkedCached_ = true; }
+  private CachedHmsPartitionDescriptor cachedMsPartitionDescriptor_;
 
   /**
    * Updates the file format of this partition and sets the corresponding input/output
@@ -665,7 +768,7 @@ public class HdfsPartition implements FeFsPartition, PrunablePartition {
       // Convert the stats stored in the hmsParams map to a deflate-compressed in-memory
       // byte array format. After conversion, delete the entries in the hmsParams map
       // as they are not needed anymore.
-      Reference<Boolean> hasIncrStats = new Reference(false);
+      Reference<Boolean> hasIncrStats = new Reference<Boolean>(false);
       byte[] partitionStats =
           PartitionStatsUtil.partStatsBytesFromParameters(hmsParameters_, hasIncrStats);
       setPartitionStatsBytes(partitionStats, hasIncrStats.getRef());
@@ -703,6 +806,7 @@ public class HdfsPartition implements FeFsPartition, PrunablePartition {
     Preconditions.checkArgument(!IS_INCREMENTAL_STATS_KEY.apply(k));
     hmsParameters_.put(k, v);
   }
+
   public void putToParameters(Pair<String, String> kv) {
     putToParameters(kv.first, kv.second);
   }
@@ -719,17 +823,20 @@ public class HdfsPartition implements FeFsPartition, PrunablePartition {
   public List<LiteralExpr> getPartitionValues() { return partitionKeyValues_; }
   @Override // FeFsPartition
   public LiteralExpr getPartitionValue(int i) { return partitionKeyValues_.get(i); }
+
   @Override // FeFsPartition
   public List<HdfsPartition.FileDescriptor> getFileDescriptors() {
     // Return a lazily transformed list from our internal bytes storage.
     return Lists.transform(encodedFileDescriptors_, FileDescriptor.FROM_BYTES);
   }
+
   public void setFileDescriptors(List<FileDescriptor> descriptors) {
     // Store an eagerly transformed-and-copied list so that we drop the memory usage
     // of the flatbuffer wrapper.
     encodedFileDescriptors_ = ImmutableList.copyOf(Lists.transform(
         descriptors, FileDescriptor.TO_BYTES));
   }
+
   @Override // FeFsPartition
   public int getNumFileDescriptors() {
     return encodedFileDescriptors_.size();
@@ -738,65 +845,6 @@ public class HdfsPartition implements FeFsPartition, PrunablePartition {
   @Override
   public boolean hasFileDescriptors() { return !encodedFileDescriptors_.isEmpty(); }
 
-  // Struct-style class for caching all the information we need to reconstruct an
-  // HMS-compatible Partition object, for use in RPCs to the metastore. We do this rather
-  // than cache the Thrift partition object itself as the latter can be large - thanks
-  // mostly to the inclusion of the full FieldSchema list. This class is read-only - if
-  // any field can be mutated by Impala it should belong to HdfsPartition itself (see
-  // HdfsPartition.location_ for an example).
-  //
-  // TODO: Cache this descriptor in HdfsTable so that identical descriptors are shared
-  // between HdfsPartition instances.
-  // TODO: sdInputFormat and sdOutputFormat can be mutated by Impala when the file format
-  // of a partition changes; move these fields to HdfsPartition.
-  private static class CachedHmsPartitionDescriptor {
-    public String sdInputFormat;
-    public String sdOutputFormat;
-    public final boolean sdCompressed;
-    public final int sdNumBuckets;
-    public final org.apache.hadoop.hive.metastore.api.SerDeInfo sdSerdeInfo;
-    public final List<String> sdBucketCols;
-    public final List<org.apache.hadoop.hive.metastore.api.Order> sdSortCols;
-    public final Map<String, String> sdParameters;
-    public final int msCreateTime;
-    public final int msLastAccessTime;
-
-    public CachedHmsPartitionDescriptor(
-        org.apache.hadoop.hive.metastore.api.Partition msPartition) {
-      org.apache.hadoop.hive.metastore.api.StorageDescriptor sd = null;
-      if (msPartition != null) {
-        sd = msPartition.getSd();
-        CatalogInterners.internFieldsInPlace(sd);
-        msCreateTime = msPartition.getCreateTime();
-        msLastAccessTime = msPartition.getLastAccessTime();
-      } else {
-        msCreateTime = msLastAccessTime = 0;
-      }
-      if (sd != null) {
-        sdInputFormat = sd.getInputFormat();
-        sdOutputFormat = sd.getOutputFormat();
-        sdCompressed = sd.isCompressed();
-        sdNumBuckets = sd.getNumBuckets();
-        sdSerdeInfo = sd.getSerdeInfo();
-        sdBucketCols = ImmutableList.copyOf(sd.getBucketCols());
-        sdSortCols = ImmutableList.copyOf(sd.getSortCols());
-        sdParameters = ImmutableMap.copyOf(CatalogInterners.internParameters(
-            sd.getParameters()));
-      } else {
-        sdInputFormat = "";
-        sdOutputFormat = "";
-        sdCompressed = false;
-        sdNumBuckets = 0;
-        sdSerdeInfo = null;
-        sdBucketCols = ImmutableList.of();
-        sdSortCols = ImmutableList.of();
-        sdParameters = ImmutableMap.of();
-      }
-    }
-  }
-
-  private CachedHmsPartitionDescriptor cachedMsPartitionDescriptor_;
-
   public CachedHmsPartitionDescriptor getCachedMsPartitionDescriptor() {
     return cachedMsPartitionDescriptor_;
   }
@@ -834,50 +882,6 @@ public class HdfsPartition implements FeFsPartition, PrunablePartition {
     return partition;
   }
 
-  private HdfsPartition(HdfsTable table,
-      org.apache.hadoop.hive.metastore.api.Partition msPartition,
-      List<LiteralExpr> partitionKeyValues,
-      HdfsStorageDescriptor fileFormatDescriptor,
-      List<HdfsPartition.FileDescriptor> fileDescriptors, long id,
-      HdfsPartitionLocationCompressor.Location location, TAccessLevel accessLevel) {
-    table_ = table;
-    if (msPartition == null) {
-      cachedMsPartitionDescriptor_ = null;
-    } else {
-      cachedMsPartitionDescriptor_ = new CachedHmsPartitionDescriptor(msPartition);
-    }
-    location_ = location;
-    partitionKeyValues_ = ImmutableList.copyOf(partitionKeyValues);
-    setFileDescriptors(fileDescriptors);
-    fileFormatDescriptor_ = fileFormatDescriptor;
-    id_ = id;
-    accessLevel_ = accessLevel;
-    if (msPartition != null && msPartition.getParameters() != null) {
-      isMarkedCached_ = HdfsCachingUtil.getCacheDirectiveId(
-          msPartition.getParameters()) != null;
-      hmsParameters_ = msPartition.getParameters();
-    } else {
-      hmsParameters_ = new HashMap<>();
-    }
-    extractAndCompressPartStats();
-    // Intern parameters after removing the incremental stats
-    hmsParameters_ = CatalogInterners.internParameters(hmsParameters_);
-  }
-
-  public HdfsPartition(HdfsTable table,
-      org.apache.hadoop.hive.metastore.api.Partition msPartition,
-      List<LiteralExpr> partitionKeyValues,
-      HdfsStorageDescriptor fileFormatDescriptor,
-      List<HdfsPartition.FileDescriptor> fileDescriptors,
-      TAccessLevel accessLevel) {
-    this(table, msPartition, partitionKeyValues, fileFormatDescriptor, fileDescriptors,
-        partitionIdCounter_.getAndIncrement(),
-        table.getPartitionLocationCompressor().new Location(msPartition != null
-                ? msPartition.getSd().getLocation()
-                : table.getLocation()),
-        accessLevel);
-  }
-
   public static HdfsPartition prototypePartition(
       HdfsTable table, HdfsStorageDescriptor storageDescriptor) {
     List<LiteralExpr> emptyExprList = new ArrayList<>();
diff --git a/fe/src/main/java/org/apache/impala/common/InvalidValueException.java b/fe/src/main/java/org/apache/impala/common/InvalidValueException.java
index 9be30e1..e21fc73 100644
--- a/fe/src/main/java/org/apache/impala/common/InvalidValueException.java
+++ b/fe/src/main/java/org/apache/impala/common/InvalidValueException.java
@@ -28,7 +28,7 @@ public class InvalidValueException extends AnalysisException {
     super(msg);
   }
 
-  public InvalidValueException(String msg, NumberFormatException e) {
+  public InvalidValueException(String msg, Exception e) {
     super(msg, e);
   }
 
diff --git a/fe/src/main/java/org/apache/impala/common/TreeNode.java b/fe/src/main/java/org/apache/impala/common/TreeNode.java
index c5e7a87..b88ac3c 100644
--- a/fe/src/main/java/org/apache/impala/common/TreeNode.java
+++ b/fe/src/main/java/org/apache/impala/common/TreeNode.java
@@ -49,6 +49,7 @@ public abstract class TreeNode<NodeType extends TreeNode<NodeType>> {
   public boolean hasChild(int i) { return children_.size() > i; }
   public void setChild(int index, NodeType n) { children_.set(index, n); }
   public List<NodeType> getChildren() { return children_; }
+  public int getChildCount() { return children_.size(); }
 
   /**
    * Return list of all nodes of the tree rooted at 'this', obtained
diff --git a/fe/src/main/java/org/apache/impala/planner/HdfsPartitionPruner.java b/fe/src/main/java/org/apache/impala/planner/HdfsPartitionPruner.java
index e9deb44..1706520 100644
--- a/fe/src/main/java/org/apache/impala/planner/HdfsPartitionPruner.java
+++ b/fe/src/main/java/org/apache/impala/planner/HdfsPartitionPruner.java
@@ -97,8 +97,8 @@ public class HdfsPartitionPruner {
   }
 
   /**
-   * Return a list of partitions left after applying the conjuncts. Please note
-   * that conjuncts used for filtering will be removed from the list 'conjuncts' and
+   * Return a list of partitions left after applying the conjuncts.
+   * Conjuncts used for filtering will be removed from the list 'conjuncts' and
    * returned as the second item in the returned Pair. These expressions can be
    * shown in the EXPLAIN output.
    *
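The contract documented above -- filtering conjuncts are destructively removed from
the caller's list and handed back for EXPLAIN -- can be modeled in isolation. A
self-contained sketch of the same contract (these names are illustrative, not
Impala's API):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Iterator;
    import java.util.List;
    import java.util.function.Predicate;

    public class PruneContractDemo {
      // Removes the conjuncts the pruning step consumed and returns them,
      // mirroring the Pair contract in the javadoc above.
      static List<String> extractPartitionConjuncts(
          List<String> conjuncts, Predicate<String> onPartitionColumns) {
        List<String> used = new ArrayList<>();
        for (Iterator<String> it = conjuncts.iterator(); it.hasNext(); ) {
          String c = it.next();
          if (onPartitionColumns.test(c)) { used.add(c); it.remove(); }
        }
        return used;  // e.g. surfaced in EXPLAIN output
      }

      public static void main(String[] args) {
        List<String> conjuncts = new ArrayList<>(Arrays.asList("year=2019", "id > 10"));
        List<String> used = extractPartitionConjuncts(conjuncts, c -> c.startsWith("year"));
        System.out.println(used + " / remaining: " + conjuncts);
      }
    }
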
@@ -189,7 +189,11 @@ public class HdfsPartitionPruner {
     if (expr instanceof BinaryPredicate) {
       // Evaluate any constant expression in the BE
       try {
-        analyzer.getConstantFolder().rewrite(expr, analyzer);
+        // TODO: Analyzer should already have done constant folding
+        // and rewrite -- unless this is a copy of an expression taken
+        // before analysis, which would introduce its own issues.
+        expr = analyzer.getConstantFolder().rewrite(expr, analyzer);
+        Preconditions.checkState(expr instanceof BinaryPredicate);
       } catch (AnalysisException e) {
         LOG.error("Error evaluating constant expressions in the BE: " + e.getMessage());
         return false;
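The substantive fix in this hunk is capturing the return value: the old code invoked
rewrite() and discarded the result, leaving 'expr' unrewritten. A small stand-alone
illustration (the constantFold operator is a stand-in, not Impala's rewriter):

    import java.util.function.UnaryOperator;

    public class RewriteReturnDemo {
      public static void main(String[] args) {
        // Rewriters that return a new node rather than mutating in place
        // are no-ops unless the caller keeps the result.
        UnaryOperator<String> constantFold = e -> e.replace("1 + 1", "2");
        String expr = "part_col = 1 + 1";
        constantFold.apply(expr);          // result discarded: expr is unchanged
        expr = constantFold.apply(expr);   // fixed: reassign the rewritten expr
        System.out.println(expr);          // prints "part_col = 2"
      }
    }
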
@@ -238,6 +242,8 @@ public class HdfsPartitionPruner {
   private Set<Long> evalBinaryPredicate(Expr expr) {
     Preconditions.checkNotNull(expr);
     Preconditions.checkState(expr instanceof BinaryPredicate);
+    // TODO: Rewrite rules should have ensured that the slot
+    // is on the left.
     boolean isSlotOnLeft = true;
     if (Expr.IS_LITERAL.apply(expr.getChild(0))) isSlotOnLeft = false;
 
diff --git a/fe/src/main/java/org/apache/impala/planner/PlanNode.java b/fe/src/main/java/org/apache/impala/planner/PlanNode.java
index 2e410ce..d264133 100644
--- a/fe/src/main/java/org/apache/impala/planner/PlanNode.java
+++ b/fe/src/main/java/org/apache/impala/planner/PlanNode.java
@@ -864,7 +864,10 @@ abstract public class PlanNode extends TreeNode<PlanNode> {
     int numWithoutSel = 0;
     List<T> remaining = Lists.newArrayListWithCapacity(conjuncts.size());
     for (T e : conjuncts) {
-      Preconditions.checkState(e.hasCost(), e.toSql());
+      if (!e.hasCost()) {
+        // Avoid evaluating e.toSql() on every call; only build the message on failure.
+        Preconditions.checkState(false, e.toSql());
+      }
       totalCost += e.getCost();
       remaining.add(e);
       if (!e.hasSelectivity()) {
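The guard above exists because Java evaluates call arguments eagerly:
Preconditions.checkState(cond, e.toSql()) pays for toSql() on every conjunct even
when the check passes. A runnable demonstration of the difference:

    import com.google.common.base.Preconditions;

    public class EagerMessageDemo {
      static int calls = 0;
      static String expensiveMessage() { calls++; return "details"; }

      public static void main(String[] args) {
        boolean ok = true;
        // Eager: the message argument is computed although the check passes.
        Preconditions.checkState(ok, expensiveMessage());
        // Lazy: compute the message only on the failure path.
        if (!ok) Preconditions.checkState(false, expensiveMessage());
        System.out.println("expensiveMessage() ran " + calls + " time(s)");  // 1
      }
    }
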
diff --git a/fe/src/main/java/org/apache/impala/rewrite/SimplifyConditionalsRule.java b/fe/src/main/java/org/apache/impala/rewrite/SimplifyConditionalsRule.java
index 2d0c2d9..3556bbc 100644
--- a/fe/src/main/java/org/apache/impala/rewrite/SimplifyConditionalsRule.java
+++ b/fe/src/main/java/org/apache/impala/rewrite/SimplifyConditionalsRule.java
@@ -78,8 +78,8 @@ public class SimplifyConditionalsRule implements ExprRewriteRule {
     // 'select if (true, 0, sum(id)) from alltypes' != 'select 0 from alltypes'
     if (expr != simplified) {
       simplified.analyze(analyzer);
-      if (expr.contains(Expr.isAggregatePredicate())
-          && !simplified.contains(Expr.isAggregatePredicate())) {
+      if (expr.contains(Expr.IS_AGGREGATE)
+          && !simplified.contains(Expr.IS_AGGREGATE)) {
         return expr;
       }
     }
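Expr.IS_AGGREGATE replaces the isAggregatePredicate() factory call with a shared
constant, so no predicate object is allocated per contains() call. A toy analogue
of the pattern (Node and IS_AGGREGATE below are illustrative, not Impala's types):

    import java.util.Arrays;
    import java.util.List;
    import java.util.function.Predicate;

    public class PredicateConstantDemo {
      static class Node {
        final String kind;
        final List<Node> children;
        Node(String kind, Node... kids) { this.kind = kind; children = Arrays.asList(kids); }

        boolean contains(Predicate<Node> p) {
          if (p.test(this)) return true;
          for (Node c : children) if (c.contains(p)) return true;
          return false;
        }
      }

      // One shared instance instead of a new predicate per call site.
      static final Predicate<Node> IS_AGGREGATE = n -> "agg".equals(n.kind);

      public static void main(String[] args) {
        Node ifExpr = new Node("if", new Node("literal"), new Node("agg"));
        System.out.println(ifExpr.contains(IS_AGGREGATE));  // true
      }
    }
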
diff --git a/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java b/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
index 66f27cd..90369bb 100644
--- a/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
+++ b/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
@@ -238,23 +238,23 @@ import com.google.common.collect.Sets;
  * metastore out of this class.
  */
 public class CatalogOpExecutor {
+  private static final Logger LOG = Logger.getLogger(CatalogOpExecutor.class);
   // Format string for exceptions returned by Hive Metastore RPCs.
   private final static String HMS_RPC_ERROR_FORMAT_STR =
       "Error making '%s' RPC to Hive Metastore: ";
 
-  private final CatalogServiceCatalog catalog_;
-
-  // Lock used to ensure that CREATE[DROP] TABLE[DATABASE] operations performed in
-  // catalog_ and the corresponding RPC to apply the change in HMS are atomic.
-  private final Object metastoreDdlLock_ = new Object();
-  private static final Logger LOG = Logger.getLogger(CatalogOpExecutor.class);
-
   // The maximum number of partitions to update in one Hive Metastore RPC.
   // Used when persisting the results of COMPUTE STATS statements.
   // It is also used as an upper limit for the number of partitions allowed in one ADD
   // PARTITION statement.
   public final static short MAX_PARTITION_UPDATES_PER_RPC = 500;
 
+  private final CatalogServiceCatalog catalog_;
+
+  // Lock used to ensure that CREATE[DROP] TABLE[DATABASE] operations performed in
+  // catalog_ and the corresponding RPC to apply the change in HMS are atomic.
+  private final Object metastoreDdlLock_ = new Object();
+
   public CatalogOpExecutor(CatalogServiceCatalog catalog) {
     catalog_ = catalog;
   }
@@ -766,13 +766,13 @@ public class CatalogOpExecutor {
       int numColumns =
           params.isSetColumn_stats() ? params.column_stats.size() : 0;
       LOG.info(String.format(
-          "Updating stats for table %s: table-stats=%s partitions=%s column-stats=%s",
+          "Updating stats for table %s: table-stats=%s partitions=%d column-stats=%d",
           tableName, params.isSetTable_stats(), numPartitions, numColumns));
     }
 
     // Update column stats.
     ColumnStatistics colStats = null;
-    numUpdatedColumns.setRef(Long.valueOf(0));
+    numUpdatedColumns.setRef(0L);
     if (params.isSetColumn_stats()) {
       colStats = createHiveColStats(params, table);
       if (colStats.getStatsObjSize() > 0) {
@@ -783,7 +783,7 @@ public class CatalogOpExecutor {
               "updateTableColumnStatistics"), e);
         }
       }
-      numUpdatedColumns.setRef(Long.valueOf(colStats.getStatsObjSize()));
+      numUpdatedColumns.setRef((long) colStats.getStatsObjSize());
     }
 
     // Deep copy the msTbl to avoid updating our cache before successfully persisting
@@ -810,11 +810,11 @@ public class CatalogOpExecutor {
 
     applyAlterTable(msTbl, false);
 
-    numUpdatedPartitions.setRef(Long.valueOf(0));
+    numUpdatedPartitions.setRef(0L);
     if (modifiedParts != null) {
-      numUpdatedPartitions.setRef(Long.valueOf(modifiedParts.size()));
+      numUpdatedPartitions.setRef((long) modifiedParts.size());
     } else if (params.isSetTable_stats()) {
-      numUpdatedPartitions.setRef(Long.valueOf(1));
+      numUpdatedPartitions.setRef(1L);
     }
   }
 
@@ -862,7 +862,7 @@ public class CatalogOpExecutor {
       // existing state of the metadata. See IMPALA-2201.
       long numRows = partitionStats.stats.num_rows;
       if (LOG.isTraceEnabled()) {
-        LOG.trace(String.format("Updating stats for partition %s: numRows=%s",
+        LOG.trace(String.format("Updating stats for partition %s: numRows=%d",
             partition.getValuesAsString(), numRows));
       }
       PartitionStatsUtil.partStatsToPartition(partitionStats, partition);
@@ -917,8 +917,8 @@ public class CatalogOpExecutor {
               ndvCap, entry.getValue(), tableCol.getType());
       if (colStatsData == null) continue;
       if (LOG.isTraceEnabled()) {
-        LOG.trace(String.format("Updating column stats for %s: numDVs=%s numNulls=%s " +
-            "maxSize=%s avgSize=%s", colName, entry.getValue().getNum_distinct_values(),
+        LOG.trace(String.format("Updating column stats for %s: numDVs=%d numNulls=%d " +
+            "maxSize=%d avgSize=%.2f", colName, entry.getValue().getNum_distinct_values(),
             entry.getValue().getNum_nulls(), entry.getValue().getMax_size(),
             entry.getValue().getAvg_size()));
       }
@@ -3749,7 +3749,7 @@ public class CatalogOpExecutor {
       throws ImpalaRuntimeException, CatalogException {
     Db db = catalog_.getDb(dbName);
     if (db == null) {
-      throw new CatalogException("Database: " + db.getName() + " does not exist.");
+      throw new CatalogException("Database: " + dbName + " does not exist.");
     }
     synchronized (metastoreDdlLock_) {
       Database msDb = db.getMetaStoreDb();
@@ -3782,7 +3782,7 @@ public class CatalogOpExecutor {
       TDdlExecResponse response) throws CatalogException, ImpalaRuntimeException {
     Db db = catalog_.getDb(dbName);
     if (db == null) {
-      throw new CatalogException("Database: " + db.getName() + " does not exist.");
+      throw new CatalogException("Database: " + dbName + " does not exist.");
     }
     Preconditions.checkNotNull(params.owner_name);
     Preconditions.checkNotNull(params.owner_type);
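Two recurring cleanups in this file: %d and %.2f replace %s so the format string
documents the expected argument type (and fails fast on a mismatch), and literals
such as 0L replace Long.valueOf(0), which autoboxing makes redundant. For example:

    public class FormatAndBoxingDemo {
      public static void main(String[] args) {
        int numPartitions = 500;
        double avgSize = 12.3456;
        // %d requires an integral argument; %.2f rounds a double to two places.
        System.out.println(
            String.format("partitions=%d avgSize=%.2f", numPartitions, avgSize));
        // Autoboxing converts 0L to a Long, so the explicit valueOf call is noise.
        Long updated = 0L;
        System.out.println(updated);
      }
    }
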
diff --git a/fe/src/main/java/org/apache/impala/util/MaxRowsProcessedVisitor.java b/fe/src/main/java/org/apache/impala/util/MaxRowsProcessedVisitor.java
index e338ecb..7f51f8c 100644
--- a/fe/src/main/java/org/apache/impala/util/MaxRowsProcessedVisitor.java
+++ b/fe/src/main/java/org/apache/impala/util/MaxRowsProcessedVisitor.java
@@ -17,12 +17,13 @@
 
 package org.apache.impala.util;
 
-import com.google.common.base.Preconditions;
 import org.apache.impala.planner.JoinNode;
 import org.apache.impala.planner.PlanFragment;
 import org.apache.impala.planner.PlanNode;
 import org.apache.impala.planner.ScanNode;
 
+import com.google.common.base.Preconditions;
+
 /**
  * Returns the maximum number of rows processed by any node in a given plan tree
  */
@@ -31,13 +32,13 @@ public class MaxRowsProcessedVisitor implements Visitor<PlanNode> {
   // True if we should abort because we don't have valid estimates
   // for a plan node.
   private boolean valid_ = true;
-  private boolean foundJoinNode_ = false;
+  private boolean foundJoinNode_;
 
   // Max number of rows processed across all instances of a plan node.
-  private long maxRowsProcessed_ = 0;
+  private long maxRowsProcessed_;
 
   // Max number of rows processed per backend impala daemon for a plan node.
-  private long maxRowsProcessedPerNode_ = 0;
+  private long maxRowsProcessedPerNode_;
 
   @Override
   public void visit(PlanNode caller) {
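Dropping the '= false' and '= 0' initializers is safe: the JLS guarantees exactly
those default values for fields (unlike local variables, which must be definitely
assigned before use). A quick check:

    public class FieldDefaultsDemo {
      static boolean foundJoinNode;   // defaults to false
      static long maxRowsProcessed;   // defaults to 0L

      public static void main(String[] args) {
        // Fields receive well-defined defaults, so zero/false initializers
        // are redundant.
        System.out.println(foundJoinNode + " " + maxRowsProcessed);  // false 0
      }
    }
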
diff --git a/fe/src/test/java/org/apache/impala/analysis/AnalyzeExprsTest.java b/fe/src/test/java/org/apache/impala/analysis/AnalyzeExprsTest.java
index a0d34f8..ecd620b 100644
--- a/fe/src/test/java/org/apache/impala/analysis/AnalyzeExprsTest.java
+++ b/fe/src/test/java/org/apache/impala/analysis/AnalyzeExprsTest.java
@@ -2265,7 +2265,7 @@ public class AnalyzeExprsTest extends AnalyzerTest {
     AnalyzesOk(inPredStr.toString() + ")");
     inPredStr.append(", " + 1234);
     AnalysisError(inPredStr.toString() + ")",
-        String.format("Exceeded the maximum number of child expressions (%s).\n" +
+        String.format("Exceeded the maximum number of child expressions (%d).\n" +
         "Expression has %s children",  Expr.EXPR_CHILDREN_LIMIT,
         Expr.EXPR_CHILDREN_LIMIT + 1));
 
@@ -2277,7 +2277,7 @@ public class AnalyzeExprsTest extends AnalyzerTest {
     AnalyzesOk(caseExprStr.toString() + " end");
     caseExprStr.append(" when true then 1");
     AnalysisError(caseExprStr.toString() + " end",
-        String.format("Exceeded the maximum number of child expressions (%s).\n" +
+        String.format("Exceeded the maximum number of child expressions (%d).\n" +
         "Expression has %s children", Expr.EXPR_CHILDREN_LIMIT,
         Expr.EXPR_CHILDREN_LIMIT + 2));
   }
@@ -2353,7 +2353,7 @@ public class AnalyzeExprsTest extends AnalyzerTest {
     testDecimalExpr(expr, expectedType, expectedType);
   }
 
-  // Verify that mod and % returns the same type when it's DECIMAL V2 mdoe.
+  // Verify that mod and % return the same type in DECIMAL V2 mode.
   // See IMPALA-6202.
   @Test
   public void TestModReturnType() {
diff --git a/fe/src/test/java/org/apache/impala/catalog/HdfsPartitionTest.java b/fe/src/test/java/org/apache/impala/catalog/HdfsPartitionTest.java
index 72d0cde..8798fa7 100644
--- a/fe/src/test/java/org/apache/impala/catalog/HdfsPartitionTest.java
+++ b/fe/src/test/java/org/apache/impala/catalog/HdfsPartitionTest.java
@@ -110,6 +110,7 @@ public class HdfsPartitionTest {
     assertTrue(Integer.signum(comparePartitionKeyValues(o1, o2)) ==
         -Integer.signum(comparePartitionKeyValues(o2, o1)));
   }
+
   private void verifyTransitive(List<LiteralExpr> o1, List<LiteralExpr> o2,
                                 List<LiteralExpr> o3) {
     // ((compare(x, y)>0) && (compare(y, z)>0)) implies compare(x, z)>0
@@ -118,10 +119,12 @@ public class HdfsPartitionTest {
       assertTrue(comparePartitionKeyValues(o1, o3) > 0);
     }
   }
+
   private void verifyReflexive(List<LiteralExpr> o1) {
     // (compare(x, x)==0) is always true
     assertTrue(comparePartitionKeyValues(o1, o1) == 0);
   }
+
   private void verifyAntiSymmetric(List<LiteralExpr> o1, List<LiteralExpr> o2,
                                    List<LiteralExpr> o3) {
     // compare(x, y)==0 implies that sgn(compare(x, z))==sgn(compare(y, z)) for all z.
diff --git a/fe/src/test/java/org/apache/impala/catalog/PartialCatalogInfoTest.java b/fe/src/test/java/org/apache/impala/catalog/PartialCatalogInfoTest.java
index 657509a..7e3eb3d 100644
--- a/fe/src/test/java/org/apache/impala/catalog/PartialCatalogInfoTest.java
+++ b/fe/src/test/java/org/apache/impala/catalog/PartialCatalogInfoTest.java
@@ -106,7 +106,7 @@ public class PartialCatalogInfoTest {
       tasksToWaitFor.add(threadPoolExecutor.submit(new
           CallableGetPartialCatalogObjectRequest(request)));
     }
-    for (Future task: tasksToWaitFor) task.get();
+    for (Future<?> task: tasksToWaitFor) task.get();
   }
 
   @Test
@@ -243,7 +243,7 @@ public class PartialCatalogInfoTest {
     // Uses a callable<Void> instead of Runnable because junit does not catch exceptions
     // from threads other than the main thread. Callable here makes sure the exception
     // is propagated to the main thread.
-    final Callable<Void> assertReqCount = new Callable() {
+    final Callable<Void> assertReqCount = new Callable<Void>() {
       @Override
       public Void call() throws Exception {
         while (!requestsFinished.get()) {
@@ -254,7 +254,7 @@ public class PartialCatalogInfoTest {
         return null;
       }
     };
-    Future assertThreadTask;
+    Future<Void> assertThreadTask;
     try {
       // Assert the request count in a tight loop.
       assertThreadTask = Executors.newSingleThreadExecutor().submit(assertReqCount);
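Parameterizing the raw Callable and Future types removes unchecked-conversion
warnings and gives get() a useful static type, at no runtime cost:

    import java.util.concurrent.Callable;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;

    public class TypedFutureDemo {
      public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newSingleThreadExecutor();
        Callable<Void> task = new Callable<Void>() {
          @Override public Void call() { return null; }
        };
        Future<Void> f = pool.submit(task);  // typed: no raw-type warning
        f.get();                             // returns Void; no cast needed
        pool.shutdown();
      }
    }
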
diff --git a/fe/src/test/java/org/apache/impala/catalog/events/MetastoreEventsProcessorTest.java b/fe/src/test/java/org/apache/impala/catalog/events/MetastoreEventsProcessorTest.java
index bcbcd43..bba88c9 100644
--- a/fe/src/test/java/org/apache/impala/catalog/events/MetastoreEventsProcessorTest.java
+++ b/fe/src/test/java/org/apache/impala/catalog/events/MetastoreEventsProcessorTest.java
@@ -24,23 +24,19 @@ import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
-import com.google.common.collect.Lists;
 import java.io.File;
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.Collection;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+
 import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.GetPartitionsFilterSpec;
 import org.apache.hadoop.hive.metastore.api.GetPartitionsRequest;
-import org.apache.hadoop.hive.metastore.api.GetPartitionsResponse;
 import org.apache.hadoop.hive.metastore.api.NotificationEvent;
 import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.PartitionFilterMode;
 import org.apache.hadoop.hive.metastore.api.PrincipalType;
 import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
 import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder;
@@ -52,7 +48,6 @@ import org.apache.impala.catalog.HdfsFileFormat;
 import org.apache.impala.catalog.HdfsTable;
 import org.apache.impala.catalog.IncompleteTable;
 import org.apache.impala.catalog.MetaStoreClientPool.MetaStoreClient;
-import org.apache.impala.catalog.PrunablePartition;
 import org.apache.impala.catalog.Table;
 import org.apache.impala.catalog.events.MetastoreEventUtils.MetastoreEvent;
 import org.apache.impala.catalog.events.MetastoreEventUtils.MetastoreEventType;
@@ -85,6 +80,8 @@ import org.junit.BeforeClass;
 import org.junit.Ignore;
 import org.junit.Test;
 
+import com.google.common.collect.Lists;
+
 /**
  * Main test class to cover the functionality of MetastoreEventProcessor. In order to make
  * the test deterministic, this test relies on the fact the default value of
diff --git a/tests/common/skip.py b/tests/common/skip.py
index b58f057..709c446 100644
--- a/tests/common/skip.py
+++ b/tests/common/skip.py
@@ -39,7 +39,7 @@ from tests.util.filesystem_utils import (
 
 class SkipIfS3:
 
-  # These ones are skipped due to product limitations.
+  # These are skipped due to product limitations.
   caching = pytest.mark.skipif(IS_S3, reason="SET CACHED not implemented for S3")
   hive = pytest.mark.skipif(IS_S3, reason="Hive doesn't work with S3")
   hdfs_block_size = pytest.mark.skipif(IS_S3, reason="S3 uses its own block size")
@@ -50,7 +50,7 @@ class SkipIfS3:
   empty_directory = pytest.mark.skipif(IS_S3,
       reason="Empty directories are not supported on S3")
 
-  # These ones need test infra work to re-enable.
+  # These need test infra work to re-enable.
   udfs = pytest.mark.skipif(IS_S3, reason="udas/udfs not copied to S3")
   datasrc = pytest.mark.skipif(IS_S3, reason="data sources not copied to S3")
   hbase = pytest.mark.skipif(IS_S3, reason="HBase not started with S3")
@@ -62,7 +62,7 @@ class SkipIfS3:
 
 class SkipIfABFS:
 
-  # These ones are skipped due to product limitations.
+  # These are skipped due to product limitations.
   caching = pytest.mark.skipif(IS_ABFS, reason="SET CACHED not implemented for ABFS")
   hive = pytest.mark.skipif(IS_ABFS, reason="Hive doesn't work with ABFS")
   hdfs_block_size = pytest.mark.skipif(IS_ABFS, reason="ABFS uses its own block size")
@@ -73,7 +73,7 @@ class SkipIfABFS:
   trash = pytest.mark.skipif(IS_ABFS,
       reason="Drop/purge not working as expected on ABFS, IMPALA-7726")
 
-  # These ones need test infra work to re-enable.
+  # These need test infra work to re-enable.
   udfs = pytest.mark.skipif(IS_ABFS, reason="udas/udfs not copied to ABFS")
   datasrc = pytest.mark.skipif(IS_ABFS, reason="data sources not copied to ABFS")
   hbase = pytest.mark.skipif(IS_ABFS, reason="HBase not started with ABFS")
@@ -82,7 +82,7 @@ class SkipIfABFS:
 
 class SkipIfADLS:
 
-  # These ones are skipped due to product limitations.
+  # These are skipped due to product limitations.
   caching = pytest.mark.skipif(IS_ADLS, reason="SET CACHED not implemented for ADLS")
   hive = pytest.mark.skipif(IS_ADLS, reason="Hive doesn't work with ADLS")
   hdfs_block_size = pytest.mark.skipif(IS_ADLS, reason="ADLS uses its own block size")
@@ -91,7 +91,7 @@ class SkipIfADLS:
   hdfs_encryption = pytest.mark.skipif(IS_ADLS,
       reason="HDFS encryption is not supported with ADLS")
 
-  # These ones need test infra work to re-enable.
+  # These need test infra work to re-enable.
   udfs = pytest.mark.skipif(IS_ADLS, reason="udas/udfs not copied to ADLS")
   datasrc = pytest.mark.skipif(IS_ADLS, reason="data sources not copied to ADLS")
   hbase = pytest.mark.skipif(IS_ADLS, reason="HBase not started with ADLS")
@@ -132,7 +132,7 @@ class SkipIfIsilon:
   jira = partial(pytest.mark.skipif, IS_ISILON)
 
 class SkipIfLocal:
-  # These ones are skipped due to product limitations.
+  # These are skipped due to product limitations.
   caching = pytest.mark.skipif(IS_LOCAL,
       reason="HDFS caching not supported on local file system")
   hdfs_blocks = pytest.mark.skipif(IS_LOCAL,
@@ -148,7 +148,7 @@ class SkipIfLocal:
   hdfs_fd_caching = pytest.mark.skipif(IS_LOCAL,
       reason="HDFS file handle caching not supported for local non-HDFS files")
 
-  # These ones need test infra work to re-enable.
+  # These need test infra work to re-enable.
   hbase = pytest.mark.skipif(IS_LOCAL,
       reason="HBase not started when using local file system")
   hdfs_client = pytest.mark.skipif(IS_LOCAL,
@@ -159,7 +159,7 @@ class SkipIfLocal:
       reason="Tests rely on the root directory")
 
 class SkipIfNotHdfsMinicluster:
-  # These ones are skipped when not running against a local HDFS mini-cluster.
+  # These are skipped when not running against a local HDFS mini-cluster.
   plans = pytest.mark.skipif(not IS_HDFS or pytest.config.option.testing_remote_cluster,
       reason="Test assumes plans from local HDFS mini-cluster")
   tuned_for_minicluster = pytest.mark.skipif(
diff --git a/tests/util/test_file_parser.py b/tests/util/test_file_parser.py
index 1a0cfdb..bb78932 100644
--- a/tests/util/test_file_parser.py
+++ b/tests/util/test_file_parser.py
@@ -100,7 +100,7 @@ def parse_query_test_file(file_name, valid_section_names=None, encoding=None):
       skip_unknown_sections=False)
 
 def parse_table_constraints(constraints_file):
-  """Reads a table contraints file, if one exists"""
+  """Reads a table constraints file, if one exists"""
   schema_include = defaultdict(list)
   schema_exclude = defaultdict(list)
   schema_only = defaultdict(list)