Posted to commits@hive.apache.org by jc...@apache.org on 2018/07/19 15:54:04 UTC

[7/7] hive git commit: HIVE-19360: CBO: Add an "optimizedSQL" to QueryPlan object (Gopal V, reviewed by Jesus Camacho Rodriguez)

HIVE-19360: CBO: Add an "optimizedSQL" to QueryPlan object (Gopal V, reviewed by Jesus Camacho Rodriguez)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/6d15ce49
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/6d15ce49
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/6d15ce49

Branch: refs/heads/master
Commit: 6d15ce49a65ca673bdebf176d90a2accdaedbcaf
Parents: 9eb8f17
Author: Gopal V <go...@apache.org>
Authored: Mon Jul 9 17:43:56 2018 -0700
Committer: Jesus Camacho Rodriguez <jc...@apache.org>
Committed: Thu Jul 19 08:53:26 2018 -0700

----------------------------------------------------------------------
 .../insert_overwrite_directory.q.out            |  2 +
 .../write_final_output_blobstore.q.out          |  8 ++
 .../java/org/apache/hadoop/hive/ql/Context.java | 10 +++
 .../java/org/apache/hadoop/hive/ql/Driver.java  |  5 +-
 .../org/apache/hadoop/hive/ql/QueryPlan.java    |  9 ++
 .../apache/hadoop/hive/ql/exec/ExplainTask.java | 13 ++-
 .../apache/hadoop/hive/ql/hooks/ATSHook.java    |  3 +-
 .../hive/ql/hooks/HiveProtoLoggingHook.java     |  3 +-
 .../metadata/HiveMaterializedViewsRegistry.java | 11 ++-
 .../ql/optimizer/calcite/RelOptHiveTable.java   | 73 ++++++++++++---
 .../hadoop/hive/ql/parse/CalcitePlanner.java    | 86 ++++++++++++++----
 .../hive/ql/parse/ExplainSemanticAnalyzer.java  |  3 +-
 .../apache/hadoop/hive/ql/plan/ExplainWork.java | 14 ++-
 .../hadoop/hive/ql/exec/TestExplainTask.java    |  2 +-
 .../parse/TestUpdateDeleteSemanticAnalyzer.java |  2 +-
 .../results/clientpositive/acid_nullscan.q.out  |  3 +
 .../alter_partition_coltype.q.out               | 18 ++++
 .../results/clientpositive/ambiguitycheck.q.out |  3 +
 .../analyze_table_null_partition.q.out          |  2 +
 .../clientpositive/autoColumnStats_1.q.out      |  2 +
 .../clientpositive/autoColumnStats_2.q.out      |  2 +
 .../auto_join_reordering_values.q.out           | 17 ++++
 .../test/results/clientpositive/bucket1.q.out   |  2 +
 .../test/results/clientpositive/bucket2.q.out   |  2 +
 .../test/results/clientpositive/bucket3.q.out   |  2 +
 .../clientpositive/bucket_map_join_spark1.q.out | 14 +++
 .../clientpositive/bucket_map_join_spark2.q.out | 14 +++
 .../clientpositive/bucket_map_join_spark3.q.out | 14 +++
 .../clientpositive/bucket_map_join_spark4.q.out | 20 +++++
 .../test/results/clientpositive/combine2.q.out  |  3 +
 .../test/results/clientpositive/comments.q.out  |  7 ++
 .../constantPropagateForSubQuery.q.out          |  6 ++
 .../dynamic_partition_skip_default.q.out        |  9 ++
 .../encryption_join_unencrypted_tbl.q.out       |  7 ++
 ...on_join_with_different_encryption_keys.q.out |  7 ++
 .../erasurecoding/erasure_explain.q.out         |  4 +
 .../extrapolate_part_stats_date.q.out           |  2 +
 .../extrapolate_part_stats_full.q.out           |  8 ++
 .../extrapolate_part_stats_partial.q.out        | 12 +++
 .../results/clientpositive/filter_aggr.q.out    |  3 +
 .../clientpositive/filter_join_breaktask.q.out  | 10 +++
 .../results/clientpositive/filter_union.q.out   |  7 ++
 .../clientpositive/groupby_sort_1_23.q.out      | 71 +++++++++++++++
 .../results/clientpositive/groupby_sort_6.q.out | 12 +++
 .../clientpositive/groupby_sort_skew_1_23.q.out | 71 +++++++++++++++
 .../test/results/clientpositive/input23.q.out   |  8 ++
 ql/src/test/results/clientpositive/input4.q.out |  2 +-
 .../test/results/clientpositive/input42.q.out   |  9 ++
 .../results/clientpositive/input_part1.q.out    |  3 +
 .../results/clientpositive/input_part9.q.out    |  3 +
 ql/src/test/results/clientpositive/join17.q.out |  7 ++
 ql/src/test/results/clientpositive/join26.q.out | 10 +++
 ql/src/test/results/clientpositive/join32.q.out | 10 +++
 ql/src/test/results/clientpositive/join33.q.out | 10 +++
 ql/src/test/results/clientpositive/join34.q.out | 11 +++
 ql/src/test/results/clientpositive/join35.q.out | 13 +++
 ql/src/test/results/clientpositive/join9.q.out  |  7 ++
 .../clientpositive/join_filters_overlap.q.out   | 50 +++++++++++
 .../clientpositive/list_bucket_dml_1.q.out      |  6 ++
 .../clientpositive/list_bucket_dml_11.q.out     |  5 ++
 .../clientpositive/list_bucket_dml_12.q.out     |  8 ++
 .../clientpositive/list_bucket_dml_13.q.out     |  5 ++
 .../clientpositive/list_bucket_dml_14.q.out     |  5 ++
 .../clientpositive/list_bucket_dml_2.q.out      |  6 ++
 .../clientpositive/list_bucket_dml_3.q.out      |  6 ++
 .../clientpositive/list_bucket_dml_4.q.out      |  9 ++
 .../clientpositive/list_bucket_dml_5.q.out      |  6 ++
 .../clientpositive/list_bucket_dml_6.q.out      |  9 ++
 .../clientpositive/list_bucket_dml_7.q.out      |  9 ++
 .../clientpositive/list_bucket_dml_8.q.out      |  6 ++
 .../clientpositive/list_bucket_dml_9.q.out      |  9 ++
 .../list_bucket_query_multiskew_1.q.out         | 12 +++
 .../list_bucket_query_multiskew_2.q.out         |  9 ++
 .../list_bucket_query_multiskew_3.q.out         | 12 +++
 .../list_bucket_query_oneskew_1.q.out           |  9 ++
 .../list_bucket_query_oneskew_2.q.out           | 14 +++
 .../list_bucket_query_oneskew_3.q.out           |  3 +
 .../llap/acid_bucket_pruning.q.out              |  3 +
 .../clientpositive/llap/autoColumnStats_1.q.out |  2 +
 .../clientpositive/llap/autoColumnStats_2.q.out |  2 +
 .../llap/auto_sortmerge_join_1.q.out            | 21 +++++
 .../llap/auto_sortmerge_join_11.q.out           | 14 +++
 .../llap/auto_sortmerge_join_12.q.out           | 12 +++
 .../llap/auto_sortmerge_join_2.q.out            | 14 +++
 .../llap/auto_sortmerge_join_3.q.out            | 21 +++++
 .../llap/auto_sortmerge_join_4.q.out            | 21 +++++
 .../llap/auto_sortmerge_join_5.q.out            | 21 +++++
 .../llap/auto_sortmerge_join_7.q.out            | 21 +++++
 .../llap/auto_sortmerge_join_8.q.out            | 21 +++++
 .../results/clientpositive/llap/bucket2.q.out   |  2 +
 .../results/clientpositive/llap/bucket3.q.out   |  2 +
 .../results/clientpositive/llap/bucket4.q.out   |  2 +
 .../clientpositive/llap/bucket_many.q.out       |  2 +
 .../llap/bucket_num_reducers.q.out              |  2 +
 .../llap/bucket_num_reducers2.q.out             |  2 +
 .../clientpositive/llap/bucketmapjoin1.q.out    | 28 ++++++
 .../clientpositive/llap/bucketmapjoin2.q.out    | 21 +++++
 .../clientpositive/llap/bucketmapjoin3.q.out    | 14 +++
 .../clientpositive/llap/bucketmapjoin4.q.out    | 14 +++
 .../clientpositive/llap/bucketpruning1.q.out    | 54 ++++++++++++
 .../llap/current_date_timestamp.q.out           |  2 +
 .../llap/disable_merge_for_bucketing.q.out      |  2 +
 .../llap/dynamic_semijoin_reduction.q.out       |  7 ++
 .../llap/dynamic_semijoin_user_level.q.out      |  7 ++
 .../extrapolate_part_stats_partial_ndv.q.out    |  6 ++
 .../llap/filter_join_breaktask.q.out            | 10 +++
 .../clientpositive/llap/filter_union.q.out      |  7 ++
 .../clientpositive/llap/join32_lessSize.q.out   | 44 +++++++++
 .../llap/list_bucket_dml_10.q.out               |  2 +
 .../clientpositive/llap/mapjoin_mapjoin.q.out   | 10 +++
 .../clientpositive/llap/metadataonly1.q.out     | 27 ++++++
 .../clientpositive/llap/partition_pruning.q.out |  9 ++
 .../clientpositive/llap/ppd_union_view.q.out    | 24 +++++
 .../clientpositive/llap/smb_mapjoin_15.q.out    | 36 ++++++++
 .../results/clientpositive/llap/stats11.q.out   | 14 +++
 .../llap/tez_fixed_bucket_pruning.q.out         | 32 +++++++
 .../llap/tez_join_result_complex.q.out          | 14 +++
 .../clientpositive/llap/unionDistinct_1.q.out   | 74 ++++++++++++++++
 .../clientpositive/llap/union_stats.q.out       | 93 +++++++++++---------
 .../clientpositive/llap/vectorization_0.q.out   |  9 ++
 .../clientpositive/louter_join_ppr.q.out        | 28 ++++++
 ql/src/test/results/clientpositive/macro.q.out  |  9 ++
 .../clientpositive/mapjoin_mapjoin.q.out        | 10 +++
 ql/src/test/results/clientpositive/merge3.q.out |  5 ++
 .../offset_limit_global_optimizer.q.out         | 20 +++++
 .../results/clientpositive/outer_join_ppr.q.out | 14 +++
 .../parquet_vectorization_0.q.out               |  9 ++
 ql/src/test/results/clientpositive/pcr.q.out    | 82 +++++++++++++++++
 .../test/results/clientpositive/plan_json.q.out |  2 +-
 .../results/clientpositive/pointlookup2.q.out   | 56 ++++++++++++
 .../results/clientpositive/pointlookup3.q.out   | 53 +++++++++++
 .../results/clientpositive/pointlookup4.q.out   |  4 +
 .../clientpositive/ppd_join_filter.q.out        | 36 ++++++++
 ql/src/test/results/clientpositive/ppd_vc.q.out | 11 +++
 .../clientpositive/ppr_allchildsarenull.q.out   |  6 ++
 .../test/results/clientpositive/push_or.q.out   |  4 +
 .../clientpositive/rand_partitionpruner1.q.out  |  3 +
 .../clientpositive/rand_partitionpruner2.q.out  |  3 +
 .../clientpositive/rand_partitionpruner3.q.out  |  6 ++
 .../clientpositive/router_join_ppr.q.out        | 28 ++++++
 .../clientpositive/serde_user_properties.q.out  |  8 ++
 .../spark/auto_join_reordering_values.q.out     | 17 ++++
 .../spark/auto_sortmerge_join_1.q.out           | 21 +++++
 .../spark/auto_sortmerge_join_12.q.out          | 12 +++
 .../spark/auto_sortmerge_join_2.q.out           | 14 +++
 .../spark/auto_sortmerge_join_3.q.out           | 21 +++++
 .../spark/auto_sortmerge_join_4.q.out           | 21 +++++
 .../spark/auto_sortmerge_join_5.q.out           | 21 +++++
 .../spark/auto_sortmerge_join_7.q.out           | 21 +++++
 .../spark/auto_sortmerge_join_8.q.out           | 21 +++++
 .../results/clientpositive/spark/bucket2.q.out  |  2 +
 .../results/clientpositive/spark/bucket3.q.out  |  2 +
 .../results/clientpositive/spark/bucket4.q.out  |  2 +
 .../clientpositive/spark/bucket4.q.out_spark    |  2 +
 .../spark/bucket_map_join_spark1.q.out          | 14 +++
 .../spark/bucket_map_join_spark2.q.out          | 14 +++
 .../spark/bucket_map_join_spark3.q.out          | 14 +++
 .../spark/bucket_map_join_spark4.q.out          | 20 +++++
 .../clientpositive/spark/bucketmapjoin1.q.out   | 28 ++++++
 .../clientpositive/spark/bucketmapjoin2.q.out   | 21 +++++
 .../clientpositive/spark/bucketmapjoin3.q.out   | 14 +++
 .../clientpositive/spark/bucketmapjoin4.q.out   | 14 +++
 .../spark/disable_merge_for_bucketing.q.out     |  2 +
 .../disable_merge_for_bucketing.q.out_spark     |  2 +
 .../spark/filter_join_breaktask.q.out           | 10 +++
 .../spark/groupby_sort_1_23.q.out               | 71 +++++++++++++++
 .../spark/groupby_sort_skew_1_23.q.out          | 71 +++++++++++++++
 .../results/clientpositive/spark/join17.q.out   |  7 ++
 .../results/clientpositive/spark/join26.q.out   | 10 +++
 .../results/clientpositive/spark/join32.q.out   | 10 +++
 .../clientpositive/spark/join32_lessSize.q.out  | 44 +++++++++
 .../results/clientpositive/spark/join33.q.out   | 10 +++
 .../results/clientpositive/spark/join34.q.out   | 11 +++
 .../results/clientpositive/spark/join35.q.out   | 13 +++
 .../results/clientpositive/spark/join9.q.out    |  7 ++
 .../spark/join_filters_overlap.q.out            | 50 +++++++++++
 .../spark/list_bucket_dml_10.q.out              |  2 +
 .../spark/list_bucket_dml_2.q.out               |  6 ++
 .../clientpositive/spark/louter_join_ppr.q.out  | 28 ++++++
 .../clientpositive/spark/mapjoin_mapjoin.q.out  | 10 +++
 .../clientpositive/spark/outer_join_ppr.q.out   | 14 +++
 .../spark/parquet_vectorization_0.q.out         |  9 ++
 .../test/results/clientpositive/spark/pcr.q.out | 82 +++++++++++++++++
 .../clientpositive/spark/ppd_join_filter.q.out  | 36 ++++++++
 .../clientpositive/spark/router_join_ppr.q.out  | 28 ++++++
 .../clientpositive/spark/smb_mapjoin_15.q.out   | 36 ++++++++
 .../spark/spark_union_merge.q.out               | 14 +++
 .../results/clientpositive/spark/stats0.q.out   |  4 +
 .../results/clientpositive/spark/union22.q.out  | 11 +++
 .../results/clientpositive/spark/union24.q.out  | 47 ++++++++++
 .../clientpositive/spark/vectorization_0.q.out  |  9 ++
 ql/src/test/results/clientpositive/stats0.q.out |  4 +
 .../truncate_column_list_bucket.q.out           |  6 ++
 .../results/clientpositive/udf_reflect2.q.out   |  3 +
 .../test/results/clientpositive/union22.q.out   | 11 +++
 .../test/results/clientpositive/union24.q.out   | 47 ++++++++++
 .../clientpositive/vector_outer_join3.q.out     |  6 +-
 .../clientpositive/vector_outer_join4.q.out     |  6 +-
 .../clientpositive/vector_outer_join6.q.out     |  4 +-
 199 files changed, 2990 insertions(+), 98 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/6d15ce49/itests/hive-blobstore/src/test/results/clientpositive/insert_overwrite_directory.q.out
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/results/clientpositive/insert_overwrite_directory.q.out b/itests/hive-blobstore/src/test/results/clientpositive/insert_overwrite_directory.q.out
index 8581a17..c59b57c 100644
--- a/itests/hive-blobstore/src/test/results/clientpositive/insert_overwrite_directory.q.out
+++ b/itests/hive-blobstore/src/test/results/clientpositive/insert_overwrite_directory.q.out
@@ -62,6 +62,8 @@ PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE DIRECTORY '### test.blobstore.
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE DIRECTORY '### test.blobstore.path ###/table1.dir/' SELECT * FROM table1
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT `id`, `key`
+FROM `default`.`table1`
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-6 depends on stages: Stage-1 , consists of Stage-3, Stage-2, Stage-4

http://git-wip-us.apache.org/repos/asf/hive/blob/6d15ce49/itests/hive-blobstore/src/test/results/clientpositive/write_final_output_blobstore.q.out
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/results/clientpositive/write_final_output_blobstore.q.out b/itests/hive-blobstore/src/test/results/clientpositive/write_final_output_blobstore.q.out
index 73fe3f9..b6fff60 100644
--- a/itests/hive-blobstore/src/test/results/clientpositive/write_final_output_blobstore.q.out
+++ b/itests/hive-blobstore/src/test/results/clientpositive/write_final_output_blobstore.q.out
@@ -28,6 +28,10 @@ PREHOOK: query: EXPLAIN EXTENDED FROM hdfs_table INSERT OVERWRITE TABLE blobstor
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN EXTENDED FROM hdfs_table INSERT OVERWRITE TABLE blobstore_table SELECT hdfs_table.key GROUP BY hdfs_table.key ORDER BY hdfs_table.key
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT `key`
+FROM `default`.`hdfs_table`
+GROUP BY `key`
+ORDER BY `key`
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-2 depends on stages: Stage-1
@@ -299,6 +303,10 @@ PREHOOK: query: EXPLAIN EXTENDED FROM hdfs_table INSERT OVERWRITE TABLE blobstor
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN EXTENDED FROM hdfs_table INSERT OVERWRITE TABLE blobstore_table SELECT hdfs_table.key GROUP BY hdfs_table.key ORDER BY hdfs_table.key
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT `key`
+FROM `default`.`hdfs_table`
+GROUP BY `key`
+ORDER BY `key`
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-2 depends on stages: Stage-1

http://git-wip-us.apache.org/repos/asf/hive/blob/6d15ce49/ql/src/java/org/apache/hadoop/hive/ql/Context.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Context.java b/ql/src/java/org/apache/hadoop/hive/ql/Context.java
index 3004f9c..b4d5806 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/Context.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/Context.java
@@ -74,6 +74,7 @@ import org.slf4j.LoggerFactory;
  * each query should call clear() at end of use to remove temporary folders
  */
 public class Context {
+
   private boolean isHDFSCleanup;
   private Path resFile;
   private Path resDir;
@@ -103,6 +104,7 @@ public class Context {
   protected ExplainConfiguration explainConfig = null;
   protected String cboInfo;
   protected boolean cboSucceeded;
+  protected String optimizedSql;
   protected String cmd = "";
   private TokenRewriteStream tokenRewriteStream;
   // Holds the qualified name to tokenRewriteStream for the views
@@ -1003,6 +1005,14 @@ public class Context {
     this.cboInfo = cboInfo;
   }
 
+  public String getOptimizedSql() {
+    return this.optimizedSql;
+  }
+
+  public void setOptimizedSql(String newSql) {
+    this.optimizedSql = newSql;
+  }
+
   public boolean isCboSucceeded() {
     return cboSucceeded;
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/6d15ce49/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
index 78922f1..ef96b1b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
@@ -678,7 +678,8 @@ public class Driver implements IDriver {
       schema = getSchema(sem, conf);
       plan = new QueryPlan(queryStr, sem, queryDisplay.getQueryStartTime(), queryId,
           queryState.getHiveOperation(), schema);
-
+      // save the optimized sql for the explain
+      plan.setOptimizedQueryString(ctx.getOptimizedSql());
 
       conf.set("mapreduce.workflow.id", "hive_" + queryId);
       conf.set("mapreduce.workflow.name", queryStr);
@@ -1007,7 +1008,7 @@ public class Driver implements IDriver {
     PrintStream ps = new PrintStream(baos);
     try {
       List<Task<?>> rootTasks = sem.getAllRootTasks();
-      task.getJSONPlan(ps, rootTasks, sem.getFetchTask(), false, true, true);
+      task.getJSONPlan(ps, rootTasks, sem.getFetchTask(), false, true, true, plan.getOptimizedQueryString());
       ret = baos.toString();
     } catch (Exception e) {
       LOG.warn("Exception generating explain output: " + e, e);

http://git-wip-us.apache.org/repos/asf/hive/blob/6d15ce49/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java b/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java
index 79e938a..8943bc7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java
@@ -74,6 +74,7 @@ public class QueryPlan implements Serializable {
 
 
   private String queryString;
+  private String optimizedQueryString;
 
   private ArrayList<Task<? extends Serializable>> rootTasks;
   private FetchTask fetchTask;
@@ -741,6 +742,14 @@ public class QueryPlan implements Serializable {
     this.queryString = queryString;
   }
 
+  public String getOptimizedQueryString() {
+    return this.optimizedQueryString;
+  }
+
+  public void setOptimizedQueryString(String optimizedQueryString) {
+    this.optimizedQueryString = optimizedQueryString;
+  }
+
   public org.apache.hadoop.hive.ql.plan.api.Query getQuery() {
     return query;
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/6d15ce49/ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java
index 34da025..752c3f3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java
@@ -217,11 +217,11 @@ public class ExplainTask extends Task<ExplainWork> implements Serializable {
   public JSONObject getJSONPlan(PrintStream out, ExplainWork work)
       throws Exception {
     return getJSONPlan(out, work.getRootTasks(), work.getFetchTask(),
-                       work.isFormatted(), work.getExtended(), work.isAppendTaskType());
+                       work.isFormatted(), work.getExtended(), work.isAppendTaskType(), work.getOptimizedSQL());
   }
 
   public JSONObject getJSONPlan(PrintStream out, List<Task<?>> tasks, Task<?> fetchTask,
-      boolean jsonOutput, boolean isExtended, boolean appendTaskType) throws Exception {
+      boolean jsonOutput, boolean isExtended, boolean appendTaskType, String optimizedSQL) throws Exception {
 
     // If the user asked for a formatted output, dump the json output
     // in the output stream
@@ -231,6 +231,15 @@ public class ExplainTask extends Task<ExplainWork> implements Serializable {
       out = null;
     }
 
+    if (optimizedSQL != null) {
+      if (jsonOutput) {
+        outJSONObject.put("optimizedSQL", optimizedSQL);
+      } else {
+        out.print("OPTIMIZED SQL: ");
+        out.println(optimizedSQL);
+      }
+    }
+
     List<Task> ordered = StageIDsRearranger.getExplainOrder(conf, tasks);
 
     if (fetchTask != null) {
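
[Illustration only, not part of the patch] The new optimizedSQL parameter surfaces in both output modes. A minimal sketch, reusing the fixture names from TestExplainTask (further down in this mail) and a hypothetical SQL string:

    JSONObject json = uut.getJSONPlan(null, tasks, null,
        true /* jsonOutput */, false /* isExtended */, false /* appendTaskType */,
        "SELECT `key`, `value`\nFROM `default`.`src`");
    String optimizedSql = json.getString("optimizedSQL");   // the Rel2SQL text added above
    // With jsonOutput == false and a non-null optimizedSQL, the plain-text path instead
    // prints "OPTIMIZED SQL: ..." ahead of STAGE DEPENDENCIES, as the .q.out updates show.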

http://git-wip-us.apache.org/repos/asf/hive/blob/6d15ce49/ql/src/java/org/apache/hadoop/hive/ql/hooks/ATSHook.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/ATSHook.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/ATSHook.java
index 0ae60b5..fa69f13 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/ATSHook.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/ATSHook.java
@@ -266,7 +266,8 @@ public class ATSHook implements ExecuteWithHookContext {
                   plan.getFetchTask(),// FetchTask
                   null,// analyzer
                   config, //explainConfig
-                  null// cboInfo
+                  null, // cboInfo
+                  plan.getOptimizedQueryString() // optimizedSQL
               );
                 @SuppressWarnings("unchecked")
                 ExplainTask explain = (ExplainTask) TaskFactory.get(work);

http://git-wip-us.apache.org/repos/asf/hive/blob/6d15ce49/ql/src/java/org/apache/hadoop/hive/ql/hooks/HiveProtoLoggingHook.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/HiveProtoLoggingHook.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/HiveProtoLoggingHook.java
index f463437..155b2be 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/HiveProtoLoggingHook.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/HiveProtoLoggingHook.java
@@ -479,7 +479,8 @@ public class HiveProtoLoggingHook implements ExecuteWithHookContext {
           plan.getFetchTask(), // FetchTask
           null, // analyzer
           config, // explainConfig
-          null // cboInfo
+          null, // cboInfo,
+          plan.getOptimizedQueryString()
       );
       ExplainTask explain = (ExplainTask) TaskFactory.get(work, conf);
       explain.initialize(hookContext.getQueryState(), plan, null, null);

http://git-wip-us.apache.org/repos/asf/hive/blob/6d15ce49/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMaterializedViewsRegistry.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMaterializedViewsRegistry.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMaterializedViewsRegistry.java
index 98c9ce9..5c9162f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMaterializedViewsRegistry.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMaterializedViewsRegistry.java
@@ -339,13 +339,12 @@ public final class HiveMaterializedViewsRegistry {
     }
 
     // 2. Build RelOptAbstractTable
-    String fullyQualifiedTabName = viewTable.getDbName();
-    if (fullyQualifiedTabName != null && !fullyQualifiedTabName.isEmpty()) {
-      fullyQualifiedTabName = fullyQualifiedTabName + "." + viewTable.getTableName();
-    }
-    else {
-      fullyQualifiedTabName = viewTable.getTableName();
+    List<String> fullyQualifiedTabName = new ArrayList<>();
+    if (viewTable.getDbName() != null && !viewTable.getDbName().isEmpty()) {
+      fullyQualifiedTabName.add(viewTable.getDbName());
     }
+    fullyQualifiedTabName.add(viewTable.getTableName());
+
     RelNode tableRel;
 
     // 3. Build operator

http://git-wip-us.apache.org/repos/asf/hive/blob/6d15ce49/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java
index 6cc6d02..1a536da 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java
@@ -18,19 +18,24 @@
 package org.apache.hadoop.hive.ql.optimizer.calcite;
 
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicInteger;
+import java.util.stream.Collectors;
 
-import org.apache.calcite.plan.RelOptAbstractTable;
+import org.apache.calcite.linq4j.tree.Expression;
 import org.apache.calcite.plan.RelOptSchema;
+import org.apache.calcite.plan.RelOptTable;
 import org.apache.calcite.plan.RelOptUtil.InputFinder;
+import org.apache.calcite.prepare.RelOptTableImpl;
 import org.apache.calcite.rel.RelCollation;
 import org.apache.calcite.rel.RelCollationTraitDef;
 import org.apache.calcite.rel.RelDistribution;
+import org.apache.calcite.rel.RelDistributions;
 import org.apache.calcite.rel.RelFieldCollation;
 import org.apache.calcite.rel.RelFieldCollation.Direction;
 import org.apache.calcite.rel.RelFieldCollation.NullDirection;
@@ -41,6 +46,7 @@ import org.apache.calcite.rel.logical.LogicalTableScan;
 import org.apache.calcite.rel.type.RelDataType;
 import org.apache.calcite.rel.type.RelDataTypeField;
 import org.apache.calcite.rex.RexNode;
+import org.apache.calcite.schema.ColumnStrategy;
 import org.apache.calcite.util.ImmutableBitSet;
 import org.apache.calcite.util.mapping.IntPair;
 import org.apache.hadoop.hive.conf.HiveConf;
@@ -76,7 +82,14 @@ import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Lists;
 
-public class RelOptHiveTable extends RelOptAbstractTable {
+public class RelOptHiveTable implements RelOptTable {
+
+  //~ Instance fields --------------------------------------------------------
+
+  private final RelOptSchema schema;
+  private final RelDataType rowType;
+  private final List<String> qualifiedTblName;
+  private final String name;
   private final Table                             hiveTblMetadata;
   private final ImmutableList<ColumnInfo>         hiveNonPartitionCols;
   private final ImmutableList<ColumnInfo>         hivePartitionCols;
@@ -97,12 +110,15 @@ public class RelOptHiveTable extends RelOptAbstractTable {
 
   protected static final Logger LOG = LoggerFactory.getLogger(RelOptHiveTable.class.getName());
 
-  public RelOptHiveTable(RelOptSchema calciteSchema, String qualifiedTblName,
+  public RelOptHiveTable(RelOptSchema calciteSchema, List<String> qualifiedTblName,
       RelDataType rowType, Table hiveTblMetadata, List<ColumnInfo> hiveNonPartitionCols,
       List<ColumnInfo> hivePartitionCols, List<VirtualColumn> hiveVirtualCols, HiveConf hconf,
       Map<String, PrunedPartitionList> partitionCache, Map<String, ColumnStatsList> colStatsCache,
       AtomicInteger noColsMissingStats) {
-    super(calciteSchema, qualifiedTblName, rowType);
+    this.schema = calciteSchema;
+    this.qualifiedTblName = ImmutableList.copyOf(qualifiedTblName);
+    this.name = this.qualifiedTblName.stream().collect(Collectors.joining("."));
+    this.rowType = rowType;
     this.hiveTblMetadata = hiveTblMetadata;
     this.hiveNonPartitionCols = ImmutableList.copyOf(hiveNonPartitionCols);
     this.hiveNonPartitionColsMap = HiveCalciteUtil.getColInfoMap(hiveNonPartitionCols, 0);
@@ -118,6 +134,42 @@ public class RelOptHiveTable extends RelOptAbstractTable {
     this.referentialConstraints = generateReferentialConstraints();
   }
 
+  //~ Methods ----------------------------------------------------------------
+
+  public String getName() {
+    return name;
+  }
+
+  @Override
+  public List<String> getQualifiedName() {
+    return qualifiedTblName;
+  }
+
+  @Override
+  public RelDataType getRowType() {
+    return rowType;
+  }
+
+  @Override
+  public RelOptSchema getRelOptSchema() {
+    return schema;
+  }
+
+  @Override
+  public Expression getExpression(Class clazz) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public RelOptTable extend(List<RelDataTypeField> extendedFields) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public List<ColumnStrategy> getColumnStrategies() {
+    return RelOptTableImpl.columnStrategies(this);
+  }
+
   public RelOptHiveTable copy(RelDataType newRowType) {
     // 1. Build map of column name to col index of original schema
     // Assumption: Hive Table can not contain duplicate column names
@@ -149,7 +201,7 @@ public class RelOptHiveTable extends RelOptAbstractTable {
     }
 
     // 3. Build new Table
-    return new RelOptHiveTable(this.schema, this.name, newRowType,
+    return new RelOptHiveTable(this.schema, this.qualifiedTblName, newRowType,
         this.hiveTblMetadata, newHiveNonPartitionCols, newHivePartitionCols, newHiveVirtualCols,
         this.hiveConf, this.partitionCache, this.colStatsCache, this.noColsMissingStats);
   }
@@ -236,17 +288,14 @@ public class RelOptHiveTable extends RelOptAbstractTable {
     }
     ImmutableList.Builder<RelReferentialConstraint> builder = ImmutableList.builder();
     for (List<ForeignKeyCol> fkCols : fki.getForeignKeys().values()) {
-      List<String> foreignKeyTableQualifiedName = Lists.newArrayList(name);
+      List<String> foreignKeyTableQualifiedName = qualifiedTblName;
       String parentDatabaseName = fkCols.get(0).parentDatabaseName;
       String parentTableName = fkCols.get(0).parentTableName;
-      String parentFullyQualifiedName;
+      List<String> parentTableQualifiedName = new ArrayList<>();
       if (parentDatabaseName != null && !parentDatabaseName.isEmpty()) {
-        parentFullyQualifiedName = parentDatabaseName + "." + parentTableName;
-      }
-      else {
-        parentFullyQualifiedName = parentTableName;
+        parentTableQualifiedName.add(parentDatabaseName);
       }
-      List<String> parentTableQualifiedName = Lists.newArrayList(parentFullyQualifiedName);
+      parentTableQualifiedName.add(parentTableName);
       Table parentTab = null;
       try {
         // TODO: We have a cache for Table objects in SemanticAnalyzer::getTableObjectByName()
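
[Illustration only, hypothetical values; the remaining constructor arguments are unchanged and omitted] The qualified table name is now carried as its components instead of a pre-joined "db.table" string, so Rel2SQL can quote each part, while getName() joins them back for display:

    List<String> qualifiedTblName = ImmutableList.of("default", "src");
    // given a RelOptHiveTable built from this list:
    table.getQualifiedName();   // ["default", "src"], rendered as `default`.`src` in OPTIMIZED SQL
    table.getName();            // "default.src" (components joined with ".")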

http://git-wip-us.apache.org/repos/asf/hive/blob/6d15ce49/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
index a0cdcb6..f008c4d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
@@ -33,12 +33,15 @@ import org.antlr.runtime.tree.TreeVisitorAction;
 import org.apache.calcite.adapter.druid.DruidQuery;
 import org.apache.calcite.adapter.druid.DruidSchema;
 import org.apache.calcite.adapter.druid.DruidTable;
+import org.apache.calcite.adapter.java.JavaTypeFactory;
 import org.apache.calcite.adapter.jdbc.JdbcConvention;
+import org.apache.calcite.adapter.jdbc.JdbcImplementor;
 import org.apache.calcite.adapter.jdbc.JdbcSchema;
 import org.apache.calcite.adapter.jdbc.JdbcTable;
 import org.apache.calcite.config.CalciteConnectionConfig;
 import org.apache.calcite.config.CalciteConnectionConfigImpl;
 import org.apache.calcite.config.CalciteConnectionProperty;
+import org.apache.calcite.config.NullCollation;
 import org.apache.calcite.interpreter.BindableConvention;
 import org.apache.calcite.plan.RelOptCluster;
 import org.apache.calcite.plan.RelOptMaterialization;
@@ -97,7 +100,9 @@ import org.apache.calcite.sql.SqlKind;
 import org.apache.calcite.sql.SqlLiteral;
 import org.apache.calcite.sql.SqlNode;
 import org.apache.calcite.sql.SqlOperator;
+import org.apache.calcite.sql.SqlSampleSpec;
 import org.apache.calcite.sql.SqlWindow;
+import org.apache.calcite.sql.dialect.HiveSqlDialect;
 import org.apache.calcite.sql.parser.SqlParserPos;
 import org.apache.calcite.sql.type.ArraySqlType;
 import org.apache.calcite.sql.type.SqlTypeName;
@@ -467,8 +472,10 @@ public class CalcitePlanner extends SemanticAnalyzer {
             this.ctx.setCboInfo("Plan optimized by CBO.");
             this.ctx.setCboSucceeded(true);
           } else {
-            // 1. Gen Optimized AST
-            ASTNode newAST = getOptimizedAST();
+            // 0. Gen Optimized Plan
+            final RelNode newPlan = logicalPlan();
+            // 1. Convert Plan to AST
+            ASTNode newAST = getOptimizedAST(newPlan);
 
             // 1.1. Fix up the query for insert/ctas/materialized views
             newAST = fixUpAfterCbo(ast, newAST, cboCtx);
@@ -539,7 +546,14 @@ public class CalcitePlanner extends SemanticAnalyzer {
             LOG.info("CBO Succeeded; optimized logical plan.");
             this.ctx.setCboInfo("Plan optimized by CBO.");
             this.ctx.setCboSucceeded(true);
+            if (this.ctx.isExplainPlan()) {
+              ExplainConfiguration explainConfig = this.ctx.getExplainConfig();
+              if (explainConfig.isExtended() || explainConfig.isFormatted()) {
+                this.ctx.setOptimizedSql(getOptimizedSql(newPlan));
+              }
+            }
             if (LOG.isTraceEnabled()) {
+              LOG.trace(getOptimizedSql(newPlan));
               LOG.trace(newAST.dump());
             }
           }
@@ -1442,13 +1456,57 @@ public class CalcitePlanner extends SemanticAnalyzer {
   }
 
   /**
+   * Get SQL rewrite for a Calcite logical plan
+   *
+   * @return Optimized SQL text (or null, if failed)
+   */
+  public String getOptimizedSql(RelNode optimizedOptiqPlan) {
+    SqlDialect dialect = new HiveSqlDialect(SqlDialect.EMPTY_CONTEXT
+        .withDatabaseProduct(SqlDialect.DatabaseProduct.HIVE)
+        .withDatabaseMajorVersion(4) // TODO: should not be hardcoded
+        .withDatabaseMinorVersion(0)
+        .withIdentifierQuoteString("`")
+        .withNullCollation(NullCollation.LOW)) {
+      @Override
+      protected boolean allowsAs() {
+        return true;
+      }
+
+      @Override
+      public boolean supportsCharSet() {
+        return false;
+      }
+    };
+    try {
+      final JdbcImplementor jdbcImplementor =
+          new JdbcImplementor(dialect, (JavaTypeFactory) optimizedOptiqPlan.getCluster()
+              .getTypeFactory());
+      final JdbcImplementor.Result result = jdbcImplementor.visitChild(0, optimizedOptiqPlan);
+      String sql = result.asStatement().toSqlString(dialect).getSql();
+      return sql.replaceAll("VARCHAR\\(2147483647\\)", "STRING");
+    } catch (Exception ex) {
+      LOG.warn("Rel2SQL Rewrite threw error", ex);
+    }
+    return null;
+  }
+
+  /**
    * Get Optimized AST for the given QB tree in the semAnalyzer.
    *
    * @return Optimized operator tree translated in to Hive AST
    * @throws SemanticException
    */
   ASTNode getOptimizedAST() throws SemanticException {
-    RelNode optimizedOptiqPlan = logicalPlan();
+    return getOptimizedAST(logicalPlan());
+  }
+
+  /**
+   * Get Optimized AST for the given QB tree in the semAnalyzer.
+   *
+   * @return Optimized operator tree translated in to Hive AST
+   * @throws SemanticException
+   */
+  ASTNode getOptimizedAST(RelNode optimizedOptiqPlan) throws SemanticException {
     ASTNode optiqOptimizedAST = ASTConverter.convert(optimizedOptiqPlan, resultSchema,
             HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_COLUMN_ALIGNMENT));
     return optiqOptimizedAST;
@@ -2149,7 +2207,7 @@ public class CalcitePlanner extends SemanticAnalyzer {
                       ImmutableList.<RelNode>of(dq.getTableScan()));
                 } else {
                   newScan = new HiveTableScan(optCluster, optCluster.traitSetOf(HiveRelNode.CONVENTION),
-                      (RelOptHiveTable) scan.getTable(), scan.getTable().getQualifiedName().get(0),
+                      (RelOptHiveTable) scan.getTable(), ((RelOptHiveTable) scan.getTable()).getName(),
                       null, false, false);
                 }
                 return newScan;
@@ -2779,13 +2837,11 @@ public class CalcitePlanner extends SemanticAnalyzer {
           // Build row type from field <type, name>
           RelDataType rowType = TypeConverter.getType(cluster, cIList);
           // Build RelOptAbstractTable
-          String fullyQualifiedTabName = tabMetaData.getDbName();
-          if (fullyQualifiedTabName != null && !fullyQualifiedTabName.isEmpty()) {
-            fullyQualifiedTabName = fullyQualifiedTabName + "." + tabMetaData.getTableName();
-          }
-          else {
-            fullyQualifiedTabName = tabMetaData.getTableName();
+          List<String> fullyQualifiedTabName = new ArrayList<>();
+          if (tabMetaData.getDbName() != null && !tabMetaData.getDbName().isEmpty()) {
+            fullyQualifiedTabName.add(tabMetaData.getDbName());
           }
+          fullyQualifiedTabName.add(tabMetaData.getTableName());
 
           if (tableType == TableType.DRUID) {
             // Build Druid query
@@ -2870,13 +2926,11 @@ public class CalcitePlanner extends SemanticAnalyzer {
           // Build row type from field <type, name>
           RelDataType rowType = inferNotNullableColumns(tabMetaData, TypeConverter.getType(cluster, rr, null));
           // Build RelOptAbstractTable
-          String fullyQualifiedTabName = tabMetaData.getDbName();
-          if (fullyQualifiedTabName != null && !fullyQualifiedTabName.isEmpty()) {
-            fullyQualifiedTabName = fullyQualifiedTabName + "." + tabMetaData.getTableName();
-          }
-          else {
-            fullyQualifiedTabName = tabMetaData.getTableName();
+          List<String> fullyQualifiedTabName = new ArrayList<>();
+          if (tabMetaData.getDbName() != null && !tabMetaData.getDbName().isEmpty()) {
+            fullyQualifiedTabName.add(tabMetaData.getDbName());
           }
+          fullyQualifiedTabName.add(tabMetaData.getTableName());
           RelOptHiveTable optTable = new RelOptHiveTable(relOptSchema, fullyQualifiedTabName,
               rowType, tabMetaData, nonPartitionColumns, partitionColumns, virtualCols, conf,
               partitionCache, colStatsCache, noColsMissingStats);
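
[Sketch condensing the hunks above; not additional code in the patch] The CBO plan is built once, reused for the AST, and only converted back to SQL when an extended or formatted EXPLAIN asks for it:

    final RelNode newPlan = logicalPlan();        // 0. CBO-optimized Calcite plan
    ASTNode newAST = getOptimizedAST(newPlan);    // 1. same plan converted to Hive AST
    if (this.ctx.isExplainPlan()) {
      ExplainConfiguration explainConfig = this.ctx.getExplainConfig();
      if (explainConfig.isExtended() || explainConfig.isFormatted()) {
        // Rel2SQL via JdbcImplementor + HiveSqlDialect; may return null if the rewrite fails
        this.ctx.setOptimizedSql(getOptimizedSql(newPlan));
      }
    }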

http://git-wip-us.apache.org/repos/asf/hive/blob/6d15ce49/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java
index 6f0a803..feec0fd 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java
@@ -206,7 +206,8 @@ public class ExplainSemanticAnalyzer extends BaseSemanticAnalyzer {
         fetchTask,
         sem,
         config,
-        ctx.getCboInfo());
+        ctx.getCboInfo(),
+        ctx.getOptimizedSql());
 
     work.setAppendTaskType(
         HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVEEXPLAINDEPENDENCYAPPENDTASKTYPES));

http://git-wip-us.apache.org/repos/asf/hive/blob/6d15ce49/ql/src/java/org/apache/hadoop/hive/ql/plan/ExplainWork.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ExplainWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ExplainWork.java
index 2cdf8cf..ce03003 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ExplainWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ExplainWork.java
@@ -50,6 +50,8 @@ public class ExplainWork implements Serializable {
 
   String cboInfo;
 
+  private String optimizedSQL;
+
   private transient BaseSemanticAnalyzer analyzer;
 
   public ExplainWork() {
@@ -61,7 +63,8 @@ public class ExplainWork implements Serializable {
       Task<?> fetchTask,
       BaseSemanticAnalyzer analyzer,
       ExplainConfiguration config,
-      String cboInfo) {
+      String cboInfo,
+      String optimizedSQL) {
     this.resFile = resFile;
     this.rootTasks = new ArrayList<Task<?>>(rootTasks);
     this.fetchTask = fetchTask;
@@ -71,6 +74,7 @@ public class ExplainWork implements Serializable {
     }
     this.pCtx = pCtx;
     this.cboInfo = cboInfo;
+    this.optimizedSQL = optimizedSQL;
     this.config = config;
   }
 
@@ -170,6 +174,14 @@ public class ExplainWork implements Serializable {
     this.cboInfo = cboInfo;
   }
 
+  public String getOptimizedSQL() {
+    return optimizedSQL;
+  }
+
+  public void setOptimizedSQL(String optimizedSQL) {
+    this.optimizedSQL = optimizedSQL;
+  }
+
   public ExplainConfiguration getConfig() {
     return config;
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/6d15ce49/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExplainTask.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExplainTask.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExplainTask.java
index 5a77722..244bc73 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExplainTask.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExplainTask.java
@@ -218,7 +218,7 @@ public class TestExplainTask {
 
 
     JsonNode result = objectMapper.readTree(uut.getJSONPlan(null, tasks, null, true,
-            false, false).toString());
+            false, false, null).toString());
     JsonNode expected = objectMapper.readTree("{\"STAGE DEPENDENCIES\":{\"mockTaskId\":" +
             "{\"ROOT STAGE\":\"TRUE\",\"BACKUP STAGE\":\"backup-id-mock\"}},\"STAGE PLANS\":" +
             "{\"mockTaskId\":{}}}");

http://git-wip-us.apache.org/repos/asf/hive/blob/6d15ce49/ql/src/test/org/apache/hadoop/hive/ql/parse/TestUpdateDeleteSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/parse/TestUpdateDeleteSemanticAnalyzer.java b/ql/src/test/org/apache/hadoop/hive/ql/parse/TestUpdateDeleteSemanticAnalyzer.java
index ffd0445..470263b 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/parse/TestUpdateDeleteSemanticAnalyzer.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/parse/TestUpdateDeleteSemanticAnalyzer.java
@@ -299,7 +299,7 @@ public class TestUpdateDeleteSemanticAnalyzer {
     ExplainConfiguration config = new ExplainConfiguration();
     config.setExtended(true);
     ExplainWork work = new ExplainWork(tmp, sem.getParseContext(), sem.getRootTasks(),
-        sem.getFetchTask(), sem, config, null);
+        sem.getFetchTask(), sem, config, null, plan.getOptimizedQueryString());
     ExplainTask task = new ExplainTask();
     task.setWork(work);
     task.initialize(queryState, plan, null, null);

http://git-wip-us.apache.org/repos/asf/hive/blob/6d15ce49/ql/src/test/results/clientpositive/acid_nullscan.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/acid_nullscan.q.out b/ql/src/test/results/clientpositive/acid_nullscan.q.out
index 19fcc8c..f4cc694 100644
--- a/ql/src/test/results/clientpositive/acid_nullscan.q.out
+++ b/ql/src/test/results/clientpositive/acid_nullscan.q.out
@@ -32,6 +32,9 @@ PREHOOK: type: QUERY
 POSTHOOK: query: explain extended
 select sum(a) from acid_vectorized_n1 where false
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT SUM(`a`) AS `$f0`
+FROM `default`.`acid_vectorized_n1`
+WHERE FALSE
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1

http://git-wip-us.apache.org/repos/asf/hive/blob/6d15ce49/ql/src/test/results/clientpositive/alter_partition_coltype.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/alter_partition_coltype.q.out b/ql/src/test/results/clientpositive/alter_partition_coltype.q.out
index 8e2a745..5d033a3 100644
--- a/ql/src/test/results/clientpositive/alter_partition_coltype.q.out
+++ b/ql/src/test/results/clientpositive/alter_partition_coltype.q.out
@@ -70,6 +70,9 @@ PREHOOK: query: explain extended select count(*) from alter_coltype where dt = '
 PREHOOK: type: QUERY
 POSTHOOK: query: explain extended select count(*) from alter_coltype where dt = '100'
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT COUNT(*) AS `$f0`
+FROM `default`.`alter_coltype`
+WHERE `dt` = 100
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
 
@@ -117,6 +120,9 @@ PREHOOK: query: explain extended select count(*) from alter_coltype where ts = '
 PREHOOK: type: QUERY
 POSTHOOK: query: explain extended select count(*) from alter_coltype where ts = '6.30'
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT COUNT(*) AS `$f0`
+FROM `default`.`alter_coltype`
+WHERE `ts` = 6.3
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
 
@@ -140,6 +146,9 @@ PREHOOK: query: explain extended select count(*) from alter_coltype where ts = 3
 PREHOOK: type: QUERY
 POSTHOOK: query: explain extended select count(*) from alter_coltype where ts = 3.0 and dt=100
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT COUNT(*) AS `$f0`
+FROM `default`.`alter_coltype`
+WHERE `ts` = 3 AND `dt` = 100
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
 
@@ -216,6 +225,9 @@ PREHOOK: query: explain extended select key, value, dt, ts from alter_coltype wh
 PREHOOK: type: QUERY
 POSTHOOK: query: explain extended select key, value, dt, ts from alter_coltype where dt is not null
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT `key`, `value`, `dt`, `ts`
+FROM `default`.`alter_coltype`
+WHERE `dt` IS NOT NULL
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
 
@@ -453,6 +465,9 @@ PREHOOK: query: explain extended select intcol from pt.alterdynamic_part_table w
 PREHOOK: type: QUERY
 POSTHOOK: query: explain extended select intcol from pt.alterdynamic_part_table where partcol1='1' and partcol2='1'
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT `intcol`
+FROM `pt`.`alterdynamic_part_table`
+WHERE `partcol1` = 1 AND `partcol2` = '1'
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
 
@@ -524,6 +539,9 @@ PREHOOK: query: explain extended select intcol from pt.alterdynamic_part_table w
 PREHOOK: type: QUERY
 POSTHOOK: query: explain extended select intcol from pt.alterdynamic_part_table where (partcol1='2' and partcol2='1')or (partcol1='1' and partcol2='__HIVE_DEFAULT_PARTITION__')
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT `intcol`
+FROM `pt`.`alterdynamic_part_table`
+WHERE `partcol1` = 2 AND `partcol2` = '1' OR `partcol1` = 1 AND `partcol2` = '__HIVE_DEFAULT_PARTITION__'
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
 

http://git-wip-us.apache.org/repos/asf/hive/blob/6d15ce49/ql/src/test/results/clientpositive/ambiguitycheck.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ambiguitycheck.q.out b/ql/src/test/results/clientpositive/ambiguitycheck.q.out
index d7fee3d..80c9582 100644
--- a/ql/src/test/results/clientpositive/ambiguitycheck.q.out
+++ b/ql/src/test/results/clientpositive/ambiguitycheck.q.out
@@ -705,6 +705,9 @@ PREHOOK: query: explain extended select int(1.2) from src limit 1
 PREHOOK: type: QUERY
 POSTHOOK: query: explain extended select int(1.2) from src limit 1
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT CAST(1.2 AS INTEGER) AS `_o__c0`
+FROM `default`.`src`
+LIMIT 1
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
 

http://git-wip-us.apache.org/repos/asf/hive/blob/6d15ce49/ql/src/test/results/clientpositive/analyze_table_null_partition.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/analyze_table_null_partition.q.out b/ql/src/test/results/clientpositive/analyze_table_null_partition.q.out
index a8cfb97..353813e 100644
--- a/ql/src/test/results/clientpositive/analyze_table_null_partition.q.out
+++ b/ql/src/test/results/clientpositive/analyze_table_null_partition.q.out
@@ -87,6 +87,8 @@ PREHOOK: query: EXPLAIN EXTENDED select * from test2_n6
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN EXTENDED select * from test2_n6
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT `name`, `age`
+FROM `default`.`test2_n6`
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
 

http://git-wip-us.apache.org/repos/asf/hive/blob/6d15ce49/ql/src/test/results/clientpositive/autoColumnStats_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/autoColumnStats_1.q.out b/ql/src/test/results/clientpositive/autoColumnStats_1.q.out
index 1756019..0362f50 100644
--- a/ql/src/test/results/clientpositive/autoColumnStats_1.q.out
+++ b/ql/src/test/results/clientpositive/autoColumnStats_1.q.out
@@ -24,6 +24,8 @@ PREHOOK: query: explain extended select * from src_multi1_n1
 PREHOOK: type: QUERY
 POSTHOOK: query: explain extended select * from src_multi1_n1
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT `key`, `value`
+FROM `default`.`src_multi1_n1`
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
 

http://git-wip-us.apache.org/repos/asf/hive/blob/6d15ce49/ql/src/test/results/clientpositive/autoColumnStats_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/autoColumnStats_2.q.out b/ql/src/test/results/clientpositive/autoColumnStats_2.q.out
index 07f1cba..b79c78d 100644
--- a/ql/src/test/results/clientpositive/autoColumnStats_2.q.out
+++ b/ql/src/test/results/clientpositive/autoColumnStats_2.q.out
@@ -24,6 +24,8 @@ PREHOOK: query: explain extended select * from src_multi1
 PREHOOK: type: QUERY
 POSTHOOK: query: explain extended select * from src_multi1
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT `key`, `value`
+FROM `default`.`src_multi1`
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
 

http://git-wip-us.apache.org/repos/asf/hive/blob/6d15ce49/ql/src/test/results/clientpositive/auto_join_reordering_values.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/auto_join_reordering_values.q.out b/ql/src/test/results/clientpositive/auto_join_reordering_values.q.out
index 259142d..eb608d4 100644
--- a/ql/src/test/results/clientpositive/auto_join_reordering_values.q.out
+++ b/ql/src/test/results/clientpositive/auto_join_reordering_values.q.out
@@ -80,6 +80,23 @@ JOIN `orderpayment_small` `order_city` ON `order_city`.`cityid` = `orderpayment`
 JOIN `user_small` `user` ON `user`.`userid` = `orderpayment`.`userid`
 limit 5
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT `t4`.`date`, `t6`.`dealid`
+FROM (SELECT `userid`
+FROM `default`.`user_small`
+WHERE `userid` IS NOT NULL) AS `t0`
+INNER JOIN ((SELECT `dealid`, `date`, `cityid`, `userid`
+FROM `default`.`orderpayment_small`
+WHERE `date` IS NOT NULL AND `dealid` IS NOT NULL AND `cityid` IS NOT NULL AND `userid` IS NOT NULL) AS `t2`
+INNER JOIN (SELECT `date`
+FROM `default`.`orderpayment_small`
+WHERE `date` IS NOT NULL) AS `t4` ON `t2`.`date` = `t4`.`date`
+INNER JOIN (SELECT `dealid`
+FROM `default`.`orderpayment_small`
+WHERE `dealid` IS NOT NULL) AS `t6` ON `t2`.`dealid` = `t6`.`dealid`
+INNER JOIN (SELECT `cityid`
+FROM `default`.`orderpayment_small`
+WHERE `cityid` IS NOT NULL) AS `t8` ON `t2`.`cityid` = `t8`.`cityid`) ON `t0`.`userid` = `t2`.`userid`
+LIMIT 5
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-2 depends on stages: Stage-1

http://git-wip-us.apache.org/repos/asf/hive/blob/6d15ce49/ql/src/test/results/clientpositive/bucket1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/bucket1.q.out b/ql/src/test/results/clientpositive/bucket1.q.out
index 07b5b9d..f39af86 100644
--- a/ql/src/test/results/clientpositive/bucket1.q.out
+++ b/ql/src/test/results/clientpositive/bucket1.q.out
@@ -14,6 +14,8 @@ POSTHOOK: query: explain extended
 insert overwrite table bucket1_1
 select * from src
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT `key`, `value`
+FROM `default`.`src`
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1

http://git-wip-us.apache.org/repos/asf/hive/blob/6d15ce49/ql/src/test/results/clientpositive/bucket2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/bucket2.q.out b/ql/src/test/results/clientpositive/bucket2.q.out
index e8a1dd7..8d1d066 100644
--- a/ql/src/test/results/clientpositive/bucket2.q.out
+++ b/ql/src/test/results/clientpositive/bucket2.q.out
@@ -14,6 +14,8 @@ POSTHOOK: query: explain extended
 insert overwrite table bucket2_1
 select * from src
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT `key`, `value`
+FROM `default`.`src`
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1

http://git-wip-us.apache.org/repos/asf/hive/blob/6d15ce49/ql/src/test/results/clientpositive/bucket3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/bucket3.q.out b/ql/src/test/results/clientpositive/bucket3.q.out
index d9ee0dc..ed7da20 100644
--- a/ql/src/test/results/clientpositive/bucket3.q.out
+++ b/ql/src/test/results/clientpositive/bucket3.q.out
@@ -14,6 +14,8 @@ POSTHOOK: query: explain extended
 insert overwrite table bucket3_1 partition (ds='1')
 select * from src
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT `key`, `value`
+FROM `default`.`src`
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1

http://git-wip-us.apache.org/repos/asf/hive/blob/6d15ce49/ql/src/test/results/clientpositive/bucket_map_join_spark1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/bucket_map_join_spark1.q.out b/ql/src/test/results/clientpositive/bucket_map_join_spark1.q.out
index 9fc4fd9..7764706 100644
--- a/ql/src/test/results/clientpositive/bucket_map_join_spark1.q.out
+++ b/ql/src/test/results/clientpositive/bucket_map_join_spark1.q.out
@@ -116,6 +116,13 @@ select a.key, a.value, b.value
 from srcbucket_mapjoin_part_n19 a join srcbucket_mapjoin_part_2_n16 b
 on a.key=b.key and b.ds="2008-04-08"
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT `t0`.`key`, `t0`.`value`, `t2`.`value` AS `value1`
+FROM (SELECT `key`, `value`
+FROM `default`.`srcbucket_mapjoin_part_n19`
+WHERE `key` IS NOT NULL) AS `t0`
+INNER JOIN (SELECT `key`, `value`, CAST('2008-04-08' AS STRING) AS `ds`
+FROM `default`.`srcbucket_mapjoin_part_2_n16`
+WHERE `ds` = '2008-04-08' AND `key` IS NOT NULL) AS `t2` ON `t0`.`key` = `t2`.`key`
 STAGE DEPENDENCIES:
   Stage-6 is a root stage
   Stage-5 depends on stages: Stage-6
@@ -560,6 +567,13 @@ select a.key, a.value, b.value
 from srcbucket_mapjoin_part_n19 a join srcbucket_mapjoin_part_2_n16 b
 on a.key=b.key and b.ds="2008-04-08"
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT `t0`.`key`, `t0`.`value`, `t2`.`value` AS `value1`
+FROM (SELECT `key`, `value`
+FROM `default`.`srcbucket_mapjoin_part_n19`
+WHERE `key` IS NOT NULL) AS `t0`
+INNER JOIN (SELECT `key`, `value`, CAST('2008-04-08' AS STRING) AS `ds`
+FROM `default`.`srcbucket_mapjoin_part_2_n16`
+WHERE `ds` = '2008-04-08' AND `key` IS NOT NULL) AS `t2` ON `t0`.`key` = `t2`.`key`
 STAGE DEPENDENCIES:
   Stage-6 is a root stage
   Stage-5 depends on stages: Stage-6

http://git-wip-us.apache.org/repos/asf/hive/blob/6d15ce49/ql/src/test/results/clientpositive/bucket_map_join_spark2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/bucket_map_join_spark2.q.out b/ql/src/test/results/clientpositive/bucket_map_join_spark2.q.out
index 61eea99..090a0db 100644
--- a/ql/src/test/results/clientpositive/bucket_map_join_spark2.q.out
+++ b/ql/src/test/results/clientpositive/bucket_map_join_spark2.q.out
@@ -100,6 +100,13 @@ select a.key, a.value, b.value
 from srcbucket_mapjoin_part_n12 a join srcbucket_mapjoin_part_2_n10 b
 on a.key=b.key and b.ds="2008-04-08"
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT `t0`.`key`, `t0`.`value`, `t2`.`value` AS `value1`
+FROM (SELECT `key`, `value`
+FROM `default`.`srcbucket_mapjoin_part_n12`
+WHERE `key` IS NOT NULL) AS `t0`
+INNER JOIN (SELECT `key`, `value`, CAST('2008-04-08' AS STRING) AS `ds`
+FROM `default`.`srcbucket_mapjoin_part_2_n10`
+WHERE `ds` = '2008-04-08' AND `key` IS NOT NULL) AS `t2` ON `t0`.`key` = `t2`.`key`
 STAGE DEPENDENCIES:
   Stage-6 is a root stage
   Stage-5 depends on stages: Stage-6
@@ -544,6 +551,13 @@ select a.key, a.value, b.value
 from srcbucket_mapjoin_part_n12 a join srcbucket_mapjoin_part_2_n10 b
 on a.key=b.key and b.ds="2008-04-08"
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT `t0`.`key`, `t0`.`value`, `t2`.`value` AS `value1`
+FROM (SELECT `key`, `value`
+FROM `default`.`srcbucket_mapjoin_part_n12`
+WHERE `key` IS NOT NULL) AS `t0`
+INNER JOIN (SELECT `key`, `value`, CAST('2008-04-08' AS STRING) AS `ds`
+FROM `default`.`srcbucket_mapjoin_part_2_n10`
+WHERE `ds` = '2008-04-08' AND `key` IS NOT NULL) AS `t2` ON `t0`.`key` = `t2`.`key`
 STAGE DEPENDENCIES:
   Stage-6 is a root stage
   Stage-5 depends on stages: Stage-6

http://git-wip-us.apache.org/repos/asf/hive/blob/6d15ce49/ql/src/test/results/clientpositive/bucket_map_join_spark3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/bucket_map_join_spark3.q.out b/ql/src/test/results/clientpositive/bucket_map_join_spark3.q.out
index 5322a06..692b8e7 100644
--- a/ql/src/test/results/clientpositive/bucket_map_join_spark3.q.out
+++ b/ql/src/test/results/clientpositive/bucket_map_join_spark3.q.out
@@ -100,6 +100,13 @@ select a.key, a.value, b.value
 from srcbucket_mapjoin_part_n4 a join srcbucket_mapjoin_part_2_n3 b
 on a.key=b.key and b.ds="2008-04-08"
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT `t0`.`key`, `t0`.`value`, `t2`.`value` AS `value1`
+FROM (SELECT `key`, `value`
+FROM `default`.`srcbucket_mapjoin_part_n4`
+WHERE `key` IS NOT NULL) AS `t0`
+INNER JOIN (SELECT `key`, `value`, CAST('2008-04-08' AS STRING) AS `ds`
+FROM `default`.`srcbucket_mapjoin_part_2_n3`
+WHERE `ds` = '2008-04-08' AND `key` IS NOT NULL) AS `t2` ON `t0`.`key` = `t2`.`key`
 STAGE DEPENDENCIES:
   Stage-6 is a root stage
   Stage-5 depends on stages: Stage-6
@@ -544,6 +551,13 @@ select a.key, a.value, b.value
 from srcbucket_mapjoin_part_n4 a join srcbucket_mapjoin_part_2_n3 b
 on a.key=b.key and b.ds="2008-04-08"
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT `t0`.`key`, `t0`.`value`, `t2`.`value` AS `value1`
+FROM (SELECT `key`, `value`
+FROM `default`.`srcbucket_mapjoin_part_n4`
+WHERE `key` IS NOT NULL) AS `t0`
+INNER JOIN (SELECT `key`, `value`, CAST('2008-04-08' AS STRING) AS `ds`
+FROM `default`.`srcbucket_mapjoin_part_2_n3`
+WHERE `ds` = '2008-04-08' AND `key` IS NOT NULL) AS `t2` ON `t0`.`key` = `t2`.`key`
 STAGE DEPENDENCIES:
   Stage-6 is a root stage
   Stage-5 depends on stages: Stage-6

http://git-wip-us.apache.org/repos/asf/hive/blob/6d15ce49/ql/src/test/results/clientpositive/bucket_map_join_spark4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/bucket_map_join_spark4.q.out b/ql/src/test/results/clientpositive/bucket_map_join_spark4.q.out
index 31d7870..5f0cea5 100644
--- a/ql/src/test/results/clientpositive/bucket_map_join_spark4.q.out
+++ b/ql/src/test/results/clientpositive/bucket_map_join_spark4.q.out
@@ -66,6 +66,16 @@ POSTHOOK: query: explain extended
 select a.key as key, a.value as val1, b.value as val2, c.value as val3
 from tbl1_n0 a join tbl2_n0 b on a.key = b.key join tbl3 c on a.value = c.value
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT `t0`.`key`, `t0`.`value` AS `val1`, `t2`.`value` AS `val2`, `t4`.`value` AS `val3`
+FROM (SELECT `key`, `value`
+FROM `default`.`tbl1_n0`
+WHERE `key` IS NOT NULL AND `value` IS NOT NULL) AS `t0`
+INNER JOIN (SELECT `key`, `value`
+FROM `default`.`tbl2_n0`
+WHERE `key` IS NOT NULL) AS `t2` ON `t0`.`key` = `t2`.`key`
+INNER JOIN (SELECT `value`
+FROM `default`.`tbl3`
+WHERE `value` IS NOT NULL) AS `t4` ON `t0`.`value` = `t4`.`value`
 STAGE DEPENDENCIES:
   Stage-7 is a root stage
   Stage-5 depends on stages: Stage-7
@@ -431,6 +441,16 @@ POSTHOOK: query: explain extended
 select a.key as key, a.value as val1, b.value as val2, c.value as val3
 from tbl1_n0 a join tbl2_n0 b on a.key = b.key join tbl3 c on a.value = c.value
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT `t0`.`key`, `t0`.`value` AS `val1`, `t2`.`value` AS `val2`, `t4`.`value` AS `val3`
+FROM (SELECT `key`, `value`
+FROM `default`.`tbl1_n0`
+WHERE `key` IS NOT NULL AND `value` IS NOT NULL) AS `t0`
+INNER JOIN (SELECT `key`, `value`
+FROM `default`.`tbl2_n0`
+WHERE `key` IS NOT NULL) AS `t2` ON `t0`.`key` = `t2`.`key`
+INNER JOIN (SELECT `value`
+FROM `default`.`tbl3`
+WHERE `value` IS NOT NULL) AS `t4` ON `t0`.`value` = `t4`.`value`
 STAGE DEPENDENCIES:
   Stage-7 is a root stage
   Stage-5 depends on stages: Stage-7

http://git-wip-us.apache.org/repos/asf/hive/blob/6d15ce49/ql/src/test/results/clientpositive/combine2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/combine2.q.out b/ql/src/test/results/clientpositive/combine2.q.out
index 0ea3a21..7c39a68 100644
--- a/ql/src/test/results/clientpositive/combine2.q.out
+++ b/ql/src/test/results/clientpositive/combine2.q.out
@@ -127,6 +127,9 @@ PREHOOK: type: QUERY
 POSTHOOK: query: explain extended
 select count(1) from combine2_n0 where value is not null
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT COUNT(*) AS `$f0`
+FROM `default`.`combine2_n0`
+WHERE `value` IS NOT NULL
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
 

http://git-wip-us.apache.org/repos/asf/hive/blob/6d15ce49/ql/src/test/results/clientpositive/comments.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/comments.q.out b/ql/src/test/results/clientpositive/comments.q.out
index 5fd4696..314707d 100644
--- a/ql/src/test/results/clientpositive/comments.q.out
+++ b/ql/src/test/results/clientpositive/comments.q.out
@@ -75,6 +75,13 @@ PREHOOK: query: explain extended select /*+ MAPJOIN(a) */ count(*) from src a jo
 PREHOOK: type: QUERY
 POSTHOOK: query: explain extended select /*+ MAPJOIN(a) */ count(*) from src a join src b on a.key = b.key where a.key > 0
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT COUNT(*) AS `$f0`
+FROM (SELECT `key`
+FROM `default`.`src`
+WHERE `key` > 0) AS `t0`
+INNER JOIN (SELECT `key`
+FROM `default`.`src`
+WHERE `key` > 0) AS `t2` ON `t0`.`key` = `t2`.`key`
 STAGE DEPENDENCIES:
   Stage-5 is a root stage
   Stage-2 depends on stages: Stage-5

http://git-wip-us.apache.org/repos/asf/hive/blob/6d15ce49/ql/src/test/results/clientpositive/constantPropagateForSubQuery.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/constantPropagateForSubQuery.q.out b/ql/src/test/results/clientpositive/constantPropagateForSubQuery.q.out
index cb8185d..532e977 100644
--- a/ql/src/test/results/clientpositive/constantPropagateForSubQuery.q.out
+++ b/ql/src/test/results/clientpositive/constantPropagateForSubQuery.q.out
@@ -5,6 +5,12 @@ PREHOOK: type: QUERY
 POSTHOOK: query: explain extended
  select * from (select a.key as ak, a.value as av, b.key as bk, b.value as bv from src a join src1 b where a.key = '429' ) c
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT CAST('429' AS STRING) AS `ak`, `t0`.`value` AS `av`, `t1`.`key` AS `bk`, `t1`.`value` AS `bv`
+FROM (SELECT CAST('429' AS STRING) AS `key`, `value`
+FROM `default`.`src`
+WHERE `key` = '429') AS `t0`,
+(SELECT `key`, `value`
+FROM `default`.`src1`) AS `t1`
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1

http://git-wip-us.apache.org/repos/asf/hive/blob/6d15ce49/ql/src/test/results/clientpositive/dynamic_partition_skip_default.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/dynamic_partition_skip_default.q.out b/ql/src/test/results/clientpositive/dynamic_partition_skip_default.q.out
index a79b8e7..97922c2 100644
--- a/ql/src/test/results/clientpositive/dynamic_partition_skip_default.q.out
+++ b/ql/src/test/results/clientpositive/dynamic_partition_skip_default.q.out
@@ -46,6 +46,9 @@ PREHOOK: query: explain extended select intcol from dynamic_part_table where par
 PREHOOK: type: QUERY
 POSTHOOK: query: explain extended select intcol from dynamic_part_table where partcol1='1' and partcol2='1'
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT `intcol`
+FROM `default`.`dynamic_part_table`
+WHERE `partcol1` = '1' AND `partcol2` = '1'
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
 
@@ -117,6 +120,9 @@ PREHOOK: query: explain extended select intcol from dynamic_part_table where par
 PREHOOK: type: QUERY
 POSTHOOK: query: explain extended select intcol from dynamic_part_table where partcol1='1' and partcol2='1'
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT `intcol`
+FROM `default`.`dynamic_part_table`
+WHERE `partcol1` = '1' AND `partcol2` = '1'
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
 
@@ -188,6 +194,9 @@ PREHOOK: query: explain extended select intcol from dynamic_part_table where (pa
 PREHOOK: type: QUERY
 POSTHOOK: query: explain extended select intcol from dynamic_part_table where (partcol1='1' and partcol2='1')or (partcol1='1' and partcol2='__HIVE_DEFAULT_PARTITION__')
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT `intcol`
+FROM `default`.`dynamic_part_table`
+WHERE `partcol1` = '1' AND (`partcol2` = '1' OR `partcol2` = '__HIVE_DEFAULT_PARTITION__')
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
 

http://git-wip-us.apache.org/repos/asf/hive/blob/6d15ce49/ql/src/test/results/clientpositive/encrypted/encryption_join_unencrypted_tbl.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/encrypted/encryption_join_unencrypted_tbl.q.out b/ql/src/test/results/clientpositive/encrypted/encryption_join_unencrypted_tbl.q.out
index b1942a1..f8f0a76 100644
--- a/ql/src/test/results/clientpositive/encrypted/encryption_join_unencrypted_tbl.q.out
+++ b/ql/src/test/results/clientpositive/encrypted/encryption_join_unencrypted_tbl.q.out
@@ -536,6 +536,13 @@ PREHOOK: query: EXPLAIN EXTENDED SELECT * FROM src t1 JOIN encrypted_table t2 WH
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN EXTENDED SELECT * FROM src t1 JOIN encrypted_table t2 WHERE t1.key = t2.key
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT *
+FROM (SELECT `key`, `value`
+FROM `default`.`src`
+WHERE `key` IS NOT NULL) AS `t0`
+INNER JOIN (SELECT `key`, `value`
+FROM `default`.`encrypted_table`
+WHERE `key` IS NOT NULL) AS `t2` ON CAST(`t0`.`key` AS DOUBLE) = CAST(`t2`.`key` AS DOUBLE)
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1

http://git-wip-us.apache.org/repos/asf/hive/blob/6d15ce49/ql/src/test/results/clientpositive/encrypted/encryption_join_with_different_encryption_keys.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/encrypted/encryption_join_with_different_encryption_keys.q.out b/ql/src/test/results/clientpositive/encrypted/encryption_join_with_different_encryption_keys.q.out
index be3af8f..59118f5 100644
--- a/ql/src/test/results/clientpositive/encrypted/encryption_join_with_different_encryption_keys.q.out
+++ b/ql/src/test/results/clientpositive/encrypted/encryption_join_with_different_encryption_keys.q.out
@@ -54,6 +54,13 @@ PREHOOK: query: EXPLAIN EXTENDED SELECT * FROM table_key_1 t1 JOIN table_key_2 t
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN EXTENDED SELECT * FROM table_key_1 t1 JOIN table_key_2 t2 WHERE (t1.key = t2.key)
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT *
+FROM (SELECT `key`, `value`
+FROM `default`.`table_key_1`
+WHERE `key` IS NOT NULL) AS `t0`
+INNER JOIN (SELECT `key`, `value`
+FROM `default`.`table_key_2`
+WHERE `key` IS NOT NULL) AS `t2` ON `t0`.`key` = `t2`.`key`
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1

http://git-wip-us.apache.org/repos/asf/hive/blob/6d15ce49/ql/src/test/results/clientpositive/erasurecoding/erasure_explain.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/erasurecoding/erasure_explain.q.out b/ql/src/test/results/clientpositive/erasurecoding/erasure_explain.q.out
index 8ada9b6..a93f36b 100644
--- a/ql/src/test/results/clientpositive/erasurecoding/erasure_explain.q.out
+++ b/ql/src/test/results/clientpositive/erasurecoding/erasure_explain.q.out
@@ -90,6 +90,8 @@ PREHOOK: query: explain extended select key, value from srcpart
 PREHOOK: type: QUERY
 POSTHOOK: query: explain extended select key, value from srcpart
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT `key`, `value`
+FROM `default`.`srcpart`
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
 
@@ -389,6 +391,8 @@ PREHOOK: query: explain extended select key, value from src
 PREHOOK: type: QUERY
 POSTHOOK: query: explain extended select key, value from src
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT `key`, `value`
+FROM `default`.`src`
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
 

http://git-wip-us.apache.org/repos/asf/hive/blob/6d15ce49/ql/src/test/results/clientpositive/extrapolate_part_stats_date.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/extrapolate_part_stats_date.q.out b/ql/src/test/results/clientpositive/extrapolate_part_stats_date.q.out
index 68abf2b..eadaaca 100644
--- a/ql/src/test/results/clientpositive/extrapolate_part_stats_date.q.out
+++ b/ql/src/test/results/clientpositive/extrapolate_part_stats_date.q.out
@@ -119,6 +119,8 @@ PREHOOK: query: explain extended select d_date from date_dim_n1
 PREHOOK: type: QUERY
 POSTHOOK: query: explain extended select d_date from date_dim_n1
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT `d_date`
+FROM `default`.`date_dim_n1`
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
 

http://git-wip-us.apache.org/repos/asf/hive/blob/6d15ce49/ql/src/test/results/clientpositive/extrapolate_part_stats_full.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/extrapolate_part_stats_full.q.out b/ql/src/test/results/clientpositive/extrapolate_part_stats_full.q.out
index f80599d..c4cd90c 100644
--- a/ql/src/test/results/clientpositive/extrapolate_part_stats_full.q.out
+++ b/ql/src/test/results/clientpositive/extrapolate_part_stats_full.q.out
@@ -105,6 +105,8 @@ PREHOOK: query: explain extended select state from loc_orc_1d
 PREHOOK: type: QUERY
 POSTHOOK: query: explain extended select state from loc_orc_1d
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT `state`
+FROM `default`.`loc_orc_1d`
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
 
@@ -220,6 +222,8 @@ PREHOOK: query: explain extended select state,locid from loc_orc_1d
 PREHOOK: type: QUERY
 POSTHOOK: query: explain extended select state,locid from loc_orc_1d
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT `state`, `locid`
+FROM `default`.`loc_orc_1d`
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
 
@@ -424,6 +428,8 @@ PREHOOK: query: explain extended select state from loc_orc_2d
 PREHOOK: type: QUERY
 POSTHOOK: query: explain extended select state from loc_orc_2d
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT `state`
+FROM `default`.`loc_orc_2d`
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
 
@@ -635,6 +641,8 @@ PREHOOK: query: explain extended select state,locid from loc_orc_2d
 PREHOOK: type: QUERY
 POSTHOOK: query: explain extended select state,locid from loc_orc_2d
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT `state`, `locid`
+FROM `default`.`loc_orc_2d`
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
 

http://git-wip-us.apache.org/repos/asf/hive/blob/6d15ce49/ql/src/test/results/clientpositive/extrapolate_part_stats_partial.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/extrapolate_part_stats_partial.q.out b/ql/src/test/results/clientpositive/extrapolate_part_stats_partial.q.out
index e1024be..3e7d0f4 100644
--- a/ql/src/test/results/clientpositive/extrapolate_part_stats_partial.q.out
+++ b/ql/src/test/results/clientpositive/extrapolate_part_stats_partial.q.out
@@ -131,6 +131,8 @@ PREHOOK: query: explain extended select state from loc_orc_1d_n1
 PREHOOK: type: QUERY
 POSTHOOK: query: explain extended select state from loc_orc_1d_n1
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT `state`
+FROM `default`.`loc_orc_1d_n1`
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
 
@@ -338,6 +340,8 @@ PREHOOK: query: explain extended select state,locid from loc_orc_1d_n1
 PREHOOK: type: QUERY
 POSTHOOK: query: explain extended select state,locid from loc_orc_1d_n1
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT `state`, `locid`
+FROM `default`.`loc_orc_1d_n1`
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
 
@@ -573,6 +577,8 @@ PREHOOK: query: explain extended select state from loc_orc_1d_n1
 PREHOOK: type: QUERY
 POSTHOOK: query: explain extended select state from loc_orc_1d_n1
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT `state`
+FROM `default`.`loc_orc_1d_n1`
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
 
@@ -780,6 +786,8 @@ PREHOOK: query: explain extended select state,locid from loc_orc_1d_n1
 PREHOOK: type: QUERY
 POSTHOOK: query: explain extended select state,locid from loc_orc_1d_n1
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT `state`, `locid`
+FROM `default`.`loc_orc_1d_n1`
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
 
@@ -1069,6 +1077,8 @@ PREHOOK: query: explain extended select state from loc_orc_2d_n1
 PREHOOK: type: QUERY
 POSTHOOK: query: explain extended select state from loc_orc_2d_n1
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT `state`
+FROM `default`.`loc_orc_2d_n1`
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
 
@@ -1609,6 +1619,8 @@ PREHOOK: query: explain extended select state,locid from loc_orc_2d_n1
 PREHOOK: type: QUERY
 POSTHOOK: query: explain extended select state,locid from loc_orc_2d_n1
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT `state`, `locid`
+FROM `default`.`loc_orc_2d_n1`
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
 

http://git-wip-us.apache.org/repos/asf/hive/blob/6d15ce49/ql/src/test/results/clientpositive/filter_aggr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/filter_aggr.q.out b/ql/src/test/results/clientpositive/filter_aggr.q.out
index 3bb650a..0b42dca 100644
--- a/ql/src/test/results/clientpositive/filter_aggr.q.out
+++ b/ql/src/test/results/clientpositive/filter_aggr.q.out
@@ -16,6 +16,9 @@ select key, c, 2 as m from (select key, count(key) as c from src group by key)s2
 )sub
 where m = 1
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT `key`, COUNT(`key`) AS `c`, 1 AS `m`
+FROM `default`.`src`
+GROUP BY `key`
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1

http://git-wip-us.apache.org/repos/asf/hive/blob/6d15ce49/ql/src/test/results/clientpositive/filter_join_breaktask.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/filter_join_breaktask.q.out b/ql/src/test/results/clientpositive/filter_join_breaktask.q.out
index a86997b..07843d0 100644
--- a/ql/src/test/results/clientpositive/filter_join_breaktask.q.out
+++ b/ql/src/test/results/clientpositive/filter_join_breaktask.q.out
@@ -28,6 +28,16 @@ SELECT f.key, g.value
 FROM filter_join_breaktask f JOIN filter_join_breaktask m ON( f.key = m.key AND f.ds='2008-04-08' AND m.ds='2008-04-08' AND f.key is not null) 
 JOIN filter_join_breaktask g ON(g.value = m.value AND g.ds='2008-04-08' AND m.ds='2008-04-08' AND m.value is not null AND m.value !='')
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT `t2`.`key`, `t0`.`value`
+FROM (SELECT `value`, CAST('2008-04-08' AS STRING) AS `ds`
+FROM `default`.`filter_join_breaktask`
+WHERE `ds` = '2008-04-08' AND `value` <> '') AS `t0`
+INNER JOIN ((SELECT `key`, CAST('2008-04-08' AS STRING) AS `ds`
+FROM `default`.`filter_join_breaktask`
+WHERE `ds` = '2008-04-08' AND `key` IS NOT NULL) AS `t2`
+INNER JOIN (SELECT `key`, `value`, CAST('2008-04-08' AS STRING) AS `ds`
+FROM `default`.`filter_join_breaktask`
+WHERE `ds` = '2008-04-08' AND `value` <> '' AND `key` IS NOT NULL) AS `t4` ON `t2`.`key` = `t4`.`key`) ON `t0`.`value` = `t4`.`value`
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-2 depends on stages: Stage-1

http://git-wip-us.apache.org/repos/asf/hive/blob/6d15ce49/ql/src/test/results/clientpositive/filter_union.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/filter_union.q.out b/ql/src/test/results/clientpositive/filter_union.q.out
index d0ac4ae..a7cd8c1 100644
--- a/ql/src/test/results/clientpositive/filter_union.q.out
+++ b/ql/src/test/results/clientpositive/filter_union.q.out
@@ -24,6 +24,13 @@ select key, c, 4 as m from (select key, count(key) as c from src group by key)s4
 )sub
 where m >2
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT `key`, COUNT(`key`) AS `c`, 3 AS `m`
+FROM `default`.`src`
+GROUP BY `key`
+UNION ALL
+SELECT `key`, COUNT(`key`) AS `c`, 4 AS `m`
+FROM `default`.`src`
+GROUP BY `key`
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-2 depends on stages: Stage-1, Stage-3

http://git-wip-us.apache.org/repos/asf/hive/blob/6d15ce49/ql/src/test/results/clientpositive/groupby_sort_1_23.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_sort_1_23.q.out b/ql/src/test/results/clientpositive/groupby_sort_1_23.q.out
index 210dae7..aaf89ae 100644
--- a/ql/src/test/results/clientpositive/groupby_sort_1_23.q.out
+++ b/ql/src/test/results/clientpositive/groupby_sort_1_23.q.out
@@ -42,6 +42,9 @@ POSTHOOK: query: EXPLAIN EXTENDED
 INSERT OVERWRITE TABLE outputTbl1_n18
 SELECT key, count(1) FROM T1_n80 GROUP BY key
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT `key`, COUNT(*) AS `$f1`
+FROM `default`.`t1_n80`
+GROUP BY `key`
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
@@ -485,6 +488,9 @@ POSTHOOK: query: EXPLAIN EXTENDED
 INSERT OVERWRITE TABLE outputTbl2_n5
 SELECT key, val, count(1) FROM T1_n80 GROUP BY key, val
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT `key`, `val`, COUNT(*) AS `$f2`
+FROM `default`.`t1_n80`
+GROUP BY `key`, `val`
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -793,6 +799,9 @@ POSTHOOK: query: EXPLAIN EXTENDED
 INSERT OVERWRITE TABLE outputTbl1_n18
 SELECT key, count(1) FROM (SELECT key, val FROM T1_n80) subq1 GROUP BY key
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT `key`, COUNT(*) AS `$f1`
+FROM `default`.`t1_n80`
+GROUP BY `key`
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
@@ -1228,6 +1237,9 @@ POSTHOOK: query: EXPLAIN EXTENDED
 INSERT OVERWRITE TABLE outputTbl1_n18
 SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1_n80) subq1 GROUP BY k
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT `key`, COUNT(*) AS `$f1`
+FROM `default`.`t1_n80`
+GROUP BY `key`
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
@@ -1671,6 +1683,9 @@ POSTHOOK: query: EXPLAIN EXTENDED
 INSERT OVERWRITE TABLE outputTbl3_n2
 SELECT 1, key, count(1) FROM T1_n80 GROUP BY 1, key
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT 1 AS `_o__c0`, `key`, COUNT(*) AS `_o__c2`
+FROM `default`.`t1_n80`
+GROUP BY `key`
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
@@ -2115,6 +2130,9 @@ POSTHOOK: query: EXPLAIN EXTENDED
 INSERT OVERWRITE TABLE outputTbl4_n2
 SELECT key, 1, val, count(1) FROM T1_n80 GROUP BY key, 1, val
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT `key`, 1 AS `_o__c1`, `val`, COUNT(*) AS `_o__c3`
+FROM `default`.`t1_n80`
+GROUP BY `key`, `val`
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -2424,6 +2442,9 @@ POSTHOOK: query: EXPLAIN EXTENDED
 INSERT OVERWRITE TABLE outputTbl3_n2
 SELECT key, key + 1, count(1) FROM T1_n80 GROUP BY key, key + 1
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT `key` AS `$f0`, CAST(`key` AS DOUBLE) + CAST(1 AS DOUBLE) AS `$f1`, COUNT(*) AS `$f2`
+FROM `default`.`t1_n80`
+GROUP BY `key`, CAST(`key` AS DOUBLE) + CAST(1 AS DOUBLE)
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -2735,6 +2756,9 @@ SELECT key + key, sum(cnt) from
 (SELECT key, count(1) as cnt FROM T1_n80 GROUP BY key) subq1
 group by key + key
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT CAST(`key` AS DOUBLE) + CAST(`key` AS DOUBLE) AS `$f0`, SUM(COUNT(*)) AS `$f1`
+FROM `default`.`t1_n80`
+GROUP BY CAST(`key` AS DOUBLE) + CAST(`key` AS DOUBLE)
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -3063,6 +3087,13 @@ SELECT key, count(1) FROM T1_n80 GROUP BY key
 SELECT key, count(1) FROM T1_n80 GROUP BY key
 ) subq1
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT `key`, COUNT(*) AS `$f1`
+FROM `default`.`t1_n80`
+GROUP BY `key`
+UNION ALL
+SELECT `key`, COUNT(*) AS `$f1`
+FROM `default`.`t1_n80`
+GROUP BY `key`
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
@@ -3590,6 +3621,13 @@ SELECT key, count(1) FROM T1_n80 GROUP BY key
 SELECT cast(key + key as string) as key, count(1) FROM T1_n80 GROUP BY key + key
 ) subq1
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT `key`, COUNT(*) AS `$f1`
+FROM `default`.`t1_n80`
+GROUP BY `key`
+UNION ALL
+SELECT CAST(CAST(`key` AS DOUBLE) + CAST(`key` AS DOUBLE) AS STRING) AS `key`, COUNT(*) AS `_o__c1`
+FROM `default`.`t1_n80`
+GROUP BY CAST(`key` AS DOUBLE) + CAST(`key` AS DOUBLE)
 STAGE DEPENDENCIES:
   Stage-9 is a root stage
   Stage-2 depends on stages: Stage-9
@@ -4244,6 +4282,15 @@ JOIN
 (SELECT key, count(1) as cnt FROM T1_n80 GROUP BY key) subq2
 ON subq1.key = subq2.key
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT `t0`.`key`, `t0`.`$f1` + `t2`.`$f1` AS `_o__c1`
+FROM (SELECT `key`, COUNT(*) AS `$f1`
+FROM `default`.`t1_n80`
+WHERE `key` IS NOT NULL
+GROUP BY `key`) AS `t0`
+INNER JOIN (SELECT `key`, COUNT(*) AS `$f1`
+FROM `default`.`t1_n80`
+WHERE `key` IS NOT NULL
+GROUP BY `key`) AS `t2` ON `t0`.`key` = `t2`.`key`
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -4590,6 +4637,15 @@ JOIN
 (SELECT key, val, count(1) FROM T1_n80 GROUP BY key, val) subq2
 ON subq1.key = subq2.key
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT *
+FROM (SELECT `key`, COUNT(*) AS `$f1`
+FROM `default`.`t1_n80`
+WHERE `key` IS NOT NULL
+GROUP BY `key`) AS `t0`
+INNER JOIN (SELECT `key`, `val`, COUNT(*) AS `$f2`
+FROM `default`.`t1_n80`
+WHERE `key` IS NOT NULL
+GROUP BY `key`, `val`) AS `t2` ON `t0`.`key` = `t2`.`key`
 STAGE DEPENDENCIES:
   Stage-2 is a root stage
   Stage-1 depends on stages: Stage-2
@@ -4893,6 +4949,9 @@ POSTHOOK: query: EXPLAIN EXTENDED
 INSERT OVERWRITE TABLE outputTbl1_n18
 SELECT key, count(1) FROM T2_n49 GROUP BY key
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT `key`, COUNT(*) AS `$f1`
+FROM `default`.`t2_n49`
+GROUP BY `key`
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -5200,6 +5259,9 @@ POSTHOOK: query: EXPLAIN EXTENDED
 INSERT OVERWRITE TABLE outputTbl4_n2
 SELECT key, 1, val, count(1) FROM T2_n49 GROUP BY key, 1, val
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT `key`, 1 AS `_o__c1`, `val`, COUNT(*) AS `_o__c3`
+FROM `default`.`t2_n49`
+GROUP BY `key`, `val`
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
@@ -5646,6 +5708,9 @@ POSTHOOK: query: EXPLAIN EXTENDED
 INSERT OVERWRITE TABLE outputTbl5_n2
 SELECT key, 1, val, 2, count(1) FROM T2_n49 GROUP BY key, 1, val, 2
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT `key`, 1 AS `_o__c1`, `val`, 2 AS `_o__c3`, COUNT(*) AS `_o__c4`
+FROM `default`.`t2_n49`
+GROUP BY `key`, `val`
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
@@ -6091,6 +6156,9 @@ SELECT key, constant, val, count(1) from
 (SELECT key, 1 as constant, val from T2_n49)subq
 group by key, constant, val
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT `key`, 1 AS `constant`, `val`, COUNT(*) AS `_o__c3`
+FROM `default`.`t2_n49`
+GROUP BY `key`, `val`
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
@@ -6543,6 +6611,9 @@ SELECT key, constant as constant2, val, 2 as constant3 from
 )subq2
 group by key, constant3, val
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT `key`, 2 AS `constant3`, `val`, COUNT(*) AS `_o__c3`
+FROM `default`.`t2_n49`
+GROUP BY `key`, `val`
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5

http://git-wip-us.apache.org/repos/asf/hive/blob/6d15ce49/ql/src/test/results/clientpositive/groupby_sort_6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_sort_6.q.out b/ql/src/test/results/clientpositive/groupby_sort_6.q.out
index 2bc7e41..bc31a57 100644
--- a/ql/src/test/results/clientpositive/groupby_sort_6.q.out
+++ b/ql/src/test/results/clientpositive/groupby_sort_6.q.out
@@ -22,6 +22,10 @@ POSTHOOK: query: EXPLAIN EXTENDED
 INSERT OVERWRITE TABLE outputTbl1_n15
 SELECT key, count(1) FROM T1_n61 where ds = '1' GROUP BY key
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT `key`, COUNT(*) AS `$f1`
+FROM `default`.`t1_n61`
+WHERE `ds` = '1'
+GROUP BY `key`
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -280,6 +284,10 @@ POSTHOOK: query: EXPLAIN EXTENDED
 INSERT OVERWRITE TABLE outputTbl1_n15
 SELECT key, count(1) FROM T1_n61 where ds = '1' GROUP BY key
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT `key`, COUNT(*) AS `$f1`
+FROM `default`.`t1_n61`
+WHERE `ds` = '1'
+GROUP BY `key`
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -529,6 +537,10 @@ POSTHOOK: query: EXPLAIN EXTENDED
 INSERT OVERWRITE TABLE outputTbl1_n15
 SELECT key, count(1) FROM T1_n61 where ds = '2' GROUP BY key
 POSTHOOK: type: QUERY
+OPTIMIZED SQL: SELECT `key`, COUNT(*) AS `$f1`
+FROM `default`.`t1_n61`
+WHERE `ds` = '2'
+GROUP BY `key`
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1