Posted to commits@hive.apache.org by px...@apache.org on 2016/05/09 17:42:03 UTC

[21/21] hive git commit: HIVE-13341: Stats state is not captured correctly: differentiate load table and create table (Pengcheng Xiong, reviewed by Ashutosh Chauhan)

HIVE-13341: Stats state is not captured correctly: differentiate load table and create table (Pengcheng Xiong, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/244ce09c
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/244ce09c
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/244ce09c

Branch: refs/heads/master
Commit: 244ce09c935050b7d30c8beb507e50a1413ae70e
Parents: 2a8e388
Author: Pengcheng Xiong <px...@apache.org>
Authored: Mon May 9 10:01:38 2016 -0700
Committer: Pengcheng Xiong <px...@apache.org>
Committed: Mon May 9 10:41:03 2016 -0700

----------------------------------------------------------------------
 .../hadoop/hive/common/StatsSetupConst.java     |  19 +-
 .../hadoop/hive/ql/history/TestHiveHistory.java |   2 +-
 .../hadoop/hive/metastore/MetaStoreUtils.java   |   7 +
 .../apache/hadoop/hive/ql/exec/MoveTask.java    |  20 +-
 .../apache/hadoop/hive/ql/exec/StatsTask.java   |  73 +-
 .../apache/hadoop/hive/ql/metadata/Hive.java    |  34 +-
 .../hive/ql/parse/DDLSemanticAnalyzer.java      |  14 +
 .../hive/ql/parse/ImportSemanticAnalyzer.java   |   2 +-
 .../hadoop/hive/ql/plan/CreateTableDesc.java    |  11 +
 .../hadoop/hive/ql/exec/TestExecDriver.java     |   2 +-
 .../clientpositive/alter_table_add_partition.q  |  13 +
 .../insert_values_orig_table_use_metadata.q     | 121 +++
 ql/src/test/queries/clientpositive/stats20.q    |   2 +
 .../clientnegative/alter_file_format.q.out      |   5 +
 .../clientnegative/unset_table_property.q.out   |   2 +
 .../clientpositive/alter_file_format.q.out      |  37 +
 .../alter_partition_clusterby_sortby.q.out      |   9 +
 .../clientpositive/alter_skewed_table.q.out     |  21 +
 .../alter_table_add_partition.q.out             | 202 ++++
 .../clientpositive/alter_table_not_sorted.q.out |   7 +
 .../clientpositive/auto_sortmerge_join_1.q.out  |  30 +
 .../clientpositive/auto_sortmerge_join_11.q.out |  32 +
 .../clientpositive/auto_sortmerge_join_12.q.out |  14 +
 .../clientpositive/auto_sortmerge_join_2.q.out  |  26 +
 .../clientpositive/auto_sortmerge_join_3.q.out  |  24 +
 .../clientpositive/auto_sortmerge_join_4.q.out  |  24 +
 .../clientpositive/auto_sortmerge_join_5.q.out  |  24 +
 .../clientpositive/auto_sortmerge_join_7.q.out  |  36 +
 .../clientpositive/auto_sortmerge_join_8.q.out  |  36 +
 .../clientpositive/binary_output_format.q.out   |  40 +
 .../test/results/clientpositive/bucket1.q.out   |  10 +
 .../test/results/clientpositive/bucket2.q.out   |  10 +
 .../test/results/clientpositive/bucket4.q.out   |  10 +
 .../test/results/clientpositive/bucket5.q.out   |  50 +
 .../results/clientpositive/bucket_many.q.out    |  10 +
 .../clientpositive/bucket_map_join_1.q.out      |   4 +
 .../clientpositive/bucket_map_join_2.q.out      |   4 +
 .../clientpositive/bucket_map_join_spark1.q.out |  22 +
 .../clientpositive/bucket_map_join_spark2.q.out |  22 +
 .../clientpositive/bucket_map_join_spark3.q.out |  22 +
 .../clientpositive/bucketcontext_1.q.out        |  10 +
 .../clientpositive/bucketcontext_2.q.out        |  10 +
 .../clientpositive/bucketcontext_3.q.out        |   8 +
 .../clientpositive/bucketcontext_4.q.out        |   8 +
 .../clientpositive/bucketcontext_5.q.out        |   8 +
 .../clientpositive/bucketcontext_6.q.out        |   8 +
 .../clientpositive/bucketcontext_7.q.out        |  12 +
 .../clientpositive/bucketcontext_8.q.out        |  12 +
 .../results/clientpositive/bucketmapjoin1.q.out |  48 +
 .../clientpositive/bucketmapjoin10.q.out        |   8 +
 .../clientpositive/bucketmapjoin11.q.out        |  16 +
 .../clientpositive/bucketmapjoin12.q.out        |   8 +
 .../results/clientpositive/bucketmapjoin2.q.out |  54 +
 .../results/clientpositive/bucketmapjoin3.q.out |  48 +
 .../results/clientpositive/bucketmapjoin4.q.out |  48 +
 .../results/clientpositive/bucketmapjoin5.q.out |  48 +
 .../results/clientpositive/bucketmapjoin7.q.out |   4 +
 .../results/clientpositive/bucketmapjoin8.q.out |   8 +
 .../results/clientpositive/bucketmapjoin9.q.out |   8 +
 .../clientpositive/bucketmapjoin_negative.q.out |  46 +
 .../bucketmapjoin_negative2.q.out               |  48 +
 .../bucketmapjoin_negative3.q.out               |  36 +
 .../columnStatsUpdateForStatsOptimizer_1.q.out  |   2 +
 ...names_with_leading_and_trailing_spaces.q.out |   5 +
 .../clientpositive/columnstats_partlvl.q.out    |   4 +
 .../clientpositive/columnstats_tbllvl.q.out     |   8 +
 .../create_alter_list_bucketing_table1.q.out    |  15 +
 .../results/clientpositive/create_like.q.out    |  12 +
 .../clientpositive/create_like_view.q.out       |   5 +
 .../clientpositive/create_skewed_table1.q.out   |  15 +
 .../clientpositive/database_location.q.out      |  10 +
 .../clientpositive/default_file_format.q.out    |  15 +
 .../describe_comment_indent.q.out               |   5 +
 .../describe_comment_nonascii.q.out             |   2 +
 .../describe_formatted_view_partitioned.q.out   |   5 +
 .../clientpositive/describe_syntax.q.out        |  10 +
 .../disable_merge_for_bucketing.q.out           |  10 +
 .../display_colstats_tbllvl.q.out               |   4 +
 .../encryption_join_unencrypted_tbl.q.out       |   4 +
 .../clientpositive/groupby_map_ppr.q.out        |  10 +
 .../groupby_map_ppr_multi_distinct.q.out        |  10 +
 .../results/clientpositive/groupby_ppr.q.out    |  10 +
 .../groupby_ppr_multi_distinct.q.out            |  10 +
 .../clientpositive/groupby_sort_1_23.q.out      | 140 +++
 .../results/clientpositive/groupby_sort_6.q.out |  12 +
 .../clientpositive/groupby_sort_skew_1_23.q.out | 140 +++
 .../results/clientpositive/input_part1.q.out    |  40 +
 .../results/clientpositive/input_part2.q.out    |  80 ++
 .../insert_values_orig_table_use_metadata.q.out | 994 +++++++++++++++++++
 ql/src/test/results/clientpositive/join17.q.out |  10 +
 ql/src/test/results/clientpositive/join26.q.out |  40 +
 ql/src/test/results/clientpositive/join32.q.out |  10 +
 .../clientpositive/join32_lessSize.q.out        |  20 +
 ql/src/test/results/clientpositive/join33.q.out |  10 +
 ql/src/test/results/clientpositive/join34.q.out |  15 +
 ql/src/test/results/clientpositive/join35.q.out |  15 +
 ql/src/test/results/clientpositive/join9.q.out  |  10 +
 .../results/clientpositive/join_map_ppr.q.out   |  40 +
 .../clientpositive/list_bucket_dml_14.q.out     |  10 +
 .../list_bucket_dml_8.q.java1.7.out             |  10 +-
 .../clientpositive/mapjoin_memcheck.q.out       |  16 +-
 .../results/clientpositive/metadataonly1.q.out  | 100 ++
 .../results/clientpositive/nullformat.q.out     |   5 +
 .../results/clientpositive/orc_create.q.out     |  10 +
 .../test/results/clientpositive/orc_llap.q.out  |  54 +-
 .../clientpositive/orc_predicate_pushdown.q.out | 128 +--
 .../parquet_array_null_element.q.out            |   5 +
 .../results/clientpositive/parquet_create.q.out |   5 +
 .../parquet_mixed_partition_formats.q.out       |   4 +
 .../results/clientpositive/parquet_serde.q.out  |   4 +
 .../clientpositive/part_inherit_tbl_props.q.out |   5 +
 .../part_inherit_tbl_props_empty.q.out          |   5 +
 .../part_inherit_tbl_props_with_star.q.out      |   5 +
 .../partition_coltype_literals.q.out            |  14 +
 ql/src/test/results/clientpositive/pcr.q.out    |  80 ++
 .../clientpositive/rand_partitionpruner2.q.out  |  40 +
 .../clientpositive/rcfile_default_format.q.out  |  15 +
 .../clientpositive/reduce_deduplicate.q.out     |  10 +
 .../test/results/clientpositive/sample1.q.out   |  40 +
 .../test/results/clientpositive/sample2.q.out   |  40 +
 .../test/results/clientpositive/sample4.q.out   |  40 +
 .../test/results/clientpositive/sample5.q.out   |  40 +
 .../test/results/clientpositive/sample6.q.out   |  40 +
 .../test/results/clientpositive/sample7.q.out   |  40 +
 ...schema_evol_orc_nonvec_fetchwork_table.q.out | 250 ++---
 .../schema_evol_orc_nonvec_mapwork_table.q.out  | 250 ++---
 .../schema_evol_orc_vec_mapwork_table.q.out     | 250 ++---
 .../schema_evol_text_nonvec_mapwork_table.q.out | 250 ++---
 .../schema_evol_text_vec_mapwork_table.q.out    | 250 ++---
 .../schema_evol_text_vecrow_mapwork_table.q.out | 250 ++---
 .../show_create_table_alter.q.out               |  13 +
 .../show_create_table_db_table.q.out            |   5 +
 .../show_create_table_serde.q.out               |  17 +
 .../clientpositive/show_tblproperties.q.out     |  10 +
 .../spark/auto_sortmerge_join_1.q.out           |  14 +
 .../spark/auto_sortmerge_join_12.q.out          |  10 +
 .../spark/auto_sortmerge_join_3.q.out           |  10 +
 .../spark/auto_sortmerge_join_4.q.out           |  10 +
 .../spark/auto_sortmerge_join_5.q.out           |  16 +
 .../spark/auto_sortmerge_join_7.q.out           |  16 +
 .../spark/auto_sortmerge_join_8.q.out           |  16 +
 .../results/clientpositive/spark/bucket2.q.out  |  10 +
 .../results/clientpositive/spark/bucket4.q.out  |  10 +
 .../results/clientpositive/spark/bucket5.q.out  |  20 +
 .../spark/bucket_map_join_1.q.out               |   8 +
 .../spark/bucket_map_join_2.q.out               |   8 +
 .../spark/bucket_map_join_spark1.q.out          |  18 +
 .../spark/bucket_map_join_spark2.q.out          |  18 +
 .../spark/bucket_map_join_spark3.q.out          |  18 +
 .../clientpositive/spark/bucketmapjoin1.q.out   |  22 +
 .../clientpositive/spark/bucketmapjoin10.q.out  |   8 +
 .../clientpositive/spark/bucketmapjoin11.q.out  |  16 +
 .../clientpositive/spark/bucketmapjoin12.q.out  |   8 +
 .../clientpositive/spark/bucketmapjoin2.q.out   |  24 +
 .../clientpositive/spark/bucketmapjoin3.q.out   |  18 +
 .../clientpositive/spark/bucketmapjoin4.q.out   |  26 +
 .../clientpositive/spark/bucketmapjoin5.q.out   |  26 +
 .../clientpositive/spark/bucketmapjoin7.q.out   |   4 +
 .../clientpositive/spark/bucketmapjoin8.q.out   |   8 +
 .../clientpositive/spark/bucketmapjoin9.q.out   |   8 +
 .../spark/bucketmapjoin_negative.q.out          |  16 +
 .../spark/bucketmapjoin_negative2.q.out         |  18 +
 .../spark/bucketmapjoin_negative3.q.out         |  72 ++
 .../spark/disable_merge_for_bucketing.q.out     |  10 +
 .../clientpositive/spark/groupby_map_ppr.q.out  |  10 +
 .../spark/groupby_map_ppr_multi_distinct.q.out  |  10 +
 .../clientpositive/spark/groupby_ppr.q.out      |  10 +
 .../spark/groupby_ppr_multi_distinct.q.out      |  10 +
 .../spark/groupby_sort_1_23.q.out               |  50 +
 .../spark/groupby_sort_skew_1_23.q.out          |  50 +
 .../clientpositive/spark/input_part2.q.out      |  20 +
 .../results/clientpositive/spark/join17.q.out   |  10 +
 .../results/clientpositive/spark/join26.q.out   |  10 +
 .../results/clientpositive/spark/join32.q.out   |  10 +
 .../clientpositive/spark/join32_lessSize.q.out  |  20 +
 .../results/clientpositive/spark/join33.q.out   |  10 +
 .../results/clientpositive/spark/join34.q.out   |  10 +
 .../results/clientpositive/spark/join35.q.out   |  10 +
 .../results/clientpositive/spark/join9.q.out    |  10 +
 .../clientpositive/spark/join_map_ppr.q.out     |  10 +
 .../clientpositive/spark/mapjoin_memcheck.q.out |  16 +-
 .../test/results/clientpositive/spark/pcr.q.out |  20 +
 .../spark/reduce_deduplicate.q.out              |  10 +
 .../results/clientpositive/spark/sample1.q.out  |  10 +
 .../results/clientpositive/spark/sample2.q.out  |  10 +
 .../results/clientpositive/spark/sample4.q.out  |  10 +
 .../results/clientpositive/spark/sample5.q.out  |  10 +
 .../results/clientpositive/spark/sample6.q.out  |  10 +
 .../results/clientpositive/spark/sample7.q.out  |  10 +
 .../results/clientpositive/spark/stats0.q.out   |  20 +
 .../results/clientpositive/spark/stats1.q.out   |   2 +
 .../results/clientpositive/spark/stats18.q.out  |   2 +
 .../results/clientpositive/spark/stats20.q.out  |  78 ++
 .../results/clientpositive/spark/stats3.q.out   |   7 +
 .../clientpositive/spark/vectorized_ptf.q.out   |  20 +
 ql/src/test/results/clientpositive/stats0.q.out |  50 +
 ql/src/test/results/clientpositive/stats1.q.out |   2 +
 .../test/results/clientpositive/stats11.q.out   |  56 ++
 .../test/results/clientpositive/stats18.q.out   |   2 +
 .../test/results/clientpositive/stats20.q.out   |  78 ++
 ql/src/test/results/clientpositive/stats3.q.out |   7 +
 .../tez/auto_sortmerge_join_1.q.out             |  18 +
 .../tez/auto_sortmerge_join_11.q.out            |  28 +
 .../tez/auto_sortmerge_join_12.q.out            |  10 +
 .../tez/auto_sortmerge_join_2.q.out             |  12 +
 .../tez/auto_sortmerge_join_3.q.out             |  18 +
 .../tez/auto_sortmerge_join_4.q.out             |  18 +
 .../tez/auto_sortmerge_join_5.q.out             |  24 +
 .../tez/auto_sortmerge_join_7.q.out             |  24 +
 .../tez/auto_sortmerge_join_8.q.out             |  24 +
 .../results/clientpositive/tez/bucket2.q.out    |  10 +
 .../results/clientpositive/tez/bucket4.q.out    |  10 +
 ...names_with_leading_and_trailing_spaces.q.out |   5 +
 .../tez/disable_merge_for_bucketing.q.out       |  10 +
 .../clientpositive/tez/explainuser_1.q.out      |  18 +-
 .../clientpositive/tez/metadataonly1.q.out      | 100 ++
 .../results/clientpositive/tez/sample1.q.out    |  10 +
 ...schema_evol_orc_nonvec_fetchwork_table.q.out | 154 +--
 .../schema_evol_orc_nonvec_mapwork_table.q.out  | 154 +--
 .../tez/schema_evol_orc_vec_mapwork_table.q.out | 154 +--
 .../schema_evol_text_nonvec_mapwork_table.q.out | 154 +--
 .../schema_evol_text_vec_mapwork_table.q.out    | 154 +--
 .../schema_evol_text_vecrow_mapwork_table.q.out | 154 +--
 .../tez/tez_join_result_complex.q.out           |  16 +
 .../clientpositive/tez/vectorized_ptf.q.out     |  20 +
 .../clientpositive/truncate_column.q.out        |  12 +
 .../truncate_column_list_bucket.q.out           |  16 +-
 .../clientpositive/unicode_notation.q.out       |  15 +
 .../unset_table_view_property.q.out             |  21 +
 .../results/clientpositive/vectorized_ptf.q.out |  20 +
 230 files changed, 6837 insertions(+), 1336 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/244ce09c/common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java b/common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java
index 41d150c..1466b69 100644
--- a/common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java
+++ b/common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java
@@ -222,16 +222,6 @@ public class StatsSetupConst {
           // old format of statsAcc, e.g., TRUE or FALSE
           LOG.debug("In StatsSetupConst, JsonParser can not parse statsAcc.");
           stats = new JSONObject(new LinkedHashMap());
-          try {
-            if (statsAcc.equals(TRUE)) {
-              stats.put(BASIC_STATS, TRUE);
-            } else {
-              stats.put(BASIC_STATS, FALSE);
-            }
-          } catch (JSONException e1) {
-            // impossible to throw any json exceptions.
-            LOG.trace(e1.getMessage());
-          }
         }
         if (!stats.has(BASIC_STATS)) {
           // duplicate key is not possible
@@ -332,4 +322,13 @@ public class StatsSetupConst {
       params.put(COLUMN_STATS_ACCURATE, stats.toString());
     }
   }
+
+  public static void setBasicStatsStateForCreateTable(Map<String, String> params, String setting) {
+    if (TRUE.equals(setting)) {
+      for (String stat : StatsSetupConst.supportedStats) {
+        params.put(stat, "0");
+      }
+    }
+    setBasicStatsState(params, setting);
+  }
 }
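
For readers skimming the diff: setBasicStatsStateForCreateTable() first seeds all four basic stats from StatsSetupConst.supportedStats (numFiles, numRows, totalSize, rawDataSize) to 0, then delegates to setBasicStatsState(), which records the accuracy flag in the COLUMN_STATS_ACCURATE JSON blob. Zeros are only sound because a table or partition created without data is provably empty. A minimal self-contained sketch of the effect on a parameter map (the class below is a stand-in, not Hive's StatsSetupConst):

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class CreateTableStatsSketch {
      static final String TRUE = "true";
      // mirrors StatsSetupConst.supportedStats
      static final String[] SUPPORTED_STATS =
          {"numFiles", "numRows", "totalSize", "rawDataSize"};

      static void setBasicStatsStateForCreateTable(Map<String, String> params,
                                                   String setting) {
        if (TRUE.equals(setting)) {
          // a freshly created table/partition is empty, so zeros are exact
          for (String stat : SUPPORTED_STATS) {
            params.put(stat, "0");
          }
        }
        // stand-in for setBasicStatsState(): record the accuracy flag
        params.put("COLUMN_STATS_ACCURATE",
            "{\"BASIC_STATS\":\"" + setting + "\"}");
      }

      public static void main(String[] args) {
        Map<String, String> params = new LinkedHashMap<>();
        setBasicStatsStateForCreateTable(params, TRUE);
        System.out.println(params);
        // {numFiles=0, numRows=0, totalSize=0, rawDataSize=0,
        //  COLUMN_STATS_ACCURATE={"BASIC_STATS":"true"}}
      }
    }

This is exactly the parameter shape that shows up in the .q.out changes further down (COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} plus the four zeroed counters).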

http://git-wip-us.apache.org/repos/asf/hive/blob/244ce09c/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java
index c046708..76c1636 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java
@@ -103,7 +103,7 @@ public class TestHiveHistory extends TestCase {
         db.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, src, true, true);
         db.createTable(src, cols, null, TextInputFormat.class,
             IgnoreKeyTextOutputFormat.class);
-        db.loadTable(hadoopDataFile[i], src, false, false, false, false);
+        db.loadTable(hadoopDataFile[i], src, false, false, false, false, false);
         i++;
       }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/244ce09c/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
index 76220f4..da3da8b 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
@@ -265,6 +265,13 @@ public class MetaStoreUtils {
   public static boolean requireCalStats(Configuration hiveConf, Partition oldPart,
     Partition newPart, Table tbl, EnvironmentContext environmentContext) {
 
+    if (environmentContext != null
+        && environmentContext.isSetProperties()
+        && StatsSetupConst.TRUE.equals(environmentContext.getProperties().get(
+            StatsSetupConst.DO_NOT_UPDATE_STATS))) {
+      return false;
+    }
+
     if (MetaStoreUtils.isView(tbl)) {
       return false;
     }
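
This is the other half of a handshake: loadTable()/loadPartition() (further down in this commit) tag their alter calls with DO_NOT_UPDATE_STATS=true whenever a StatsTask is queued behind the MoveTask, and this new leading check in requireCalStats() makes the metastore skip a stats recalculation that would be overwritten moments later. A simplified, runnable stand-in (a plain Map replaces the Thrift EnvironmentContext):

    import java.util.HashMap;
    import java.util.Map;

    public class DoNotUpdateStatsSketch {
      static final String DO_NOT_UPDATE_STATS = "DO_NOT_UPDATE_STATS";
      static final String TRUE = "true";

      // stand-in for the new leading check in MetaStoreUtils.requireCalStats()
      static boolean requireCalStats(Map<String, String> environmentContext) {
        if (environmentContext != null
            && TRUE.equals(environmentContext.get(DO_NOT_UPDATE_STATS))) {
          return false; // a following StatsTask owns the stats
        }
        return true; // ...fall through to the existing view/partition checks
      }

      public static void main(String[] args) {
        Map<String, String> ctx = new HashMap<>();
        ctx.put(DO_NOT_UPDATE_STATS, TRUE);
        System.out.println(requireCalStats(ctx));  // false
        System.out.println(requireCalStats(null)); // true
      }
    }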

http://git-wip-us.apache.org/repos/asf/hive/blob/244ce09c/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
index c2c6c65..bdda89a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
@@ -223,6 +223,14 @@ public class MoveTask extends Task<MoveWork> implements Serializable {
     }
   }
 
+  // we check if there is exactly one immediate child task and it is a StatsTask
+  private boolean hasFollowingStatsTask() {
+    if (this.getNumChild() == 1) {
+      return this.getChildTasks().get(0) instanceof StatsTask;
+    }
+    return false;
+  }
+
   @Override
   public int execute(DriverContext driverContext) {
 
@@ -336,10 +344,10 @@ public class MoveTask extends Task<MoveWork> implements Serializable {
         DataContainer dc = null;
         if (tbd.getPartitionSpec().size() == 0) {
           dc = new DataContainer(table.getTTable());
-          db.loadTable(tbd.getSourcePath(), tbd.getTable()
-              .getTableName(), tbd.getReplace(), work.isSrcLocal(),
-              isSkewedStoredAsDirs(tbd),
-              work.getLoadTableWork().getWriteType() != AcidUtils.Operation.NOT_ACID);
+          db.loadTable(tbd.getSourcePath(), tbd.getTable().getTableName(), tbd.getReplace(),
+              work.isSrcLocal(), isSkewedStoredAsDirs(tbd),
+              work.getLoadTableWork().getWriteType() != AcidUtils.Operation.NOT_ACID,
+              hasFollowingStatsTask());
           if (work.getOutputs() != null) {
             work.getOutputs().add(new WriteEntity(table,
                 (tbd.getReplace() ? WriteEntity.WriteType.INSERT_OVERWRITE :
@@ -421,7 +429,7 @@ public class MoveTask extends Task<MoveWork> implements Serializable {
                 dpCtx.getNumDPCols(),
                 isSkewedStoredAsDirs(tbd),
                 work.getLoadTableWork().getWriteType() != AcidUtils.Operation.NOT_ACID,
-                SessionState.get().getTxnMgr().getCurrentTxnId());
+                SessionState.get().getTxnMgr().getCurrentTxnId(), hasFollowingStatsTask());
 
             console.printInfo("\t Time taken to load dynamic partitions: "  +
                 (System.currentTimeMillis() - startTime)/1000.0 + " seconds");
@@ -480,7 +488,7 @@ public class MoveTask extends Task<MoveWork> implements Serializable {
             db.loadPartition(tbd.getSourcePath(), tbd.getTable().getTableName(),
                 tbd.getPartitionSpec(), tbd.getReplace(),
                 tbd.getInheritTableSpecs(), isSkewedStoredAsDirs(tbd), work.isSrcLocal(),
-                work.getLoadTableWork().getWriteType() != AcidUtils.Operation.NOT_ACID);
+                work.getLoadTableWork().getWriteType() != AcidUtils.Operation.NOT_ACID, hasFollowingStatsTask());
             Partition partn = db.getPartition(table, tbd.getPartitionSpec(), false);
 
             if (bucketCols != null || sortCols != null) {
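
hasFollowingStatsTask() is deliberately narrow: it reports true only when the MoveTask has exactly one immediate child and that child is a StatsTask, i.e. only when something downstream is guaranteed to refresh the stats. A toy model (Task and StatsTask below are stand-ins, not the Hive classes):

    import java.util.Arrays;
    import java.util.List;

    public class FollowingStatsTaskSketch {
      static class Task {}
      static class StatsTask extends Task {}

      static boolean hasFollowingStatsTask(List<Task> children) {
        return children != null && children.size() == 1
            && children.get(0) instanceof StatsTask;
      }

      public static void main(String[] args) {
        System.out.println(hasFollowingStatsTask(
            Arrays.<Task>asList(new StatsTask())));             // true
        System.out.println(hasFollowingStatsTask(
            Arrays.<Task>asList(new Task())));                  // false
        System.out.println(hasFollowingStatsTask(
            Arrays.<Task>asList(new StatsTask(), new Task()))); // false
      }
    }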

http://git-wip-us.apache.org/repos/asf/hive/blob/244ce09c/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java
index 87a7667..f3c7e99 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java
@@ -88,7 +88,7 @@ public class StatsTask extends Task<StatsWork> implements Serializable {
   public int execute(DriverContext driverContext) {
 
     LOG.info("Executing stats task");
-    // Make sure that it is either an ANALYZE, INSERT OVERWRITE or CTAS command
+    // Make sure that it is either an ANALYZE, an INSERT OVERWRITE (possibly via LOAD), or a CTAS command
     short workComponentsPresent = 0;
     if (work.getLoadTableDesc() != null) {
       workComponentsPresent++;
@@ -163,6 +163,16 @@ public class StatsTask extends Task<StatsWork> implements Serializable {
       if (partitions == null) {
         org.apache.hadoop.hive.metastore.api.Table tTable = table.getTTable();
         Map<String, String> parameters = tTable.getParameters();
+        // In the following scenarios, we need to reset the stats state to accurate (true):
+        // work.getTableSpecs() != null means an ANALYZE command;
+        // work.getLoadTableDesc().getReplace() == true means an INSERT OVERWRITE command;
+        // a non-empty work.getLoadFileDesc().getDestinationCreateTable() means CTAS etc.
+        if (work.getTableSpecs() != null
+            || (work.getLoadTableDesc() != null && work.getLoadTableDesc().getReplace())
+            || (work.getLoadFileDesc() != null && !work.getLoadFileDesc()
+                .getDestinationCreateTable().isEmpty())) {
+          StatsSetupConst.setBasicStatsState(parameters, StatsSetupConst.TRUE);
+        }
         // non-partitioned tables:
         if (!existStats(parameters) && atomic) {
           return 0;
@@ -171,20 +181,22 @@ public class StatsTask extends Task<StatsWork> implements Serializable {
         // The collectable stats for the aggregator needs to be cleared.
         // For eg. if a file is being loaded, the old number of rows are not valid
         if (work.isClearAggregatorStats()) {
-          clearStats(parameters);
-        }
-
-        if (statsAggregator != null) {
-          String prefix = getAggregationPrefix(table, null);
-          updateStats(statsAggregator, parameters, prefix, atomic);
+          // we choose to keep the invalid stats and only change the setting.
+          StatsSetupConst.setBasicStatsState(parameters, StatsSetupConst.FALSE);
         }
 
         updateQuickStats(wh, parameters, tTable.getSd());
-
-        // write table stats to metastore
-        if (!getWork().getNoStatsAggregator()) {
-          environmentContext = new EnvironmentContext();
-          environmentContext.putToProperties(StatsSetupConst.STATS_GENERATED, StatsSetupConst.TASK);
+        if (StatsSetupConst.areBasicStatsUptoDate(parameters)) {
+          if (statsAggregator != null) {
+            String prefix = getAggregationPrefix(table, null);
+            updateStats(statsAggregator, parameters, prefix, atomic);
+          }
+          // write table stats to metastore
+          if (!getWork().getNoStatsAggregator()) {
+            environmentContext = new EnvironmentContext();
+            environmentContext.putToProperties(StatsSetupConst.STATS_GENERATED,
+                StatsSetupConst.TASK);
+          }
         }
 
         getHive().alterTable(tableFullName, new Table(tTable), environmentContext);
@@ -203,6 +215,12 @@ public class StatsTask extends Task<StatsWork> implements Serializable {
           //
           org.apache.hadoop.hive.metastore.api.Partition tPart = partn.getTPartition();
           Map<String, String> parameters = tPart.getParameters();
+          if (work.getTableSpecs() != null
+              || (work.getLoadTableDesc() != null && work.getLoadTableDesc().getReplace())
+              || (work.getLoadFileDesc() != null && !work.getLoadFileDesc()
+                  .getDestinationCreateTable().isEmpty())) {
+            StatsSetupConst.setBasicStatsState(parameters, StatsSetupConst.TRUE);
+          }
           if (!existStats(parameters) && atomic) {
             continue;
           }
@@ -210,20 +228,21 @@ public class StatsTask extends Task<StatsWork> implements Serializable {
           // The collectable stats for the aggregator needs to be cleared.
           // For eg. if a file is being loaded, the old number of rows are not valid
           if (work.isClearAggregatorStats()) {
-            clearStats(parameters);
-          }
-
-          if (statsAggregator != null) {
-            String prefix = getAggregationPrefix(table, partn);
-            updateStats(statsAggregator, parameters, prefix, atomic);
+            // we choose to keep the invalid stats and only change the setting.
+            StatsSetupConst.setBasicStatsState(parameters, StatsSetupConst.FALSE);
           }
 
           updateQuickStats(wh, parameters, tPart.getSd());
-
-          if (!getWork().getNoStatsAggregator()) {
-            environmentContext = new EnvironmentContext();
-            environmentContext.putToProperties(StatsSetupConst.STATS_GENERATED,
-                StatsSetupConst.TASK);
+          if (StatsSetupConst.areBasicStatsUptoDate(parameters)) {
+            if (statsAggregator != null) {
+              String prefix = getAggregationPrefix(table, partn);
+              updateStats(statsAggregator, parameters, prefix, atomic);
+            }
+            if (!getWork().getNoStatsAggregator()) {
+              environmentContext = new EnvironmentContext();
+              environmentContext.putToProperties(StatsSetupConst.STATS_GENERATED,
+                  StatsSetupConst.TASK);
+            }
           }
           updates.add(new Partition(table, tPart));
 
@@ -346,14 +365,6 @@ public class StatsTask extends Task<StatsWork> implements Serializable {
     MetaStoreUtils.populateQuickStats(partfileStatus, parameters);
   }
 
-  private void clearStats(Map<String, String> parameters) {
-    for (String statType : StatsSetupConst.supportedStats) {
-      if (parameters.containsKey(statType)) {
-        parameters.remove(statType);
-      }
-    }
-  }
-
   private String toString(Map<String, String> parameters) {
     StringBuilder builder = new StringBuilder();
     for (String statType : StatsSetupConst.supportedStats) {
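
Two behavioral changes in this file are easy to miss in the hunks above. First, stats are reset to accurate only for commands that fully determine the table or partition contents (ANALYZE via getTableSpecs(), INSERT OVERWRITE via getReplace(), CTAS via a non-empty getDestinationCreateTable()). Second, clearStats() is gone: a plain LOAD or INSERT INTO now keeps the stale values and merely flips the accuracy flag, and the aggregator output is applied only when the state is already accurate. A condensed restatement of just the state transitions (not Hive code; the boolean parameters stand in for the work-descriptor checks):

    import java.util.HashMap;
    import java.util.Map;

    public class StatsTaskStateSketch {
      static void adjustStatsState(Map<String, String> params, boolean isAnalyze,
          boolean isInsertOverwrite, boolean isCtas, boolean clearAggregatorStats) {
        if (isAnalyze || isInsertOverwrite || isCtas) {
          // contents fully determined: the freshly gathered stats can be trusted
          params.put("COLUMN_STATS_ACCURATE", "{\"BASIC_STATS\":\"true\"}");
        }
        if (clearAggregatorStats) {
          // old code deleted the stat values; new code keeps them, marks stale
          params.put("COLUMN_STATS_ACCURATE", "{\"BASIC_STATS\":\"false\"}");
        }
      }

      public static void main(String[] args) {
        Map<String, String> p = new HashMap<>();
        p.put("numRows", "500");
        adjustStatsState(p, false, false, false, true); // e.g. a plain LOAD
        System.out.println(p); // numRows is kept but flagged inaccurate
      }
    }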

http://git-wip-us.apache.org/repos/asf/hive/blob/244ce09c/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index f4a9772..2ca4d1e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -1453,10 +1453,10 @@ public class Hive {
   public void loadPartition(Path loadPath, String tableName,
       Map<String, String> partSpec, boolean replace,
       boolean inheritTableSpecs, boolean isSkewedStoreAsSubdir,
-      boolean isSrcLocal, boolean isAcid) throws HiveException {
+      boolean isSrcLocal, boolean isAcid, boolean hasFollowingStatsTask) throws HiveException {
     Table tbl = getTable(tableName);
     loadPartition(loadPath, tbl, partSpec, replace, inheritTableSpecs,
-        isSkewedStoreAsSubdir, isSrcLocal, isAcid);
+        isSkewedStoreAsSubdir, isSrcLocal, isAcid, hasFollowingStatsTask);
   }
 
   /**
@@ -1483,7 +1483,7 @@ public class Hive {
   public Partition loadPartition(Path loadPath, Table tbl,
       Map<String, String> partSpec, boolean replace,
       boolean inheritTableSpecs, boolean isSkewedStoreAsSubdir,
-      boolean isSrcLocal, boolean isAcid) throws HiveException {
+      boolean isSrcLocal, boolean isAcid, boolean hasFollowingStatsTask) throws HiveException {
     Path tblDataLocationPath =  tbl.getDataLocation();
     try {
       /**
@@ -1562,10 +1562,19 @@ public class Hive {
       }
       if (oldPart == null) {
         newTPart.getTPartition().setParameters(new HashMap<String,String>());
+        if (this.getConf().getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) {
+          StatsSetupConst.setBasicStatsStateForCreateTable(newTPart.getParameters(),
+              StatsSetupConst.TRUE);
+        }
         MetaStoreUtils.populateQuickStats(HiveStatsUtils.getFileStatusRecurse(newPartPath, -1, newPartPath.getFileSystem(conf)), newTPart.getParameters());
         getMSC().add_partition(newTPart.getTPartition());
       } else {
-        alterPartition(tbl.getDbName(), tbl.getTableName(), new Partition(tbl, newTPart.getTPartition()), null);
+        EnvironmentContext environmentContext = null;
+        if (hasFollowingStatsTask) {
+          environmentContext = new EnvironmentContext();
+          environmentContext.putToProperties(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE);
+        }
+        alterPartition(tbl.getDbName(), tbl.getTableName(), new Partition(tbl, newTPart.getTPartition()), environmentContext);
       }
       return newTPart;
     } catch (IOException e) {
@@ -1683,7 +1692,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
    */
   public Map<Map<String, String>, Partition> loadDynamicPartitions(Path loadPath,
       String tableName, Map<String, String> partSpec, boolean replace,
-      int numDP, boolean listBucketingEnabled, boolean isAcid, long txnId)
+      int numDP, boolean listBucketingEnabled, boolean isAcid, long txnId, boolean hasFollowingStatsTask)
       throws HiveException {
 
     Set<Path> validPartitions = new HashSet<Path>();
@@ -1733,7 +1742,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
         LinkedHashMap<String, String> fullPartSpec = new LinkedHashMap<String, String>(partSpec);
         Warehouse.makeSpecFromName(fullPartSpec, partPath);
         Partition newPartition = loadPartition(partPath, tbl, fullPartSpec, replace,
-            true, listBucketingEnabled, false, isAcid);
+            true, listBucketingEnabled, false, isAcid, hasFollowingStatsTask);
         partitionsMap.put(fullPartSpec, newPartition);
         if (inPlaceEligible) {
           InPlaceUpdates.rePositionCursor(ps);
@@ -1772,10 +1781,12 @@ private void constructOneLBLocationMap(FileStatus fSta,
    *          If the source directory is LOCAL
    * @param isSkewedStoreAsSubdir
    *          if list bucketing enabled
+   * @param hasFollowingStatsTask
+   *          true if a stats task immediately follows this load
    * @param isAcid true if this is an ACID based write
    */
-  public void loadTable(Path loadPath, String tableName, boolean replace,
-      boolean isSrcLocal, boolean isSkewedStoreAsSubdir, boolean isAcid)
+  public void loadTable(Path loadPath, String tableName, boolean replace, boolean isSrcLocal,
+      boolean isSkewedStoreAsSubdir, boolean isAcid, boolean hasFollowingStatsTask)
       throws HiveException {
 
     List<Path> newFiles = null;
@@ -1817,8 +1828,13 @@ private void constructOneLBLocationMap(FileStatus fSta,
       throw new HiveException(e);
     }
 
+    EnvironmentContext environmentContext = null;
+    if (hasFollowingStatsTask) {
+      environmentContext = new EnvironmentContext();
+      environmentContext.putToProperties(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE);
+    }
     try {
-      alterTable(tableName, tbl, null);
+      alterTable(tableName, tbl, environmentContext);
     } catch (InvalidOperationException e) {
       throw new HiveException(e);
     }
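
In Hive.java the same hasFollowingStatsTask flag is threaded through loadTable(), loadPartition() and loadDynamicPartitions(), and it branches two ways: a brand-new partition gets the zeroed, accurate stats (when hive.stats.autogather is on) before populateQuickStats() fills in the file-level counters, while an existing table or partition being reloaded is altered under DO_NOT_UPDATE_STATS so the upcoming StatsTask remains the single writer. A decision-table sketch (Maps stand in for partition parameters and the Thrift EnvironmentContext):

    import java.util.HashMap;
    import java.util.Map;

    public class LoadPathStatsSketch {
      static Map<String, String> metadataFor(boolean partitionIsNew,
          boolean statsAutogather, boolean hasFollowingStatsTask) {
        Map<String, String> out = new HashMap<>();
        if (partitionIsNew) {
          if (statsAutogather) {
            // new partition: start from zeroed, accurate basic stats
            out.put("COLUMN_STATS_ACCURATE", "{\"BASIC_STATS\":\"true\"}");
          }
        } else if (hasFollowingStatsTask) {
          // reload of existing data: leave stats alone, StatsTask will fix them
          out.put("DO_NOT_UPDATE_STATS", "true");
        }
        return out;
      }

      public static void main(String[] args) {
        System.out.println(metadataFor(true, true, false));
        System.out.println(metadataFor(false, true, true));
      }
    }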

http://git-wip-us.apache.org/repos/asf/hive/blob/244ce09c/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
index 4a6617f..87a4b7b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
@@ -74,6 +74,7 @@ import org.apache.hadoop.hive.ql.parse.authorization.AuthorizationParseUtils;
 import org.apache.hadoop.hive.ql.parse.authorization.HiveAuthorizationTaskFactory;
 import org.apache.hadoop.hive.ql.parse.authorization.HiveAuthorizationTaskFactoryImpl;
 import org.apache.hadoop.hive.ql.plan.AddPartitionDesc;
+import org.apache.hadoop.hive.ql.plan.AddPartitionDesc.OnePartitionDesc;
 import org.apache.hadoop.hive.ql.plan.AlterDatabaseDesc;
 import org.apache.hadoop.hive.ql.plan.AlterIndexDesc;
 import org.apache.hadoop.hive.ql.plan.AlterIndexDesc.AlterIndexTypes;
@@ -2803,6 +2804,19 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
       addPartitionDesc.addPartition(currentPart, currentLocation);
     }
 
+    if (this.conf.getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) {
+      for (int index = 0; index < addPartitionDesc.getPartitionCount(); index++) {
+        OnePartitionDesc desc = addPartitionDesc.getPartition(index);
+        if (desc.getLocation() == null) {
+          if (desc.getPartParams() == null) {
+            desc.setPartParams(new HashMap<String, String>());
+          }
+          StatsSetupConst.setBasicStatsStateForCreateTable(desc.getPartParams(),
+              StatsSetupConst.TRUE);
+        }
+      }
+    }
+
     if (addPartitionDesc.getPartitionCount() == 0) {
       // nothing to do
       return;

http://git-wip-us.apache.org/repos/asf/hive/blob/244ce09c/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
index 500c7ed..d562ddf 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
@@ -287,7 +287,7 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
   private CreateTableDesc getBaseCreateTableDescFromTable(String dbName,
       org.apache.hadoop.hive.metastore.api.Table table) {
     if ((table.getPartitionKeys() == null) || (table.getPartitionKeys().size() == 0)){
-      table.putToParameters(StatsSetupConst.DO_NOT_UPDATE_STATS,"true");
+      table.putToParameters(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE);
     }
     CreateTableDesc tblDesc = new CreateTableDesc(
         dbName,

http://git-wip-us.apache.org/repos/asf/hive/blob/244ce09c/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java
index 2dc4e11..bf808c3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java
@@ -25,6 +25,7 @@ import java.util.List;
 import java.util.Map;
 
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
@@ -810,7 +811,17 @@ public class CreateTableDesc extends DDLDesc implements Serializable {
         }
       }
     }
+    if (getLocation() == null && !this.isCTAS) {
+      if (!tbl.isPartitioned() && conf.getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) {
+        StatsSetupConst.setBasicStatsStateForCreateTable(tbl.getTTable().getParameters(),
+            StatsSetupConst.TRUE);
+      }
+    } else {
+      StatsSetupConst.setBasicStatsStateForCreateTable(tbl.getTTable().getParameters(),
+          StatsSetupConst.FALSE);
+    }
     return tbl;
   }
 
+
 }
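
Taken together with the StatsTask change, toTable() now encodes a simple rule: a non-CTAS CREATE TABLE without LOCATION is provably empty, so a non-partitioned table starts with accurate stats when hive.stats.autogather is on; an explicit LOCATION or a CTAS starts as inaccurate, and for CTAS the StatsTask later flips the state back to accurate via getDestinationCreateTable(). A condensed restatement (not Hive code; null means the state is left untouched):

    public class CreateTableStatsStateRule {
      static String initialBasicStatsState(boolean hasLocation, boolean isCtas,
          boolean isPartitioned, boolean autogather) {
        if (!hasLocation && !isCtas) {
          // plain CREATE TABLE over a fresh directory: provably empty
          return (!isPartitioned && autogather) ? "true" : null;
        }
        // LOCATION may point at existing data; CTAS contents arrive later
        return "false";
      }

      public static void main(String[] args) {
        System.out.println(initialBasicStatsState(false, false, false, true)); // true
        System.out.println(initialBasicStatsState(true,  false, false, true)); // false
        System.out.println(initialBasicStatsState(false, true,  false, true)); // false
      }
    }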

http://git-wip-us.apache.org/repos/asf/hive/blob/244ce09c/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java
index 667d5c2..71dfc50 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java
@@ -144,7 +144,7 @@ public class TestExecDriver extends TestCase {
         db.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, src, true, true);
         db.createTable(src, cols, null, TextInputFormat.class,
             HiveIgnoreKeyTextOutputFormat.class);
-        db.loadTable(hadoopDataFile[i], src, false, true, false, false);
+        db.loadTable(hadoopDataFile[i], src, false, true, false, false, false);
         i++;
       }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/244ce09c/ql/src/test/queries/clientpositive/alter_table_add_partition.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/alter_table_add_partition.q b/ql/src/test/queries/clientpositive/alter_table_add_partition.q
new file mode 100644
index 0000000..54c839b
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/alter_table_add_partition.q
@@ -0,0 +1,13 @@
+create table mp (a int) partitioned by (b int);
+
+desc formatted mp;
+
+alter table mp add partition (b=1);
+
+desc formatted mp;
+desc formatted mp partition (b=1);
+
+insert into mp partition (b=1) values (1);
+
+desc formatted mp;
+desc formatted mp partition (b=1);

http://git-wip-us.apache.org/repos/asf/hive/blob/244ce09c/ql/src/test/queries/clientpositive/insert_values_orig_table_use_metadata.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/insert_values_orig_table_use_metadata.q b/ql/src/test/queries/clientpositive/insert_values_orig_table_use_metadata.q
new file mode 100644
index 0000000..73f5243
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/insert_values_orig_table_use_metadata.q
@@ -0,0 +1,121 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
+set hive.compute.query.using.stats=true;
+
+create table acid_ivot(
+    ctinyint TINYINT,
+    csmallint SMALLINT,
+    cint INT,
+    cbigint BIGINT,
+    cfloat FLOAT,
+    cdouble DOUBLE,
+    cstring1 STRING,
+    cstring2 STRING,
+    ctimestamp1 TIMESTAMP,
+    ctimestamp2 TIMESTAMP,
+    cboolean1 BOOLEAN,
+    cboolean2 BOOLEAN) clustered by (cint) into 1 buckets stored as orc TBLPROPERTIES ('transactional'='true');
+
+desc formatted acid_ivot;
+
+LOAD DATA LOCAL INPATH "../../data/files/alltypesorc" into table acid_ivot;
+
+desc formatted acid_ivot;
+
+explain select count(*) from acid_ivot;
+
+select count(*) from acid_ivot;
+
+insert into table acid_ivot values
+        (1, 2, 3, 4, 3.14, 2.34, 'fred', 'bob', '2014-09-01 10:34:23.111', '1944-06-06 06:00:00', true, true),
+        (111, 222, 3333, 444, 13.14, 10239302.34239320, 'fred', 'bob', '2014-09-01 10:34:23.111', '1944-06-06 06:00:00', true, true);
+
+desc formatted acid_ivot;
+
+explain select count(*) from acid_ivot;
+
+select count(*) from acid_ivot;
+
+drop table acid_ivot;
+
+create table acid_ivot(
+    ctinyint TINYINT,
+    csmallint SMALLINT,
+    cint INT,
+    cbigint BIGINT,
+    cfloat FLOAT,
+    cdouble DOUBLE,
+    cstring1 STRING,
+    cstring2 STRING,
+    ctimestamp1 TIMESTAMP,
+    ctimestamp2 TIMESTAMP,
+    cboolean1 BOOLEAN,
+    cboolean2 BOOLEAN) clustered by (cint) into 1 buckets stored as orc TBLPROPERTIES ('transactional'='true');
+
+insert into table acid_ivot values
+        (1, 2, 3, 4, 3.14, 2.34, 'fred', 'bob', '2014-09-01 10:34:23.111', '1944-06-06 06:00:00', true, true),
+        (111, 222, 3333, 444, 13.14, 10239302.34239320, 'fred', 'bob', '2014-09-01 10:34:23.111', '1944-06-06 06:00:00', true, true);
+
+desc formatted acid_ivot;
+
+explain select count(*) from acid_ivot;
+
+select count(*) from acid_ivot;
+
+insert into table acid_ivot values
+        (1, 2, 3, 4, 3.14, 2.34, 'fred', 'bob', '2014-09-01 10:34:23.111', '1944-06-06 06:00:00', true, true),
+        (111, 222, 3333, 444, 13.14, 10239302.34239320, 'fred', 'bob', '2014-09-01 10:34:23.111', '1944-06-06 06:00:00', true, true);
+
+desc formatted acid_ivot;
+
+explain select count(*) from acid_ivot;
+
+select count(*) from acid_ivot;
+
+LOAD DATA LOCAL INPATH "../../data/files/alltypesorc" into table acid_ivot;
+
+desc formatted acid_ivot;
+
+explain select count(*) from acid_ivot;
+
+drop table acid_ivot;
+
+create table acid_ivot like src;
+
+desc formatted acid_ivot;
+
+insert overwrite table acid_ivot select * from src;
+
+desc formatted acid_ivot;
+
+explain select count(*) from acid_ivot;
+
+select count(*) from acid_ivot;
+
+CREATE TABLE sp (key STRING COMMENT 'default', value STRING COMMENT 'default')
+PARTITIONED BY (ds STRING, hr STRING)
+STORED AS TEXTFILE;
+
+LOAD DATA LOCAL INPATH "../../data/files/kv1.txt"
+OVERWRITE INTO TABLE sp PARTITION (ds="2008-04-08", hr="11");
+
+desc formatted sp PARTITION (ds="2008-04-08", hr="11");
+
+explain select count(*) from sp where ds="2008-04-08" and hr="11";
+
+select count(*) from sp where ds="2008-04-08" and hr="11";
+
+insert into table sp PARTITION (ds="2008-04-08", hr="11") values
+        ('1', '2'), ('3', '4');
+
+desc formatted sp PARTITION (ds="2008-04-08", hr="11");
+
+analyze table sp PARTITION (ds="2008-04-08", hr="11") compute statistics;
+
+desc formatted sp PARTITION (ds="2008-04-08", hr="11");
+
+explain select count(*) from sp where ds="2008-04-08" and hr="11";
+
+select count(*) from sp where ds="2008-04-08" and hr="11";
+

http://git-wip-us.apache.org/repos/asf/hive/blob/244ce09c/ql/src/test/queries/clientpositive/stats20.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/stats20.q b/ql/src/test/queries/clientpositive/stats20.q
index 59701bd..79fd2b8 100644
--- a/ql/src/test/queries/clientpositive/stats20.q
+++ b/ql/src/test/queries/clientpositive/stats20.q
@@ -7,10 +7,12 @@ insert overwrite table stats_partitioned partition (ds='1')
 select * from src;
 -- rawDataSize is 5312 after config is turned on
 describe formatted stats_partitioned;
+describe formatted stats_partitioned partition (ds='1');
 
 set hive.stats.collect.rawdatasize=false;
 insert overwrite table stats_partitioned partition (ds='1')
 select * from src;
 -- rawDataSize is 0 after config is turned off
 describe formatted stats_partitioned;
+describe formatted stats_partitioned partition (ds='1');
 

http://git-wip-us.apache.org/repos/asf/hive/blob/244ce09c/ql/src/test/results/clientnegative/alter_file_format.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/alter_file_format.q.out b/ql/src/test/results/clientnegative/alter_file_format.q.out
index 96f1bfb..e3f3b4c 100644
--- a/ql/src/test/results/clientnegative/alter_file_format.q.out
+++ b/ql/src/test/results/clientnegative/alter_file_format.q.out
@@ -24,6 +24,11 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	0                   
+	numRows             	0                   
+	rawDataSize         	0                   
+	totalSize           	0                   
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 

http://git-wip-us.apache.org/repos/asf/hive/blob/244ce09c/ql/src/test/results/clientnegative/unset_table_property.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/unset_table_property.q.out b/ql/src/test/results/clientnegative/unset_table_property.q.out
index 0510788..0705b92 100644
--- a/ql/src/test/results/clientnegative/unset_table_property.q.out
+++ b/ql/src/test/results/clientnegative/unset_table_property.q.out
@@ -22,6 +22,8 @@ a	1
 c	3
 #### A masked pattern was here ####
 numFiles	0
+numRows	0
+rawDataSize	0
 totalSize	0
 #### A masked pattern was here ####
 FAILED: SemanticException [Error 10215]: Please use the following syntax if not sure whether the property existed or not:

http://git-wip-us.apache.org/repos/asf/hive/blob/244ce09c/ql/src/test/results/clientpositive/alter_file_format.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/alter_file_format.q.out b/ql/src/test/results/clientpositive/alter_file_format.q.out
index 5d83b23..14dd892 100644
--- a/ql/src/test/results/clientpositive/alter_file_format.q.out
+++ b/ql/src/test/results/clientpositive/alter_file_format.q.out
@@ -24,6 +24,11 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	0                   
+	numRows             	0                   
+	rawDataSize         	0                   
+	totalSize           	0                   
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -64,6 +69,8 @@ Table Type:         	MANAGED_TABLE
 Table Parameters:	 	 
 #### A masked pattern was here ####
 	numFiles            	0                   
+	numRows             	0                   
+	rawDataSize         	0                   
 	totalSize           	0                   
 #### A masked pattern was here ####
 	 	 
@@ -105,6 +112,8 @@ Table Type:         	MANAGED_TABLE
 Table Parameters:	 	 
 #### A masked pattern was here ####
 	numFiles            	0                   
+	numRows             	0                   
+	rawDataSize         	0                   
 	totalSize           	0                   
 #### A masked pattern was here ####
 	 	 
@@ -146,6 +155,8 @@ Table Type:         	MANAGED_TABLE
 Table Parameters:	 	 
 #### A masked pattern was here ####
 	numFiles            	0                   
+	numRows             	0                   
+	rawDataSize         	0                   
 	totalSize           	0                   
 #### A masked pattern was here ####
 	 	 
@@ -187,6 +198,8 @@ Table Type:         	MANAGED_TABLE
 Table Parameters:	 	 
 #### A masked pattern was here ####
 	numFiles            	0                   
+	numRows             	0                   
+	rawDataSize         	0                   
 	totalSize           	0                   
 #### A masked pattern was here ####
 	 	 
@@ -228,6 +241,8 @@ Table Type:         	MANAGED_TABLE
 Table Parameters:	 	 
 #### A masked pattern was here ####
 	numFiles            	0                   
+	numRows             	0                   
+	rawDataSize         	0                   
 	totalSize           	0                   
 #### A masked pattern was here ####
 	 	 
@@ -269,6 +284,8 @@ Table Type:         	MANAGED_TABLE
 Table Parameters:	 	 
 #### A masked pattern was here ####
 	numFiles            	0                   
+	numRows             	0                   
+	rawDataSize         	0                   
 	totalSize           	0                   
 #### A masked pattern was here ####
 	 	 
@@ -325,6 +342,11 @@ Database:           	default
 Table:              	alter_partition_format_test	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	0                   
+	numRows             	0                   
+	rawDataSize         	0                   
+	totalSize           	0                   
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -368,8 +390,11 @@ Database:           	default
 Table:              	alter_partition_format_test	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
 #### A masked pattern was here ####
 	numFiles            	0                   
+	numRows             	0                   
+	rawDataSize         	0                   
 	totalSize           	0                   
 #### A masked pattern was here ####
 	 	 
@@ -414,8 +439,11 @@ Database:           	default
 Table:              	alter_partition_format_test	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
 #### A masked pattern was here ####
 	numFiles            	0                   
+	numRows             	0                   
+	rawDataSize         	0                   
 	totalSize           	0                   
 #### A masked pattern was here ####
 	 	 
@@ -460,8 +488,11 @@ Database:           	default
 Table:              	alter_partition_format_test	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
 #### A masked pattern was here ####
 	numFiles            	0                   
+	numRows             	0                   
+	rawDataSize         	0                   
 	totalSize           	0                   
 #### A masked pattern was here ####
 	 	 
@@ -506,8 +537,11 @@ Database:           	default
 Table:              	alter_partition_format_test	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
 #### A masked pattern was here ####
 	numFiles            	0                   
+	numRows             	0                   
+	rawDataSize         	0                   
 	totalSize           	0                   
 #### A masked pattern was here ####
 	 	 
@@ -552,8 +586,11 @@ Database:           	default
 Table:              	alter_partition_format_test	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
 #### A masked pattern was here ####
 	numFiles            	0                   
+	numRows             	0                   
+	rawDataSize         	0                   
 	totalSize           	0                   
 #### A masked pattern was here ####
 	 	 

http://git-wip-us.apache.org/repos/asf/hive/blob/244ce09c/ql/src/test/results/clientpositive/alter_partition_clusterby_sortby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/alter_partition_clusterby_sortby.q.out b/ql/src/test/results/clientpositive/alter_partition_clusterby_sortby.q.out
index 184d2e4..3234792 100644
--- a/ql/src/test/results/clientpositive/alter_partition_clusterby_sortby.q.out
+++ b/ql/src/test/results/clientpositive/alter_partition_clusterby_sortby.q.out
@@ -48,8 +48,11 @@ Database:           	default
 Table:              	alter_table_partition_clusterby_sortby	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
 #### A masked pattern was here ####
 	numFiles            	0                   
+	numRows             	0                   
+	rawDataSize         	0                   
 	totalSize           	0                   
 #### A masked pattern was here ####
 	 	 
@@ -98,8 +101,11 @@ Database:           	default
 Table:              	alter_table_partition_clusterby_sortby	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
 #### A masked pattern was here ####
 	numFiles            	0                   
+	numRows             	0                   
+	rawDataSize         	0                   
 	totalSize           	0                   
 #### A masked pattern was here ####
 	 	 
@@ -148,8 +154,11 @@ Database:           	default
 Table:              	alter_table_partition_clusterby_sortby	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
 #### A masked pattern was here ####
 	numFiles            	0                   
+	numRows             	0                   
+	rawDataSize         	0                   
 	totalSize           	0                   
 #### A masked pattern was here ####
 	 	 

http://git-wip-us.apache.org/repos/asf/hive/blob/244ce09c/ql/src/test/results/clientpositive/alter_skewed_table.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/alter_skewed_table.q.out b/ql/src/test/results/clientpositive/alter_skewed_table.q.out
index a1caa99..0f60ba3 100644
--- a/ql/src/test/results/clientpositive/alter_skewed_table.q.out
+++ b/ql/src/test/results/clientpositive/alter_skewed_table.q.out
@@ -24,6 +24,11 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	0                   
+	numRows             	0                   
+	rawDataSize         	0                   
+	totalSize           	0                   
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -64,6 +69,8 @@ Table Type:         	MANAGED_TABLE
 Table Parameters:	 	 
 #### A masked pattern was here ####
 	numFiles            	0                   
+	numRows             	0                   
+	rawDataSize         	0                   
 	totalSize           	0                   
 #### A masked pattern was here ####
 	 	 
@@ -119,6 +126,11 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	0                   
+	numRows             	0                   
+	rawDataSize         	0                   
+	totalSize           	0                   
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -159,6 +171,8 @@ Table Type:         	MANAGED_TABLE
 Table Parameters:	 	 
 #### A masked pattern was here ####
 	numFiles            	0                   
+	numRows             	0                   
+	rawDataSize         	0                   
 	totalSize           	0                   
 #### A masked pattern was here ####
 	 	 
@@ -208,6 +222,11 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	0                   
+	numRows             	0                   
+	rawDataSize         	0                   
+	totalSize           	0                   
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -250,6 +269,8 @@ Table Type:         	MANAGED_TABLE
 Table Parameters:	 	 
 #### A masked pattern was here ####
 	numFiles            	0                   
+	numRows             	0                   
+	rawDataSize         	0                   
 	totalSize           	0                   
 #### A masked pattern was here ####
 	 	 

http://git-wip-us.apache.org/repos/asf/hive/blob/244ce09c/ql/src/test/results/clientpositive/alter_table_add_partition.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/alter_table_add_partition.q.out b/ql/src/test/results/clientpositive/alter_table_add_partition.q.out
new file mode 100644
index 0000000..1e5e396
--- /dev/null
+++ b/ql/src/test/results/clientpositive/alter_table_add_partition.q.out
@@ -0,0 +1,202 @@
+PREHOOK: query: create table mp (a int) partitioned by (b int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@mp
+POSTHOOK: query: create table mp (a int) partitioned by (b int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@mp
+PREHOOK: query: desc formatted mp
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@mp
+POSTHOOK: query: desc formatted mp
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@mp
+# col_name            	data_type           	comment             
+	 	 
+a                   	int                 	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+	 	 
+b                   	int                 	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: alter table mp add partition (b=1)
+PREHOOK: type: ALTERTABLE_ADDPARTS
+PREHOOK: Output: default@mp
+POSTHOOK: query: alter table mp add partition (b=1)
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: Output: default@mp
+POSTHOOK: Output: default@mp@b=1
+PREHOOK: query: desc formatted mp
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@mp
+POSTHOOK: query: desc formatted mp
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@mp
+# col_name            	data_type           	comment             
+	 	 
+a                   	int                 	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+	 	 
+b                   	int                 	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: desc formatted mp partition (b=1)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@mp
+POSTHOOK: query: desc formatted mp partition (b=1)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@mp
+# col_name            	data_type           	comment             
+	 	 
+a                   	int                 	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+	 	 
+b                   	int                 	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[1]                 	 
+Database:           	default             	 
+Table:              	mp                  	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	0                   
+	numRows             	0                   
+	rawDataSize         	0                   
+	totalSize           	0                   
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: insert into mp partition (b=1) values (1)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@values__tmp__table__1
+PREHOOK: Output: default@mp@b=1
+POSTHOOK: query: insert into mp partition (b=1) values (1)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@values__tmp__table__1
+POSTHOOK: Output: default@mp@b=1
+POSTHOOK: Lineage: mp PARTITION(b=1).a EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+PREHOOK: query: desc formatted mp
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@mp
+POSTHOOK: query: desc formatted mp
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@mp
+# col_name            	data_type           	comment             
+	 	 
+a                   	int                 	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+	 	 
+b                   	int                 	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: desc formatted mp partition (b=1)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@mp
+POSTHOOK: query: desc formatted mp partition (b=1)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@mp
+# col_name            	data_type           	comment             
+	 	 
+a                   	int                 	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+	 	 
+b                   	int                 	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[1]                 	 
+Database:           	default             	 
+Table:              	mp                  	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	1                   
+	numRows             	1                   
+	rawDataSize         	1                   
+	totalSize           	2                   
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   

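For reference, the new expected output above appears to be driven by a test
script along these lines, reconstructed from the PREHOOK/POSTHOOK query lines in
the .q.out (presumably ql/src/test/queries/clientpositive/alter_table_add_partition.q;
statement order follows the output, but take this as a sketch rather than the
file's exact text):

    create table mp (a int) partitioned by (b int);
    desc formatted mp;

    alter table mp add partition (b=1);
    desc formatted mp;
    desc formatted mp partition (b=1);

    insert into mp partition (b=1) values (1);
    desc formatted mp;
    desc formatted mp partition (b=1);

The interesting assertions are the two desc formatted calls on partition (b=1):
the newly added empty partition already carries accurate zero stats, and after
the single-row insert they advance to numFiles=1, numRows=1.
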
http://git-wip-us.apache.org/repos/asf/hive/blob/244ce09c/ql/src/test/results/clientpositive/alter_table_not_sorted.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/alter_table_not_sorted.q.out b/ql/src/test/results/clientpositive/alter_table_not_sorted.q.out
index 6e1ec59..566b804 100644
--- a/ql/src/test/results/clientpositive/alter_table_not_sorted.q.out
+++ b/ql/src/test/results/clientpositive/alter_table_not_sorted.q.out
@@ -24,7 +24,12 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
 	SORTBUCKETCOLSPREFIX	TRUE                
+	numFiles            	0                   
+	numRows             	0                   
+	rawDataSize         	0                   
+	totalSize           	0                   
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -66,6 +71,8 @@ Table Parameters:
 	SORTBUCKETCOLSPREFIX	TRUE                
 #### A masked pattern was here ####
 	numFiles            	0                   
+	numRows             	0                   
+	rawDataSize         	0                   
 	totalSize           	0                   
 #### A masked pattern was here ####
 	 	 

http://git-wip-us.apache.org/repos/asf/hive/blob/244ce09c/ql/src/test/results/clientpositive/auto_sortmerge_join_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/auto_sortmerge_join_1.q.out b/ql/src/test/results/clientpositive/auto_sortmerge_join_1.q.out
index 0902556..b1d2b23 100644
--- a/ql/src/test/results/clientpositive/auto_sortmerge_join_1.q.out
+++ b/ql/src/test/results/clientpositive/auto_sortmerge_join_1.q.out
@@ -166,8 +166,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
               name default.bucket_big
               numFiles 4
+              numRows 0
               partition_columns ds
               partition_columns.types string
+              rawDataSize 0
               serialization.ddl struct bucket_big { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -211,8 +213,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
               name default.bucket_big
               numFiles 4
+              numRows 0
               partition_columns ds
               partition_columns.types string
+              rawDataSize 0
               serialization.ddl struct bucket_big { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -355,8 +359,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
               name default.bucket_big
               numFiles 4
+              numRows 0
               partition_columns ds
               partition_columns.types string
+              rawDataSize 0
               serialization.ddl struct bucket_big { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -400,8 +406,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
               name default.bucket_big
               numFiles 4
+              numRows 0
               partition_columns ds
               partition_columns.types string
+              rawDataSize 0
               serialization.ddl struct bucket_big { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -521,8 +529,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.bucket_small
                     numFiles 2
+                    numRows 0
                     partition_columns ds
                     partition_columns.types string
+                    rawDataSize 0
                     serialization.ddl struct bucket_small { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -623,8 +633,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
               name default.bucket_big
               numFiles 4
+              numRows 0
               partition_columns ds
               partition_columns.types string
+              rawDataSize 0
               serialization.ddl struct bucket_big { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -668,8 +680,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
               name default.bucket_big
               numFiles 4
+              numRows 0
               partition_columns ds
               partition_columns.types string
+              rawDataSize 0
               serialization.ddl struct bucket_big { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -712,8 +726,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
               name default.bucket_small
               numFiles 2
+              numRows 0
               partition_columns ds
               partition_columns.types string
+              rawDataSize 0
               serialization.ddl struct bucket_small { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -794,8 +810,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.bucket_big
                     numFiles 4
+                    numRows 0
                     partition_columns ds
                     partition_columns.types string
+                    rawDataSize 0
                     serialization.ddl struct bucket_big { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -838,8 +856,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.bucket_big
                     numFiles 4
+                    numRows 0
                     partition_columns ds
                     partition_columns.types string
+                    rawDataSize 0
                     serialization.ddl struct bucket_big { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -940,8 +960,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
               name default.bucket_big
               numFiles 4
+              numRows 0
               partition_columns ds
               partition_columns.types string
+              rawDataSize 0
               serialization.ddl struct bucket_big { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -985,8 +1007,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
               name default.bucket_big
               numFiles 4
+              numRows 0
               partition_columns ds
               partition_columns.types string
+              rawDataSize 0
               serialization.ddl struct bucket_big { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -1029,8 +1053,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
               name default.bucket_small
               numFiles 2
+              numRows 0
               partition_columns ds
               partition_columns.types string
+              rawDataSize 0
               serialization.ddl struct bucket_small { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -1140,8 +1166,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
               name default.bucket_big
               numFiles 4
+              numRows 0
               partition_columns ds
               partition_columns.types string
+              rawDataSize 0
               serialization.ddl struct bucket_big { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -1185,8 +1213,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
               name default.bucket_big
               numFiles 4
+              numRows 0
               partition_columns ds
               partition_columns.types string
+              rawDataSize 0
               serialization.ddl struct bucket_big { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

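The auto_sortmerge_join_*.q.out changes here and below are all of one kind: the
partition descriptors printed in EXPLAIN EXTENDED plans now carry numRows 0 and
rawDataSize 0 alongside the existing numFiles/totalSize. The bucket_small,
bucket_medium, and bucket_big partitions in these tests are populated with LOAD
DATA, so their row counts are genuinely unknown and are recorded as 0 without
the COLUMN_STATS_ACCURATE flag. A hedged sketch of the kind of setup that
produces such a descriptor (column schema taken from the serialization.ddl lines
above; the bucket count, file path, and partition value are illustrative
assumptions, not the tests' exact values):

    -- bucketed, partitioned table as in the auto_sortmerge_join tests
    create table bucket_small (key string, value string)
        partitioned by (ds string)
        clustered by (key) sorted by (key) into 2 buckets
        stored as textfile;
    -- LOAD DATA moves files in without reading them, so numRows/rawDataSize
    -- cannot be computed; they now default to 0 and are not marked accurate
    load data local inpath '/tmp/smallsrcsortbucket1.txt'
        into table bucket_small partition (ds='2008-04-08');
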
http://git-wip-us.apache.org/repos/asf/hive/blob/244ce09c/ql/src/test/results/clientpositive/auto_sortmerge_join_11.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/auto_sortmerge_join_11.q.out b/ql/src/test/results/clientpositive/auto_sortmerge_join_11.q.out
index 81de2b0..82a8e93 100644
--- a/ql/src/test/results/clientpositive/auto_sortmerge_join_11.q.out
+++ b/ql/src/test/results/clientpositive/auto_sortmerge_join_11.q.out
@@ -133,8 +133,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.bucket_small
                     numFiles 2
+                    numRows 0
                     partition_columns ds
                     partition_columns.types string
+                    rawDataSize 0
                     serialization.ddl struct bucket_small { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -237,8 +239,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
               name default.bucket_big
               numFiles 4
+              numRows 0
               partition_columns ds
               partition_columns.types string
+              rawDataSize 0
               serialization.ddl struct bucket_big { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -281,8 +285,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
               name default.bucket_big
               numFiles 4
+              numRows 0
               partition_columns ds
               partition_columns.types string
+              rawDataSize 0
               serialization.ddl struct bucket_big { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -325,8 +331,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
               name default.bucket_small
               numFiles 2
+              numRows 0
               partition_columns ds
               partition_columns.types string
+              rawDataSize 0
               serialization.ddl struct bucket_small { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -449,8 +457,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.bucket_small
                     numFiles 2
+                    numRows 0
                     partition_columns ds
                     partition_columns.types string
+                    rawDataSize 0
                     serialization.ddl struct bucket_small { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -553,8 +563,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
               name default.bucket_big
               numFiles 4
+              numRows 0
               partition_columns ds
               partition_columns.types string
+              rawDataSize 0
               serialization.ddl struct bucket_big { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -597,8 +609,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
               name default.bucket_big
               numFiles 4
+              numRows 0
               partition_columns ds
               partition_columns.types string
+              rawDataSize 0
               serialization.ddl struct bucket_big { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -641,8 +655,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
               name default.bucket_small
               numFiles 2
+              numRows 0
               partition_columns ds
               partition_columns.types string
+              rawDataSize 0
               serialization.ddl struct bucket_small { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -758,8 +774,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.bucket_small
                     numFiles 2
+                    numRows 0
                     partition_columns ds
                     partition_columns.types string
+                    rawDataSize 0
                     serialization.ddl struct bucket_small { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -862,8 +880,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
               name default.bucket_big
               numFiles 4
+              numRows 0
               partition_columns ds
               partition_columns.types string
+              rawDataSize 0
               serialization.ddl struct bucket_big { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -906,8 +926,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
               name default.bucket_big
               numFiles 4
+              numRows 0
               partition_columns ds
               partition_columns.types string
+              rawDataSize 0
               serialization.ddl struct bucket_big { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -1023,8 +1045,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.bucket_small
                     numFiles 2
+                    numRows 0
                     partition_columns ds
                     partition_columns.types string
+                    rawDataSize 0
                     serialization.ddl struct bucket_small { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -1069,8 +1093,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.bucket_big
                     numFiles 4
+                    numRows 0
                     partition_columns ds
                     partition_columns.types string
+                    rawDataSize 0
                     serialization.ddl struct bucket_big { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -1111,8 +1137,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.bucket_big
                     numFiles 4
+                    numRows 0
                     partition_columns ds
                     partition_columns.types string
+                    rawDataSize 0
                     serialization.ddl struct bucket_big { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -1234,8 +1262,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
               name default.bucket_big
               numFiles 4
+              numRows 0
               partition_columns ds
               partition_columns.types string
+              rawDataSize 0
               serialization.ddl struct bucket_big { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -1278,8 +1308,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
               name default.bucket_big
               numFiles 4
+              numRows 0
               partition_columns ds
               partition_columns.types string
+              rawDataSize 0
               serialization.ddl struct bucket_big { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

http://git-wip-us.apache.org/repos/asf/hive/blob/244ce09c/ql/src/test/results/clientpositive/auto_sortmerge_join_12.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/auto_sortmerge_join_12.q.out b/ql/src/test/results/clientpositive/auto_sortmerge_join_12.q.out
index 26a11a7..d8eacbe 100644
--- a/ql/src/test/results/clientpositive/auto_sortmerge_join_12.q.out
+++ b/ql/src/test/results/clientpositive/auto_sortmerge_join_12.q.out
@@ -171,8 +171,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.bucket_small
                     numFiles 2
+                    numRows 0
                     partition_columns ds
                     partition_columns.types string
+                    rawDataSize 0
                     serialization.ddl struct bucket_small { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -219,8 +221,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.bucket_medium
                     numFiles 3
+                    numRows 0
                     partition_columns ds
                     partition_columns.types string
+                    rawDataSize 0
                     serialization.ddl struct bucket_medium { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -267,8 +271,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.bucket_medium
                     numFiles 3
+                    numRows 0
                     partition_columns ds
                     partition_columns.types string
+                    rawDataSize 0
                     serialization.ddl struct bucket_medium { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -414,8 +420,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
               name default.bucket_big
               numFiles 4
+              numRows 0
               partition_columns ds
               partition_columns.types string
+              rawDataSize 0
               serialization.ddl struct bucket_big { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -459,8 +467,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
               name default.bucket_big
               numFiles 4
+              numRows 0
               partition_columns ds
               partition_columns.types string
+              rawDataSize 0
               serialization.ddl struct bucket_big { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -504,8 +514,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
               name default.bucket_medium
               numFiles 3
+              numRows 0
               partition_columns ds
               partition_columns.types string
+              rawDataSize 0
               serialization.ddl struct bucket_medium { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -549,8 +561,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
               name default.bucket_small
               numFiles 2
+              numRows 0
               partition_columns ds
               partition_columns.types string
+              rawDataSize 0
               serialization.ddl struct bucket_small { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe