Posted to commits@hive.apache.org by xu...@apache.org on 2014/10/20 21:46:17 UTC

svn commit: r1633210 [1/14] - in /hive/branches/spark/ql/src: java/org/apache/hadoop/hive/ql/optimizer/ test/results/clientpositive/spark/

Author: xuefu
Date: Mon Oct 20 19:46:13 2014
New Revision: 1633210

URL: http://svn.apache.org/r1633210
Log:
HIVE-8496: Re-enable statistics [Spark Branch] (Chao via Xuefu)

Modified:
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java
    hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_1.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/bucket2.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/bucket3.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/bucket4.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/column_access_stats.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/count.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/ctas.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/disable_merge_for_bucketing.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/escape_clusterby1.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/escape_distributeby1.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/escape_orderby1.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/escape_sortby1.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby1.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby2.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby3.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby3_map.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby3_map_multi_distinct.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby3_noskew.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby3_noskew_multi_distinct.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby4.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby7_map.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby7_map_multi_single_reducer.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby7_map_skew.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby7_noskew.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby_cube1.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer2.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer3.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby_position.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby_ppr.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby_rollup1.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby_sort_1_23.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby_sort_skew_1_23.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/having.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/innerjoin.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/input12.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/input13.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/input14.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/input17.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/input18.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/input1_limit.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/input_part2.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/insert1.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/insert_into1.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/insert_into2.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/insert_into3.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join0.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join1.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join10.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join11.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join12.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join13.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join14.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join15.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join16.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join17.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join18.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join19.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join2.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join20.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join21.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join22.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join23.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join25.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join26.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join27.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join3.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join4.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join5.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join6.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join7.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join8.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join9.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join_nullsafe.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/limit_pushdown.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/load_dyn_part1.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/load_dyn_part10.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/load_dyn_part14.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/load_dyn_part2.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/load_dyn_part3.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/load_dyn_part4.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/load_dyn_part5.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/load_dyn_part8.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/load_dyn_part9.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/mapreduce1.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/mapreduce2.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/merge1.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/merge2.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/metadata_only_queries.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/multi_insert.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/multi_insert_gby.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/multi_insert_gby2.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/multi_insert_gby3.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/multi_insert_lateral_view.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/multi_insert_move_tasks_share_dependencies.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/multigroupby_singlemr.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/optimize_nullscan.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/order.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/order2.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/parallel.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/parallel_join0.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/parallel_join1.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/ppd_multi_insert.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/ppd_transform.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/sample1.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/sample10.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/sample2.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/sample3.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/sample4.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/sample5.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/sample6.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/sample7.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/sample8.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/sample9.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/script_pipe.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/skewjoin.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/sort.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/subquery_exists.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/subquery_in.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/subquery_multiinsert.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/temp_table.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/transform1.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/transform_ppr1.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/transform_ppr2.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union10.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union11.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union14.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union15.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union16.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union18.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union19.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union2.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union23.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union25.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union28.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union3.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union30.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union33.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union4.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union5.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union6.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union7.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union9.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union_ppr.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union_remove_1.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union_remove_10.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union_remove_15.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union_remove_16.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union_remove_18.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union_remove_19.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union_remove_2.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union_remove_20.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union_remove_21.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union_remove_24.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union_remove_25.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union_remove_4.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union_remove_5.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union_remove_6.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union_remove_7.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union_remove_8.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union_remove_9.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/vector_data_types.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/vector_left_outer_join.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/vectorization_14.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/vectorization_15.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/vectorization_9.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/vectorization_part_project.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/vectorized_mapjoin.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/vectorized_nested_mapjoin.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/vectorized_ptf.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/vectorized_shufflejoin.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/vectorized_timestamp_funcs.q.out

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java?rev=1633210&r1=1633209&r2=1633210&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java Mon Oct 20 19:46:13 2014
@@ -135,7 +135,7 @@ public class Optimizer {
     if(HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTIMIZEMETADATAQUERIES)) {
       transformations.add(new StatsOptimizer());
     }
-    if (pctx.getContext().getExplain() && !isSparkExecEngine && !isTezExecEngine) {
+    if (isSparkExecEngine || (pctx.getContext().getExplain() && !isTezExecEngine)) {
       transformations.add(new AnnotateWithStatistics());
       transformations.add(new AnnotateWithOpTraits());
     }
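
In short: before this patch, AnnotateWithStatistics and AnnotateWithOpTraits ran only when producing an EXPLAIN plan, and never on the Spark or Tez engines; after it, Spark always runs both annotators, while the other engines keep the EXPLAIN-only behavior (Tez stays excluded here, presumably because its compiler handles annotation on its own). Below is a minimal sketch of the revised gate, assuming the engine flags are derived from hive.execution.engine as elsewhere in Optimizer.initialize(); shouldAnnotateWithStats is a hypothetical helper, and the explain flag is passed in for illustration where the real code reads pctx.getContext().getExplain():

    import org.apache.hadoop.hive.conf.HiveConf;

    public class StatsAnnotationGate {
      // Sketch only: mirrors the post-HIVE-8496 condition in Optimizer.initialize().
      // The engine flags are assumed to come from hive.execution.engine.
      static boolean shouldAnnotateWithStats(HiveConf hiveConf, boolean isExplain) {
        String engine = HiveConf.getVar(hiveConf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE);
        boolean isSparkExecEngine = "spark".equals(engine);
        boolean isTezExecEngine = "tez".equals(engine);
        // Spark: always annotate (this is what the commit re-enables).
        // Other engines: annotate only for EXPLAIN, with Tez still excluded here.
        return isSparkExecEngine || (isExplain && !isTezExecEngine);
      }
    }

The remainder of the commit is the resulting churn in the golden files: with annotation re-enabled, each operator in the Spark .q.out explain plans gains a "Statistics: Num rows: ... Data size: ..." line, as the diffs below show.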

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_1.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_1.q.out?rev=1633210&r1=1633209&r2=1633210&view=diff
==============================================================================
Files hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_1.q.out (original) and hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_1.q.out Mon Oct 20 19:46:13 2014 differ

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/bucket2.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/bucket2.q.out?rev=1633210&r1=1633209&r2=1633210&view=diff
==============================================================================
Files hive/branches/spark/ql/src/test/results/clientpositive/spark/bucket2.q.out (original) and hive/branches/spark/ql/src/test/results/clientpositive/spark/bucket2.q.out Mon Oct 20 19:46:13 2014 differ

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/bucket3.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/bucket3.q.out?rev=1633210&r1=1633209&r2=1633210&view=diff
==============================================================================
Files hive/branches/spark/ql/src/test/results/clientpositive/spark/bucket3.q.out (original) and hive/branches/spark/ql/src/test/results/clientpositive/spark/bucket3.q.out Mon Oct 20 19:46:13 2014 differ

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/bucket4.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/bucket4.q.out?rev=1633210&r1=1633209&r2=1633210&view=diff
==============================================================================
Files hive/branches/spark/ql/src/test/results/clientpositive/spark/bucket4.q.out (original) and hive/branches/spark/ql/src/test/results/clientpositive/spark/bucket4.q.out Mon Oct 20 19:46:13 2014 differ

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/column_access_stats.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/column_access_stats.q.out?rev=1633210&r1=1633209&r2=1633210&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/column_access_stats.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/column_access_stats.q.out Mon Oct 20 19:46:13 2014
@@ -92,9 +92,11 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: t1
+          Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
           Select Operator
             expressions: key (type: string)
             outputColumnNames: _col0
+            Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
             ListSink
 
 PREHOOK: query: SELECT key FROM (SELECT key, val FROM T1) subq1
@@ -122,9 +124,11 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: t1
+          Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
           Select Operator
             expressions: key (type: string)
             outputColumnNames: _col0
+            Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
             ListSink
 
 PREHOOK: query: SELECT k FROM (SELECT key as k, val as v FROM T1) subq1
@@ -384,22 +388,28 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: t2
+                  Statistics: Num rows: 0 Data size: 20 Basic stats: PARTIAL Column stats: NONE
                   Filter Operator
                     predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                     Reduce Output Operator
                       key expressions: key (type: string)
                       sort order: +
                       Map-reduce partition columns: key (type: string)
+                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
         Map 3 
             Map Operator Tree:
                 TableScan
                   alias: t1
+                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                   Filter Operator
                     predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                     Reduce Output Operator
                       key expressions: key (type: string)
                       sort order: +
                       Map-reduce partition columns: key (type: string)
+                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Join Operator
@@ -409,11 +419,14 @@ STAGE PLANS:
                   0 {KEY.reducesinkkey0}
                   1 
                 outputColumnNames: _col0
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string)
                   outputColumnNames: _col0
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   File Output Operator
                     compressed: false
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -498,22 +511,28 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: t2
+                  Statistics: Num rows: 0 Data size: 20 Basic stats: PARTIAL Column stats: NONE
                   Filter Operator
                     predicate: ((val = 3) and key is not null) (type: boolean)
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                     Reduce Output Operator
                       key expressions: key (type: string)
                       sort order: +
                       Map-reduce partition columns: key (type: string)
+                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
         Map 3 
             Map Operator Tree:
                 TableScan
                   alias: t1
+                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                   Filter Operator
                     predicate: ((val = 3) and key is not null) (type: boolean)
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                     Reduce Output Operator
                       key expressions: key (type: string)
                       sort order: +
                       Map-reduce partition columns: key (type: string)
+                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Join Operator
@@ -523,11 +542,14 @@ STAGE PLANS:
                   0 {KEY.reducesinkkey0}
                   1 {KEY.reducesinkkey0}
                 outputColumnNames: _col0, _col5
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string), '3' (type: string), _col5 (type: string), '3' (type: string)
                   outputColumnNames: _col0, _col1, _col2, _col3
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   File Output Operator
                     compressed: false
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -579,28 +601,36 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: t2
+                  Statistics: Num rows: 0 Data size: 20 Basic stats: PARTIAL Column stats: NONE
                   Filter Operator
                     predicate: ((key = 6) and val is not null) (type: boolean)
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                     Select Operator
                       expressions: val (type: string)
                       outputColumnNames: _col0
+                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
         Map 3 
             Map Operator Tree:
                 TableScan
                   alias: t1
+                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                   Filter Operator
                     predicate: ((key = 5) and val is not null) (type: boolean)
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                     Select Operator
                       expressions: val (type: string)
                       outputColumnNames: _col0
+                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Join Operator
@@ -610,11 +640,14 @@ STAGE PLANS:
                   0 {KEY.reducesinkkey0}
                   1 
                 outputColumnNames: _col0
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string)
                   outputColumnNames: _col0
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   File Output Operator
                     compressed: false
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -681,39 +714,50 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: t3
+                  Statistics: Num rows: 0 Data size: 35 Basic stats: PARTIAL Column stats: NONE
                   Filter Operator
                     predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                     Reduce Output Operator
                       key expressions: key (type: string)
                       sort order: +
                       Map-reduce partition columns: key (type: string)
+                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                       value expressions: val (type: string)
         Map 3 
             Map Operator Tree:
                 TableScan
                   alias: t2
+                  Statistics: Num rows: 0 Data size: 20 Basic stats: PARTIAL Column stats: NONE
                   Filter Operator
                     predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                     Select Operator
                       expressions: key (type: string)
                       outputColumnNames: _col0
+                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
         Map 5 
             Map Operator Tree:
                 TableScan
                   alias: t1
+                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                   Filter Operator
                     predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                     Select Operator
                       expressions: key (type: string)
                       outputColumnNames: _col0
+                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Join Operator
@@ -723,11 +767,14 @@ STAGE PLANS:
                   0 {KEY.reducesinkkey0}
                   1 {KEY.reducesinkkey0} {VALUE._col0}
                 outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
                   outputColumnNames: _col0, _col1, _col2
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   File Output Operator
                     compressed: false
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -741,13 +788,16 @@ STAGE PLANS:
                   0 {KEY.reducesinkkey0}
                   1 
                 outputColumnNames: _col0
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string)
                   outputColumnNames: _col0
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   Reduce Output Operator
                     key expressions: _col0 (type: string)
                     sort order: +
                     Map-reduce partition columns: _col0 (type: string)
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 
   Stage: Stage-0
     Fetch Operator

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/count.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/count.q.out?rev=1633210&r1=1633209&r2=1633210&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/count.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/count.q.out Mon Oct 20 19:46:13 2014
@@ -48,18 +48,22 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: abcd
+                  Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: a (type: int), b (type: int), c (type: int), d (type: int)
                     outputColumnNames: a, b, c, d
+                    Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count(DISTINCT b), count(DISTINCT c), sum(d)
                       keys: a (type: int), b (type: int), c (type: int)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                      Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int)
                         sort order: +++
                         Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col5 (type: bigint)
         Reducer 2 
             Reduce Operator Tree:
@@ -68,11 +72,14 @@ STAGE PLANS:
                 keys: KEY._col0 (type: int)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 2 Data size: 39 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: bigint), _col3 (type: bigint)
                   outputColumnNames: _col0, _col1, _col2, _col3
+                  Statistics: Num rows: 2 Data size: 39 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
+                    Statistics: Num rows: 2 Data size: 39 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -115,17 +122,21 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: abcd
+                  Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: a (type: int), b (type: int), c (type: int), d (type: int)
                     outputColumnNames: a, b, c, d
+                    Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count(1), count(), count(a), count(b), count(c), count(d), count(DISTINCT a), count(DISTINCT b), count(DISTINCT c), count(DISTINCT d), count(DISTINCT a, b), count(DISTINCT b, c), count(DISTINCT c, d), count(DISTINCT a, d), count(DISTINCT a, c), count(DISTINCT b, d), count(DISTINCT a, b, c), count(DISTINCT b, c, d), count(DISTINCT a, c, d), count(DISTINCT a, b, d), count(DISTINCT a, b, c, d)
                       keys: a (type: int), b (type: int), c (type: int), d (type: int)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24
+                      Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int), _col3 (type: int)
                         sort order: ++++
+                        Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col4 (type: bigint), _col5 (type: bigint), _col6 (type: bigint), _col7 (type: bigint), _col8 (type: bigint), _col9 (type: bigint)
         Reducer 2 
             Reduce Operator Tree:
@@ -133,11 +144,14 @@ STAGE PLANS:
                 aggregations: count(VALUE._col0), count(VALUE._col1), count(VALUE._col2), count(VALUE._col3), count(VALUE._col4), count(VALUE._col5), count(DISTINCT KEY._col0:0._col0), count(DISTINCT KEY._col0:1._col0), count(DISTINCT KEY._col0:2._col0), count(DISTINCT KEY._col0:3._col0), count(DISTINCT KEY._col0:4._col0, KEY._col0:4._col1), count(DISTINCT KEY._col0:5._col0, KEY._col0:5._col1), count(DISTINCT KEY._col0:6._col0, KEY._col0:6._col1), count(DISTINCT KEY._col0:7._col0, KEY._col0:7._col1), count(DISTINCT KEY._col0:8._col0, KEY._col0:8._col1), count(DISTINCT KEY._col0:9._col0, KEY._col0:9._col1), count(DISTINCT KEY._col0:10._col0, KEY._col0:10._col1, KEY._col0:10._col2), count(DISTINCT KEY._col0:11._col0, KEY._col0:11._col1, KEY._col0:11._col2), count(DISTINCT KEY._col0:12._col0, KEY._col0:12._col1, KEY._col0:12._col2), count(DISTINCT KEY._col0:13._col0, KEY._col0:13._col1, KEY._col0:13._col2), count(DISTINCT KEY._col0:14._col0, KEY._col0:14._col1, KEY._col0:14._col2, KEY._col0:14._col3)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20
+                Statistics: Num rows: 1 Data size: 200 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: bigint), _col1 (type: bigint), _col2 (type: bigint), _col3 (type: bigint), _col4 (type: bigint), _col5 (type: bigint), _col6 (type: bigint), _col7 (type: bigint), _col8 (type: bigint), _col9 (type: bigint), _col10 (type: bigint), _col11 (type: bigint), _col12 (type: bigint), _col13 (type: bigint), _col14 (type: bigint), _col15 (type: bigint), _col16 (type: bigint), _col17 (type: bigint), _col18 (type: bigint), _col19 (type: bigint), _col20 (type: bigint)
                   outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20
+                  Statistics: Num rows: 1 Data size: 200 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
+                    Statistics: Num rows: 1 Data size: 200 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -177,13 +191,16 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: abcd
+                  Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: a (type: int), b (type: int), c (type: int), d (type: int)
                     outputColumnNames: a, b, c, d
+                    Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: a (type: int), b (type: int), c (type: int)
                       sort order: +++
                       Map-reduce partition columns: a (type: int)
+                      Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
                       value expressions: d (type: int)
         Reducer 2 
             Reduce Operator Tree:
@@ -192,11 +209,14 @@ STAGE PLANS:
                 keys: KEY._col0 (type: int)
                 mode: complete
                 outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 2 Data size: 39 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: bigint), _col3 (type: bigint)
                   outputColumnNames: _col0, _col1, _col2, _col3
+                  Statistics: Num rows: 2 Data size: 39 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
+                    Statistics: Num rows: 2 Data size: 39 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -239,23 +259,29 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: abcd
+                  Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: a (type: int), b (type: int), c (type: int), d (type: int)
                     outputColumnNames: a, b, c, d
+                    Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: a (type: int), b (type: int), c (type: int), d (type: int)
                       sort order: ++++
+                      Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(1), count(), count(KEY._col0:0._col0), count(KEY._col0:1._col0), count(KEY._col0:2._col0), count(KEY._col0:3._col0), count(DISTINCT KEY._col0:0._col0), count(DISTINCT KEY._col0:1._col0), count(DISTINCT KEY._col0:2._col0), count(DISTINCT KEY._col0:3._col0), count(DISTINCT KEY._col0:4._col0, KEY._col0:4._col1), count(DISTINCT KEY._col0:5._col0, KEY._col0:5._col1), count(DISTINCT KEY._col0:6._col0, KEY._col0:6._col1), count(DISTINCT KEY._col0:7._col0, KEY._col0:7._col1), count(DISTINCT KEY._col0:8._col0, KEY._col0:8._col1), count(DISTINCT KEY._col0:9._col0, KEY._col0:9._col1), count(DISTINCT KEY._col0:10._col0, KEY._col0:10._col1, KEY._col0:10._col2), count(DISTINCT KEY._col0:11._col0, KEY._col0:11._col1, KEY._col0:11._col2), count(DISTINCT KEY._col0:12._col0, KEY._col0:12._col1, KEY._col0:12._col2), count(DISTINCT KEY._col0:13._col0, KEY._col0:13._col1, KEY._col0:13._col2), count(DISTINCT KEY._col0:14._col0, KEY._col0:14._col1, KEY._col0:14._col2, KEY._col0:14._col3)
                 mode: complete
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20
+                Statistics: Num rows: 1 Data size: 168 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: bigint), _col1 (type: bigint), _col2 (type: bigint), _col3 (type: bigint), _col4 (type: bigint), _col5 (type: bigint), _col6 (type: bigint), _col7 (type: bigint), _col8 (type: bigint), _col9 (type: bigint), _col10 (type: bigint), _col11 (type: bigint), _col12 (type: bigint), _col13 (type: bigint), _col14 (type: bigint), _col15 (type: bigint), _col16 (type: bigint), _col17 (type: bigint), _col18 (type: bigint), _col19 (type: bigint), _col20 (type: bigint)
                   outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20
+                  Statistics: Num rows: 1 Data size: 168 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
+                    Statistics: Num rows: 1 Data size: 168 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/ctas.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/ctas.q.out?rev=1633210&r1=1633209&r2=1633210&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/ctas.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/ctas.q.out Mon Oct 20 19:46:13 2014
@@ -41,31 +41,40 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string)
                     outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: string), _col1 (type: string)
                       sort order: ++
+                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
                 outputColumnNames: _col0, _col1
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                 Limit
                   Number of rows: 10
+                  Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
                     key expressions: _col0 (type: string), _col1 (type: string)
                     sort order: ++
+                    Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
         Reducer 3 
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
                 outputColumnNames: _col0, _col1
+                Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                 Limit
                   Number of rows: 10
+                  Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
+                    Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -181,31 +190,40 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string)
                     outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: string), _col1 (type: string)
                       sort order: ++
+                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
                 outputColumnNames: _col0, _col1
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                 Limit
                   Number of rows: 10
+                  Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
                     key expressions: _col0 (type: string), _col1 (type: string)
                     sort order: ++
+                    Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
         Reducer 3 
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
                 outputColumnNames: _col0, _col1
+                Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                 Limit
                   Number of rows: 10
+                  Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
+                    Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -321,31 +339,40 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: (key / 2) (type: double), concat(value, '_con') (type: string)
                     outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: double), _col1 (type: string)
                       sort order: ++
+                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: double), KEY.reducesinkkey1 (type: string)
                 outputColumnNames: _col0, _col1
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                 Limit
                   Number of rows: 10
+                  Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
                     key expressions: _col0 (type: double), _col1 (type: string)
                     sort order: ++
+                    Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
         Reducer 3 
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: double), KEY.reducesinkkey1 (type: string)
                 outputColumnNames: _col0, _col1
+                Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                 Limit
                   Number of rows: 10
+                  Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
+                    Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
                         output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
@@ -526,31 +553,40 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string)
                     outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: string), _col1 (type: string)
                       sort order: ++
+                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
                 outputColumnNames: _col0, _col1
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                 Limit
                   Number of rows: 10
+                  Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
                     key expressions: _col0 (type: string), _col1 (type: string)
                     sort order: ++
+                    Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
         Reducer 3 
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
                 outputColumnNames: _col0, _col1
+                Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                 Limit
                   Number of rows: 10
+                  Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
+                    Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -709,13 +745,16 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   GatherStats: false
                   Select Operator
                     expressions: key (type: string), value (type: string)
                     outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: string), _col1 (type: string)
                       sort order: ++
+                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                       tag: -1
                       auto parallelism: true
             Path -> Alias:
@@ -773,11 +812,14 @@ STAGE PLANS:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
                 outputColumnNames: _col0, _col1
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                 Limit
                   Number of rows: 10
+                  Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
                     key expressions: _col0 (type: string), _col1 (type: string)
                     sort order: ++
+                    Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                     tag: -1
                     auto parallelism: false
         Reducer 3 
@@ -786,13 +828,16 @@ STAGE PLANS:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
                 outputColumnNames: _col0, _col1
+                Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                 Limit
                   Number of rows: 10
+                  Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
                     GlobalTableId: 1
 #### A masked pattern was here ####
                     NumFilesPerFileSink: 1
+                    Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
 #### A masked pattern was here ####
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
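
The two-reducer plan above is the usual Hive shape for a sorted query with a
LIMIT: Reducer 2 sorts and applies the limit once per reducer, and Reducer 3
re-sorts the survivors and enforces the global limit. A query of roughly this
form would produce it (a hypothetical sketch reconstructed from the operator
tree, not the verbatim .q file; the table name is invented):

    CREATE TABLE ctas_demo AS
    SELECT key, value FROM src SORT BY key, value LIMIT 10;

The new Statistics lines track the limit's effect: 500 rows / 5312 bytes
before each Limit operator, 10 rows / 100 bytes after it, consistent with the
estimated 5312 / 500 ~ 10 bytes per row.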

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/disable_merge_for_bucketing.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/disable_merge_for_bucketing.q.out?rev=1633210&r1=1633209&r2=1633210&view=diff
==============================================================================
Files hive/branches/spark/ql/src/test/results/clientpositive/spark/disable_merge_for_bucketing.q.out (original) and hive/branches/spark/ql/src/test/results/clientpositive/spark/disable_merge_for_bucketing.q.out Mon Oct 20 19:46:13 2014 differ

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/escape_clusterby1.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/escape_clusterby1.q.out?rev=1633210&r1=1633209&r2=1633210&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/escape_clusterby1.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/escape_clusterby1.q.out Mon Oct 20 19:46:13 2014
@@ -21,20 +21,25 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string)
                     outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: string), _col1 (type: string)
                       sort order: ++
                       Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
+                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
                 outputColumnNames: _col0, _col1
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -67,20 +72,25 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string)
                     outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: string), _col1 (type: string)
                       sort order: ++
                       Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
+                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
                 outputColumnNames: _col0, _col1
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
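
Both hunks in escape_clusterby1.q.out annotate the same plan shape: the
Reduce Output Operator carries _col0 and _col1 as key expressions with sort
order ++, and the same two columns as Map-reduce partition columns, which is
exactly what CLUSTER BY produces (distribute and sort on the same columns).
A sketch of the kind of query involved (the test presumably also exercises
escaped identifiers, per its name):

    SELECT key, value FROM src CLUSTER BY key, value;

Since CLUSTER BY neither filters nor aggregates, the row estimate stays at
500 through every operator.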

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/escape_distributeby1.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/escape_distributeby1.q.out?rev=1633210&r1=1633209&r2=1633210&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/escape_distributeby1.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/escape_distributeby1.q.out Mon Oct 20 19:46:13 2014
@@ -21,20 +21,25 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string)
                     outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       sort order: 
                       Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
+                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: string), _col1 (type: string)
         Reducer 2 
             Reduce Operator Tree:
               Select Operator
                 expressions: VALUE._col0 (type: string), VALUE._col1 (type: string)
                 outputColumnNames: _col0, _col1
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -67,20 +72,25 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string)
                     outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       sort order: 
                       Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
+                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: string), _col1 (type: string)
         Reducer 2 
             Reduce Operator Tree:
               Select Operator
                 expressions: VALUE._col0 (type: string), VALUE._col1 (type: string)
                 outputColumnNames: _col0, _col1
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
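
The DISTRIBUTE BY variant differs only in its Reduce Output Operator: the
sort order is empty and both columns appear only as Map-reduce partition
columns, so rows are hashed to reducers but left unsorted. A sketch of the
query shape:

    SELECT key, value FROM src DISTRIBUTE BY key, value;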

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/escape_orderby1.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/escape_orderby1.q.out?rev=1633210&r1=1633209&r2=1633210&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/escape_orderby1.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/escape_orderby1.q.out Mon Oct 20 19:46:13 2014
@@ -21,19 +21,24 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string)
                     outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: string), _col1 (type: string)
                       sort order: ++
+                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
                 outputColumnNames: _col0, _col1
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -66,19 +71,24 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string)
                     outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: string), _col1 (type: string)
                       sort order: ++
+                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
                 outputColumnNames: _col0, _col1
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
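
Here the Reduce Output Operator sorts on both columns with no partition
columns, i.e. a total order through a single reducer, as ORDER BY requires.
A sketch of the query shape:

    SELECT key, value FROM src ORDER BY key, value;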

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/escape_sortby1.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/escape_sortby1.q.out?rev=1633210&r1=1633209&r2=1633210&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/escape_sortby1.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/escape_sortby1.q.out Mon Oct 20 19:46:13 2014
@@ -21,19 +21,24 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string)
                     outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: string), _col1 (type: string)
                       sort order: ++
+                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
                 outputColumnNames: _col0, _col1
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -66,19 +71,24 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string)
                     outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: string), _col1 (type: string)
                       sort order: ++
+                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
                 outputColumnNames: _col0, _col1
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
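
The SORT BY plan in this test looks identical to the ORDER BY one above
(one reducer, both columns in the sort key); the two constructs only diverge
once more than one reducer is in play, since SORT BY guarantees order only
within each reducer. A sketch of the query shape:

    SELECT key, value FROM src SORT BY key, value;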

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby1.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby1.q.out?rev=1633210&r1=1633209&r2=1633210&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby1.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby1.q.out Mon Oct 20 19:46:13 2014
@@ -34,13 +34,16 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string)
                     outputColumnNames: key, value
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: key (type: string)
                       sort order: +
                       Map-reduce partition columns: rand() (type: double)
+                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                       value expressions: substr(value, 5) (type: string)
         Reducer 2 
             Reduce Operator Tree:
@@ -49,10 +52,12 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: partial1
                 outputColumnNames: _col0, _col1
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: double)
         Reducer 3 
             Reduce Operator Tree:
@@ -61,11 +66,14 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: final
                 outputColumnNames: _col0, _col1
+                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: UDFToInteger(_col0) (type: int), _col1 (type: double)
                   outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
+                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
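
The rand() Map-reduce partition column on the map side, followed by a
partial1 aggregation in Reducer 2 and a final one in Reducer 3, is the
signature of Hive's skew-tolerant group-by: rows are first scattered
randomly, partially aggregated, then re-shuffled by key. A sketch of the
query shape, assuming that mode is enabled via the usual setting (the
UDFToInteger cast suggests the result is written into an int column):

    set hive.groupby.skewindata=true;
    SELECT key, sum(substr(value, 5)) FROM src GROUP BY key;

Note the row estimate: 500 in and 250 out of the final aggregation, which
looks like the optimizer's fallback of halving the row count when column
stats are NONE and the number of distinct keys is unknown.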

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby2.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby2.q.out?rev=1633210&r1=1633209&r2=1633210&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby2.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby2.q.out Mon Oct 20 19:46:13 2014
@@ -31,13 +31,16 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string)
                     outputColumnNames: key, value
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: substr(key, 1, 1) (type: string), substr(value, 5) (type: string)
                       sort order: ++
                       Map-reduce partition columns: substr(key, 1, 1) (type: string)
+                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Group By Operator
@@ -45,11 +48,14 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: complete
                 outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string), UDFToInteger(_col1) (type: int), concat(_col0, _col2) (type: string)
                   outputColumnNames: _col0, _col1, _col2
+                  Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
+                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
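
This plan aggregates in a single reduce stage (mode: complete), partitioned
on substr(key, 1, 1), and the same rows-halved-to-250 estimate appears at
the Group By Operator. A sketch consistent with the Select Operator's output
expressions (the exact aggregates are inferred, since the aggregations line
falls outside the hunk shown):

    SELECT substr(key, 1, 1),
           count(DISTINCT substr(value, 5)),
           concat(substr(key, 1, 1), sum(substr(value, 5)))
    FROM src
    GROUP BY substr(key, 1, 1);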

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby3.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby3.q.out?rev=1633210&r1=1633209&r2=1633210&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby3.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby3.q.out Mon Oct 20 19:46:13 2014
@@ -50,21 +50,26 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: value (type: string)
                     outputColumnNames: value
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: substr(value, 5) (type: string)
                       sort order: +
                       Map-reduce partition columns: substr(value, 5) (type: string)
+                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Group By Operator
                 aggregations: sum(KEY._col0:0._col0), avg(KEY._col0:0._col0), avg(DISTINCT KEY._col0:0._col0), max(KEY._col0:0._col0), min(KEY._col0:0._col0), std(KEY._col0:0._col0), stddev_samp(KEY._col0:0._col0), variance(KEY._col0:0._col0), var_samp(KEY._col0:0._col0)
                 mode: partial1
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
+                Statistics: Num rows: 1 Data size: 176 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   sort order: 
+                  Statistics: Num rows: 1 Data size: 176 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: double), _col1 (type: struct<count:bigint,sum:double,input:string>), _col2 (type: struct<count:bigint,sum:double,input:string>), _col3 (type: string), _col4 (type: string), _col5 (type: struct<count:bigint,sum:double,variance:double>), _col6 (type: struct<count:bigint,sum:double,variance:double>), _col7 (type: struct<count:bigint,sum:double,variance:double>), _col8 (type: struct<count:bigint,sum:double,variance:double>)
         Reducer 3 
             Reduce Operator Tree:
@@ -72,11 +77,14 @@ STAGE PLANS:
                 aggregations: sum(VALUE._col0), avg(VALUE._col1), avg(VALUE._col2), max(VALUE._col3), min(VALUE._col4), std(VALUE._col5), stddev_samp(VALUE._col6), variance(VALUE._col7), var_samp(VALUE._col8)
                 mode: final
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
+                Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: double), _col1 (type: double), _col2 (type: double), UDFToDouble(_col3) (type: double), UDFToDouble(_col4) (type: double), _col5 (type: double), _col6 (type: double), _col7 (type: double), _col8 (type: double)
                   outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
+                  Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
+                    Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
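
With no grouping keys, the estimate collapses to a single row as soon as the
aggregation runs: Num rows: 1 at Reducer 2's partial aggregation and again at
Reducer 3's final one. The aggregations line pins down the query almost
exactly; as a sketch:

    SELECT sum(substr(value, 5)), avg(substr(value, 5)),
           avg(DISTINCT substr(value, 5)), max(substr(value, 5)),
           min(substr(value, 5)), std(substr(value, 5)),
           stddev_samp(substr(value, 5)), variance(substr(value, 5)),
           var_samp(substr(value, 5))
    FROM src;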

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby3_map.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby3_map.q.out?rev=1633210&r1=1633209&r2=1633210&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby3_map.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby3_map.q.out Mon Oct 20 19:46:13 2014
@@ -49,17 +49,21 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: value (type: string)
                     outputColumnNames: value
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: sum(substr(value, 5)), avg(substr(value, 5)), avg(DISTINCT substr(value, 5)), max(substr(value, 5)), min(substr(value, 5)), std(substr(value, 5)), stddev_samp(substr(value, 5)), variance(substr(value, 5)), var_samp(substr(value, 5))
                       keys: substr(value, 5) (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9
+                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
+                        Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: double), _col2 (type: struct<count:bigint,sum:double,input:string>), _col4 (type: string), _col5 (type: string), _col6 (type: struct<count:bigint,sum:double,variance:double>), _col7 (type: struct<count:bigint,sum:double,variance:double>), _col8 (type: struct<count:bigint,sum:double,variance:double>), _col9 (type: struct<count:bigint,sum:double,variance:double>)
         Reducer 2 
             Reduce Operator Tree:
@@ -67,11 +71,14 @@ STAGE PLANS:
                 aggregations: sum(VALUE._col0), avg(VALUE._col1), avg(DISTINCT KEY._col0:0._col0), max(VALUE._col3), min(VALUE._col4), std(VALUE._col5), stddev_samp(VALUE._col6), variance(VALUE._col7), var_samp(VALUE._col8)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
+                Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: double), _col1 (type: double), _col2 (type: double), UDFToDouble(_col3) (type: double), UDFToDouble(_col4) (type: double), _col5 (type: double), _col6 (type: double), _col7 (type: double), _col8 (type: double)
                   outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
+                  Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
+                    Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
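
Same nine aggregates as groupby3, but here the map side already runs a hash
Group By Operator (mode: hash) and the reducer merges the partials (mode:
mergepartial), i.e. map-side aggregation, which is what hive.map.aggr
enables:

    set hive.map.aggr=true;
    -- followed by the same nine-aggregate query as in groupby3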

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby3_map_multi_distinct.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby3_map_multi_distinct.q.out?rev=1633210&r1=1633209&r2=1633210&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby3_map_multi_distinct.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby3_map_multi_distinct.q.out Mon Oct 20 19:46:13 2014
@@ -53,17 +53,21 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: value (type: string)
                     outputColumnNames: value
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: sum(substr(value, 5)), avg(substr(value, 5)), avg(DISTINCT substr(value, 5)), max(substr(value, 5)), min(substr(value, 5)), std(substr(value, 5)), stddev_samp(substr(value, 5)), variance(substr(value, 5)), var_samp(substr(value, 5)), sum(DISTINCT substr(value, 5)), count(DISTINCT substr(value, 5))
                       keys: substr(value, 5) (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11
+                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
+                        Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: double), _col2 (type: struct<count:bigint,sum:double,input:string>), _col4 (type: string), _col5 (type: string), _col6 (type: struct<count:bigint,sum:double,variance:double>), _col7 (type: struct<count:bigint,sum:double,variance:double>), _col8 (type: struct<count:bigint,sum:double,variance:double>), _col9 (type: struct<count:bigint,sum:double,variance:double>)
         Reducer 2 
             Reduce Operator Tree:
@@ -71,11 +75,14 @@ STAGE PLANS:
                 aggregations: sum(VALUE._col0), avg(VALUE._col1), avg(DISTINCT KEY._col0:0._col0), max(VALUE._col3), min(VALUE._col4), std(VALUE._col5), stddev_samp(VALUE._col6), variance(VALUE._col7), var_samp(VALUE._col8), sum(DISTINCT KEY._col0:1._col0), count(DISTINCT KEY._col0:2._col0)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
+                Statistics: Num rows: 1 Data size: 248 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: double), _col1 (type: double), _col2 (type: double), UDFToDouble(_col3) (type: double), UDFToDouble(_col4) (type: double), _col5 (type: double), _col6 (type: double), _col7 (type: double), _col8 (type: double), _col9 (type: double), UDFToDouble(_col10) (type: double)
                   outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
+                  Statistics: Num rows: 1 Data size: 248 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
+                    Statistics: Num rows: 1 Data size: 248 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
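
The multi-distinct variant extends the same map-side plan with two more
aggregates, visible in the aggregations list: sum(DISTINCT substr(value, 5))
and count(DISTINCT substr(value, 5)); accordingly the final single-row
estimate grows from 224 to 248 bytes of data size. As a standalone sketch of
just the two additions (the actual test appends them to the nine aggregates
above):

    SELECT sum(DISTINCT substr(value, 5)), count(DISTINCT substr(value, 5))
    FROM src;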