Posted to commits@hive.apache.org by xu...@apache.org on 2014/09/24 05:16:26 UTC

svn commit: r1627210 [1/14] - in /hive/branches/spark: itests/src/test/resources/ ql/src/test/results/clientpositive/spark/

Author: xuefu
Date: Wed Sep 24 03:16:25 2014
New Revision: 1627210

URL: http://svn.apache.org/r1627210
Log:
HIVE-8207: Add .q tests for multi-table insertion [Spark Branch] (Chao via Xuefu)
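
For context, the multi-table insertion these tests cover is Hive's multi-insert
syntax: a single scan of a source table fans out to several INSERT clauses in
one statement. A minimal sketch of the pattern, taken from the
column_access_stats.q output added below (other new tests vary it with
group-bys, lateral views, map joins, and subqueries):

    FROM T1
    INSERT OVERWRITE TABLE T2 SELECT key, count(1) GROUP BY key
    INSERT OVERWRITE TABLE T3 SELECT key, sum(val) GROUP BY key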

Added:
    hive/branches/spark/ql/src/test/results/clientpositive/spark/add_part_multiple.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/column_access_stats.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/date_udf.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby3_map.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby3_map_multi_distinct.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby3_noskew.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby3_noskew_multi_distinct.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby7_map.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby7_map_multi_single_reducer.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby7_map_skew.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby7_noskew.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby_cube1.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer2.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer3.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby_position.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby_ppr.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby_rollup1.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby_sort_1_23.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby_sort_skew_1_23.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/innerjoin.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/input12.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/input13.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/input14.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/input17.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/input18.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/input1_limit.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/input_part2.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/insert_into3.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join_nullsafe.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/load_dyn_part8.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/metadata_only_queries_with_filters.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/multi_insert.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/multi_insert_gby.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/multi_insert_gby2.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/multi_insert_gby3.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/multi_insert_lateral_view.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/multi_insert_move_tasks_share_dependencies.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/multigroupby_singlemr.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/parallel.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/ppd_multi_insert.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/ppd_transform.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/subquery_multiinsert.q.out
Modified:
    hive/branches/spark/itests/src/test/resources/testconfiguration.properties

Modified: hive/branches/spark/itests/src/test/resources/testconfiguration.properties
URL: http://svn.apache.org/viewvc/hive/branches/spark/itests/src/test/resources/testconfiguration.properties?rev=1627210&r1=1627209&r2=1627210&view=diff
==============================================================================
--- hive/branches/spark/itests/src/test/resources/testconfiguration.properties (original)
+++ hive/branches/spark/itests/src/test/resources/testconfiguration.properties Wed Sep 24 03:16:25 2014
@@ -349,17 +349,20 @@ minimr.query.negative.files=cluster_task
   minimr_broken_pipe.q,\
   udf_local_resource.q
 
-spark.query.files=alter_merge_orc.q \
+spark.query.files=add_part_multiple.q \
+  alter_merge_orc.q \
   alter_merge_stats_orc.q \
   avro_compression_enabled_native.q \
   avro_decimal_native.q \
   bucket2.q \
   bucket3.q \
   bucket4.q \
+  column_access_stats.q \
   count.q \
   create_merge_compressed.q \
   ctas.q \
   custom_input_output_format.q \
+  date_udf.q \
   disable_merge_for_bucketing.q \
   enforce_order.q \
   escape_clusterby1.q \
@@ -369,11 +372,37 @@ spark.query.files=alter_merge_orc.q \
   groupby1.q \
   groupby2.q \
   groupby3.q \
+  groupby3_map.q \
+  groupby3_map_multi_distinct.q \
+  groupby3_noskew.q \
+  groupby3_noskew_multi_distinct.q \
   groupby4.q \
+  groupby7_map.q \
+  groupby7_map_multi_single_reducer.q \
+  groupby7_map_skew.q \
+  groupby7_noskew.q \
+  groupby_cube1.q \
+  groupby_multi_single_reducer.q \
+  groupby_multi_single_reducer2.q \
+  groupby_multi_single_reducer3.q \
+  groupby_position.q \
+  groupby_ppr.q \
+  groupby_rollup1.q \
+  groupby_sort_1_23.q \
+  groupby_sort_skew_1_23.q \
   having.q \
+  innerjoin.q \
+  input12.q \
+  input13.q \
+  input14.q \
+  input17.q \
+  input18.q \
+  input1_limit.q \
+  input_part2.q \
   insert1.q \
   insert_into1.q \
   insert_into2.q \
+  insert_into3.q \
   join0.q \
   join1.q \
   join2.q \
@@ -404,6 +433,7 @@ spark.query.files=alter_merge_orc.q \
   join27.q \
   join_1to1.q \
   join_casesensitive.q \
+  join_nullsafe.q \
   limit_pushdown.q \
   load_dyn_part1.q \
   load_dyn_part2.q \
@@ -413,9 +443,20 @@ spark.query.files=alter_merge_orc.q \
   merge1.q \
   merge2.q \
   metadata_only_queries.q \
+  metadata_only_queries_with_filters.q \
+  multi_insert.q \
+  multi_insert_gby.q \
+  multi_insert_gby2.q \
+  multi_insert_gby3.q \
+  multi_insert_lateral_view.q \
+  multi_insert_move_tasks_share_dependencies.q \
+  multigroupby_singlemr.q \
   optimize_nullscan.q \
   order.q \
   order2.q \
+  parallel.q \
+  ppd_multi_insert.q \
+  ppd_transform.q \
   ptf_decimal.q \
   ptf_general_queries.q \
   ptf_matchpath.q \
@@ -437,6 +478,7 @@ spark.query.files=alter_merge_orc.q \
   scriptfile1.q \
   sort.q \
   spark_test.q \
+  subquery_multiinsert.q \
   temp_table.q \
   timestamp_1.q \
   timestamp_2.q \
@@ -498,6 +540,7 @@ spark.query.files=alter_merge_orc.q \
   load_dyn_part5.q \
   load_dyn_part6.q \
   load_dyn_part7.q \
+  load_dyn_part8.q \
   load_dyn_part9.q \
   load_dyn_part10.q \
   load_dyn_part11.q \

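For readers unfamiliar with the itests setup: spark.query.files in
testconfiguration.properties is a backslash-continued list of .q files, and the
Spark qfile test driver runs each listed query and diffs its output against the
matching .q.out file under clientpositive/spark/ (the files added below).
Abbreviated, the registration for this change looks like:

    spark.query.files=add_part_multiple.q \
      column_access_stats.q \
      date_udf.q \
      ...
      subquery_multiinsert.q
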
Added: hive/branches/spark/ql/src/test/results/clientpositive/spark/add_part_multiple.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/add_part_multiple.q.out?rev=1627210&view=auto
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/add_part_multiple.q.out (added)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/add_part_multiple.q.out Wed Sep 24 03:16:25 2014
@@ -0,0 +1,113 @@
+PREHOOK: query: -- HIVE-5122 locations for 2nd, 3rd... partition are ignored
+
+CREATE TABLE add_part_test (key STRING, value STRING) PARTITIONED BY (ds STRING)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@add_part_test
+POSTHOOK: query: -- HIVE-5122 locations for 2nd, 3rd... partition are ignored
+
+CREATE TABLE add_part_test (key STRING, value STRING) PARTITIONED BY (ds STRING)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@add_part_test
+PREHOOK: query: explain
+ALTER TABLE add_part_test ADD IF NOT EXISTS
+PARTITION (ds='2010-01-01') location 'A'
+PARTITION (ds='2010-02-01') location 'B'
+PARTITION (ds='2010-03-01')
+PARTITION (ds='2010-04-01') location 'C'
+PREHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: query: explain
+ALTER TABLE add_part_test ADD IF NOT EXISTS
+PARTITION (ds='2010-01-01') location 'A'
+PARTITION (ds='2010-02-01') location 'B'
+PARTITION (ds='2010-03-01')
+PARTITION (ds='2010-04-01') location 'C'
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+      Add Partition Operator:
+#### A masked pattern was here ####
+          Spec: {ds=2010-01-01}, {ds=2010-02-01}, {ds=2010-03-01}, {ds=2010-04-01}
+
+PREHOOK: query: ALTER TABLE add_part_test ADD IF NOT EXISTS
+PARTITION (ds='2010-01-01') location 'A'
+PARTITION (ds='2010-02-01') location 'B'
+PARTITION (ds='2010-03-01')
+PARTITION (ds='2010-04-01') location 'C'
+PREHOOK: type: ALTERTABLE_ADDPARTS
+PREHOOK: Input: A
+PREHOOK: Input: B
+PREHOOK: Input: C
+PREHOOK: Output: default@add_part_test
+POSTHOOK: query: ALTER TABLE add_part_test ADD IF NOT EXISTS
+PARTITION (ds='2010-01-01') location 'A'
+PARTITION (ds='2010-02-01') location 'B'
+PARTITION (ds='2010-03-01')
+PARTITION (ds='2010-04-01') location 'C'
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: Input: A
+POSTHOOK: Input: B
+POSTHOOK: Input: C
+POSTHOOK: Output: default@add_part_test
+POSTHOOK: Output: default@add_part_test@ds=2010-01-01
+POSTHOOK: Output: default@add_part_test@ds=2010-02-01
+POSTHOOK: Output: default@add_part_test@ds=2010-03-01
+POSTHOOK: Output: default@add_part_test@ds=2010-04-01
+PREHOOK: query: from src TABLESAMPLE (1 ROWS)
+insert into table add_part_test PARTITION (ds='2010-01-01') select 100,100
+insert into table add_part_test PARTITION (ds='2010-02-01') select 200,200
+insert into table add_part_test PARTITION (ds='2010-03-01') select 400,300
+insert into table add_part_test PARTITION (ds='2010-04-01') select 500,400
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@add_part_test@ds=2010-01-01
+PREHOOK: Output: default@add_part_test@ds=2010-02-01
+PREHOOK: Output: default@add_part_test@ds=2010-03-01
+PREHOOK: Output: default@add_part_test@ds=2010-04-01
+[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
+[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
+[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
+[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
+POSTHOOK: query: from src TABLESAMPLE (1 ROWS)
+insert into table add_part_test PARTITION (ds='2010-01-01') select 100,100
+insert into table add_part_test PARTITION (ds='2010-02-01') select 200,200
+insert into table add_part_test PARTITION (ds='2010-03-01') select 400,300
+insert into table add_part_test PARTITION (ds='2010-04-01') select 500,400
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@add_part_test@ds=2010-01-01
+POSTHOOK: Output: default@add_part_test@ds=2010-02-01
+POSTHOOK: Output: default@add_part_test@ds=2010-03-01
+POSTHOOK: Output: default@add_part_test@ds=2010-04-01
+POSTHOOK: Lineage: add_part_test PARTITION(ds=2010-01-01).key SIMPLE []
+POSTHOOK: Lineage: add_part_test PARTITION(ds=2010-01-01).value SIMPLE []
+POSTHOOK: Lineage: add_part_test PARTITION(ds=2010-02-01).key SIMPLE []
+POSTHOOK: Lineage: add_part_test PARTITION(ds=2010-02-01).value SIMPLE []
+POSTHOOK: Lineage: add_part_test PARTITION(ds=2010-03-01).key SIMPLE []
+POSTHOOK: Lineage: add_part_test PARTITION(ds=2010-03-01).value SIMPLE []
+POSTHOOK: Lineage: add_part_test PARTITION(ds=2010-04-01).key SIMPLE []
+POSTHOOK: Lineage: add_part_test PARTITION(ds=2010-04-01).value SIMPLE []
+PREHOOK: query: select * from add_part_test
+PREHOOK: type: QUERY
+PREHOOK: Input: default@add_part_test
+PREHOOK: Input: default@add_part_test@ds=2010-01-01
+PREHOOK: Input: default@add_part_test@ds=2010-02-01
+PREHOOK: Input: default@add_part_test@ds=2010-03-01
+PREHOOK: Input: default@add_part_test@ds=2010-04-01
+#### A masked pattern was here ####
+POSTHOOK: query: select * from add_part_test
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@add_part_test
+POSTHOOK: Input: default@add_part_test@ds=2010-01-01
+POSTHOOK: Input: default@add_part_test@ds=2010-02-01
+POSTHOOK: Input: default@add_part_test@ds=2010-03-01
+POSTHOOK: Input: default@add_part_test@ds=2010-04-01
+#### A masked pattern was here ####
+100	100	2010-01-01
+200	200	2010-02-01
+400	300	2010-03-01
+500	400	2010-04-01

Added: hive/branches/spark/ql/src/test/results/clientpositive/spark/column_access_stats.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/column_access_stats.q.out?rev=1627210&view=auto
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/column_access_stats.q.out (added)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/column_access_stats.q.out Wed Sep 24 03:16:25 2014
@@ -0,0 +1,933 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+-- This test is used for testing the ColumnAccessAnalyzer
+
+CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@T1
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@t1
+PREHOOK: query: CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@T2
+PREHOOK: query: CREATE TABLE T3(key STRING, val STRING) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@T3
+PREHOOK: query: CREATE TABLE T4(key STRING, val STRING) PARTITIONED BY (p STRING)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@T4
+PREHOOK: query: -- Simple select queries
+SELECT key FROM T1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+#### A masked pattern was here ####
+Table:default@t1
+Columns:key
+
+1
+2
+3
+7
+8
+8
+PREHOOK: query: SELECT key, val FROM T1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+#### A masked pattern was here ####
+Table:default@t1
+Columns:key,val
+
+1	11
+2	12
+3	13
+7	17
+8	18
+8	28
+PREHOOK: query: SELECT 1 FROM T1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+#### A masked pattern was here ####
+1
+1
+1
+1
+1
+1
+PREHOOK: query: SELECT key, val from T4 where p=1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t4
+#### A masked pattern was here ####
+Table:default@t4
+Columns:key,p,val
+
+PREHOOK: query: SELECT val FROM T4 where p=1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t4
+#### A masked pattern was here ####
+Table:default@t4
+Columns:p,val
+
+PREHOOK: query: SELECT p, val FROM T4 where p=1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t4
+#### A masked pattern was here ####
+Table:default@t4
+Columns:p,val
+
+PREHOOK: query: -- More complicated select queries
+EXPLAIN SELECT key FROM (SELECT key, val FROM T1) subq1
+PREHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: t1
+                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                  Select Operator
+                    expressions: key (type: string)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT key FROM (SELECT key, val FROM T1) subq1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+#### A masked pattern was here ####
+Table:default@t1
+Columns:key
+
+1
+2
+3
+7
+8
+8
+PREHOOK: query: EXPLAIN SELECT k FROM (SELECT key as k, val as v FROM T1) subq1
+PREHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: t1
+                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                  Select Operator
+                    expressions: key (type: string)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT k FROM (SELECT key as k, val as v FROM T1) subq1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+#### A masked pattern was here ####
+Table:default@t1
+Columns:key
+
+1
+2
+3
+7
+8
+8
+PREHOOK: query: SELECT key + 1 as k FROM T1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+#### A masked pattern was here ####
+Table:default@t1
+Columns:key
+
+2.0
+3.0
+4.0
+8.0
+9.0
+9.0
+PREHOOK: query: SELECT key + val as k FROM T1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+#### A masked pattern was here ####
+Table:default@t1
+Columns:key,val
+
+12.0
+14.0
+16.0
+24.0
+26.0
+36.0
+PREHOOK: query: -- Work with union
+EXPLAIN
+SELECT * FROM (
+SELECT key as c FROM T1
+ UNION ALL
+SELECT val as c FROM T1
+) subq1
+PREHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+      Edges:
+        Union 2 <- Map 1 (NONE), Map 3 (NONE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: t1
+                  Select Operator
+                    expressions: key (type: string)
+                    outputColumnNames: _col0
+                    Select Operator
+                      expressions: _col0 (type: string)
+                      outputColumnNames: _col0
+                      File Output Operator
+                        compressed: false
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: t1
+                  Select Operator
+                    expressions: val (type: string)
+                    outputColumnNames: _col0
+                    Select Operator
+                      expressions: _col0 (type: string)
+                      outputColumnNames: _col0
+                      File Output Operator
+                        compressed: false
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+        Union 2 
+            Vertex: Union 2
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT * FROM (
+SELECT key as c FROM T1
+ UNION ALL
+SELECT val as c FROM T1
+) subq1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+#### A masked pattern was here ####
+Table:default@t1
+Columns:key,val
+
+1
+11
+12
+13
+17
+18
+2
+28
+3
+7
+8
+8
+PREHOOK: query: EXPLAIN
+SELECT * FROM (
+SELECT key as c FROM T1
+ UNION ALL
+SELECT key as c FROM T1
+) subq1
+PREHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+      Edges:
+        Union 2 <- Map 1 (NONE), Map 3 (NONE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: t1
+                  Select Operator
+                    expressions: key (type: string)
+                    outputColumnNames: _col0
+                    Select Operator
+                      expressions: _col0 (type: string)
+                      outputColumnNames: _col0
+                      File Output Operator
+                        compressed: false
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: t1
+                  Select Operator
+                    expressions: key (type: string)
+                    outputColumnNames: _col0
+                    Select Operator
+                      expressions: _col0 (type: string)
+                      outputColumnNames: _col0
+                      File Output Operator
+                        compressed: false
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+        Union 2 
+            Vertex: Union 2
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT * FROM (
+SELECT key as c FROM T1
+ UNION ALL
+SELECT key as c FROM T1
+) subq1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+#### A masked pattern was here ####
+Table:default@t1
+Columns:key
+
+1
+1
+2
+2
+3
+3
+7
+7
+8
+8
+8
+8
+PREHOOK: query: -- Work with insert overwrite
+FROM T1
+INSERT OVERWRITE TABLE T2 SELECT key, count(1) GROUP BY key
+INSERT OVERWRITE TABLE T3 SELECT key, sum(val) GROUP BY key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Output: default@t2
+PREHOOK: Output: default@t3
+[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
+[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
+Table:default@t1
+Columns:key,val
+
+PREHOOK: query: -- Simple joins
+SELECT *
+FROM T1 JOIN T2
+ON T1.key = T2.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+#### A masked pattern was here ####
+Table:default@t2
+Columns:key,val
+
+Table:default@t1
+Columns:key,val
+
+1	11	1	1
+2	12	2	1
+3	13	3	1
+7	17	7	1
+8	18	8	2
+8	28	8	2
+PREHOOK: query: EXPLAIN
+SELECT T1.key
+FROM T1 JOIN T2
+ON T1.key = T2.key
+PREHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP SORT), Map 3 (GROUP SORT)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: t2
+                  Statistics: Num rows: 0 Data size: 20 Basic stats: PARTIAL Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: string)
+                      sort order: +
+                      Map-reduce partition columns: key (type: string)
+                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: t1
+                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: string)
+                      sort order: +
+                      Map-reduce partition columns: key (type: string)
+                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+        Reducer 2 
+            Reduce Operator Tree:
+              Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                condition expressions:
+                  0 {KEY.reducesinkkey0}
+                  1 
+                outputColumnNames: _col0
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: string)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT T1.key
+FROM T1 JOIN T2
+ON T1.key = T2.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+#### A masked pattern was here ####
+Table:default@t2
+Columns:key
+
+Table:default@t1
+Columns:key
+
+1
+2
+3
+7
+8
+8
+PREHOOK: query: SELECT *
+FROM T1 JOIN T2
+ON T1.key = T2.key AND T1.val = T2.val
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+#### A masked pattern was here ####
+Table:default@t2
+Columns:key,val
+
+Table:default@t1
+Columns:key,val
+
+PREHOOK: query: -- Map join
+SELECT /*+ MAPJOIN(a) */ * 
+FROM T1 a JOIN T2 b 
+ON a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+#### A masked pattern was here ####
+Table:default@t2
+Columns:key,val
+
+Table:default@t1
+Columns:key,val
+
+1	11	1	1
+2	12	2	1
+3	13	3	1
+7	17	7	1
+8	18	8	2
+8	28	8	2
+PREHOOK: query: -- More joins
+EXPLAIN
+SELECT *
+FROM T1 JOIN T2
+ON T1.key = T2.key AND T1.val = 3 and T2.val = 3
+PREHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP SORT), Map 3 (GROUP SORT)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: t2
+                  Statistics: Num rows: 0 Data size: 20 Basic stats: PARTIAL Column stats: NONE
+                  Filter Operator
+                    predicate: ((val = 3) and key is not null) (type: boolean)
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: string)
+                      sort order: +
+                      Map-reduce partition columns: key (type: string)
+                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: t1
+                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                  Filter Operator
+                    predicate: ((val = 3) and key is not null) (type: boolean)
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: string)
+                      sort order: +
+                      Map-reduce partition columns: key (type: string)
+                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+        Reducer 2 
+            Reduce Operator Tree:
+              Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                condition expressions:
+                  0 {KEY.reducesinkkey0}
+                  1 {KEY.reducesinkkey0}
+                outputColumnNames: _col0, _col5
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: string), '3' (type: string), _col5 (type: string), '3' (type: string)
+                  outputColumnNames: _col0, _col1, _col2, _col3
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT *
+FROM T1 JOIN T2
+ON T1.key = T2.key AND T1.val = 3 and T2.val = 3
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+#### A masked pattern was here ####
+Table:default@t2
+Columns:key,val
+
+Table:default@t1
+Columns:key,val
+
+PREHOOK: query: EXPLAIN
+SELECT subq1.val
+FROM 
+(
+  SELECT val FROM T1 WHERE key = 5  
+) subq1
+JOIN 
+(
+  SELECT val FROM T2 WHERE key = 6
+) subq2 
+ON subq1.val = subq2.val
+PREHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP SORT), Map 3 (GROUP SORT)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: t2
+                  Statistics: Num rows: 0 Data size: 20 Basic stats: PARTIAL Column stats: NONE
+                  Filter Operator
+                    predicate: ((key = 6) and val is not null) (type: boolean)
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                    Select Operator
+                      expressions: val (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: t1
+                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                  Filter Operator
+                    predicate: ((key = 5) and val is not null) (type: boolean)
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                    Select Operator
+                      expressions: val (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+        Reducer 2 
+            Reduce Operator Tree:
+              Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                condition expressions:
+                  0 {KEY.reducesinkkey0}
+                  1 
+                outputColumnNames: _col0
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: string)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT subq1.val
+FROM 
+(
+  SELECT val FROM T1 WHERE key = 5  
+) subq1
+JOIN 
+(
+  SELECT val FROM T2 WHERE key = 6
+) subq2 
+ON subq1.val = subq2.val
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+#### A masked pattern was here ####
+Table:default@t2
+Columns:key,val
+
+Table:default@t1
+Columns:key,val
+
+PREHOOK: query: -- Join followed by join
+EXPLAIN
+SELECT *
+FROM
+(
+  SELECT subq1.key as key
+  FROM
+  (
+    SELECT key, val FROM T1
+  ) subq1
+  JOIN
+  (
+    SELECT key, 'teststring' as val FROM T2
+  ) subq2
+  ON subq1.key = subq2.key
+) T4
+JOIN T3
+ON T3.key = T4.key
+PREHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP SORT), Reducer 4 (GROUP SORT)
+        Reducer 4 <- Map 3 (GROUP SORT), Map 5 (GROUP SORT)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: t3
+                  Statistics: Num rows: 0 Data size: 35 Basic stats: PARTIAL Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: string)
+                      sort order: +
+                      Map-reduce partition columns: key (type: string)
+                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                      value expressions: val (type: string)
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: t2
+                  Statistics: Num rows: 0 Data size: 20 Basic stats: PARTIAL Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+        Map 5 
+            Map Operator Tree:
+                TableScan
+                  alias: t1
+                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+        Reducer 2 
+            Reduce Operator Tree:
+              Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                condition expressions:
+                  0 {KEY.reducesinkkey0}
+                  1 {KEY.reducesinkkey0} {VALUE._col0}
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
+                  outputColumnNames: _col0, _col1, _col2
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+        Reducer 4 
+            Reduce Operator Tree:
+              Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                condition expressions:
+                  0 {KEY.reducesinkkey0}
+                  1 
+                outputColumnNames: _col0
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: string)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: string)
+                    sort order: +
+                    Map-reduce partition columns: _col0 (type: string)
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT *
+FROM
+(
+  SELECT subq1.key as key
+  FROM
+  (
+    SELECT key, val FROM T1
+  ) subq1
+  JOIN
+  (
+    SELECT key, 'teststring' as val FROM T2
+  ) subq2
+  ON subq1.key = subq2.key
+) T4
+JOIN T3
+ON T3.key = T4.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+Table:default@t2
+Columns:key
+
+Table:default@t1
+Columns:key
+
+Table:default@t3
+Columns:key,val
+
+1	1	11.0
+2	2	12.0
+3	3	13.0
+7	7	17.0
+8	8	46.0
+8	8	46.0
+PREHOOK: query: -- for partitioned table
+SELECT * FROM srcpart TABLESAMPLE (10 ROWS)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+Table:default@srcpart
+Columns:ds,hr,key,value
+
+165	val_165	2008-04-08	11
+238	val_238	2008-04-08	11
+255	val_255	2008-04-08	11
+27	val_27	2008-04-08	11
+278	val_278	2008-04-08	11
+311	val_311	2008-04-08	11
+409	val_409	2008-04-08	11
+484	val_484	2008-04-08	11
+86	val_86	2008-04-08	11
+98	val_98	2008-04-08	11
+PREHOOK: query: SELECT key,ds FROM srcpart TABLESAMPLE (10 ROWS) WHERE hr='11'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+#### A masked pattern was here ####
+Table:default@srcpart
+Columns:ds,hr,key
+
+165	2008-04-08
+238	2008-04-08
+255	2008-04-08
+27	2008-04-08
+278	2008-04-08
+311	2008-04-08
+409	2008-04-08
+484	2008-04-08
+86	2008-04-08
+98	2008-04-08
+PREHOOK: query: SELECT value FROM srcpart TABLESAMPLE (10 ROWS) WHERE ds='2008-04-08'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+#### A masked pattern was here ####
+Table:default@srcpart
+Columns:ds,value
+
+val_165
+val_238
+val_255
+val_27
+val_278
+val_311
+val_409
+val_484
+val_86
+val_98

Added: hive/branches/spark/ql/src/test/results/clientpositive/spark/date_udf.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/date_udf.q.out?rev=1627210&view=auto
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/date_udf.q.out (added)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/date_udf.q.out Wed Sep 24 03:16:25 2014
@@ -0,0 +1,242 @@
+PREHOOK: query: drop table date_udf
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table date_udf
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: drop table date_udf_string
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table date_udf_string
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: drop table date_udf_flight
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table date_udf_flight
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table date_udf (d date)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@date_udf
+POSTHOOK: query: create table date_udf (d date)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@date_udf
+PREHOOK: query: create table date_udf_string (d string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@date_udf_string
+POSTHOOK: query: create table date_udf_string (d string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@date_udf_string
+PREHOOK: query: from src
+  insert overwrite table date_udf 
+    select '2011-05-06' limit 1
+  insert overwrite table date_udf_string
+    select '2011-05-06' limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@date_udf
+PREHOOK: Output: default@date_udf_string
+[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
+[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
+POSTHOOK: query: from src
+  insert overwrite table date_udf 
+    select '2011-05-06' limit 1
+  insert overwrite table date_udf_string
+    select '2011-05-06' limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@date_udf
+POSTHOOK: Output: default@date_udf_string
+POSTHOOK: Lineage: date_udf.d EXPRESSION []
+POSTHOOK: Lineage: date_udf_string.d SIMPLE []
+PREHOOK: query: create table date_udf_flight (
+  ORIGIN_CITY_NAME string,
+  DEST_CITY_NAME string,
+  FL_DATE date,
+  ARR_DELAY float,
+  FL_NUM int
+)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@date_udf_flight
+POSTHOOK: query: create table date_udf_flight (
+  ORIGIN_CITY_NAME string,
+  DEST_CITY_NAME string,
+  FL_DATE date,
+  ARR_DELAY float,
+  FL_NUM int
+)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@date_udf_flight
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/flights_tiny.txt.1' OVERWRITE INTO TABLE date_udf_flight
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@date_udf_flight
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/flights_tiny.txt.1' OVERWRITE INTO TABLE date_udf_flight
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@date_udf_flight
+PREHOOK: query: -- Test UDFs with date input
+select unix_timestamp(d), year(d), month(d), day(d), dayofmonth(d),
+    weekofyear(d), to_date(d)
+  from date_udf
+PREHOOK: type: QUERY
+PREHOOK: Input: default@date_udf
+#### A masked pattern was here ####
+POSTHOOK: query: -- Test UDFs with date input
+select unix_timestamp(d), year(d), month(d), day(d), dayofmonth(d),
+    weekofyear(d), to_date(d)
+  from date_udf
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@date_udf
+#### A masked pattern was here ####
+1304665200	2011	5	6	6	18	2011-05-06
+PREHOOK: query: select date_add(d, 5), date_sub(d, 10)
+  from date_udf
+PREHOOK: type: QUERY
+PREHOOK: Input: default@date_udf
+#### A masked pattern was here ####
+POSTHOOK: query: select date_add(d, 5), date_sub(d, 10)
+  from date_udf
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@date_udf
+#### A masked pattern was here ####
+2011-05-11	2011-04-26
+PREHOOK: query: select datediff(d, d), datediff(d, '2002-03-21'), datediff('2002-03-21', d),
+    datediff(cast ('2002-03-21 00:00:00' as timestamp), d),
+    datediff(d, cast ('2002-03-21 00:00:00' as timestamp))
+  from date_udf
+PREHOOK: type: QUERY
+PREHOOK: Input: default@date_udf
+#### A masked pattern was here ####
+POSTHOOK: query: select datediff(d, d), datediff(d, '2002-03-21'), datediff('2002-03-21', d),
+    datediff(cast ('2002-03-21 00:00:00' as timestamp), d),
+    datediff(d, cast ('2002-03-21 00:00:00' as timestamp))
+  from date_udf
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@date_udf
+#### A masked pattern was here ####
+0	3333	-3333	-3332	3332
+PREHOOK: query: -- Test UDFs with string input
+select unix_timestamp(d), year(d), month(d), day(d), dayofmonth(d), 
+    weekofyear(d), to_date(d)
+  from date_udf_string
+PREHOOK: type: QUERY
+PREHOOK: Input: default@date_udf_string
+#### A masked pattern was here ####
+POSTHOOK: query: -- Test UDFs with string input
+select unix_timestamp(d), year(d), month(d), day(d), dayofmonth(d), 
+    weekofyear(d), to_date(d)
+  from date_udf_string
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@date_udf_string
+#### A masked pattern was here ####
+NULL	2011	5	6	6	18	2011-05-06
+PREHOOK: query: select date_add(d, 5), date_sub(d, 10)  from date_udf_string
+PREHOOK: type: QUERY
+PREHOOK: Input: default@date_udf_string
+#### A masked pattern was here ####
+POSTHOOK: query: select date_add(d, 5), date_sub(d, 10)  from date_udf_string
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@date_udf_string
+#### A masked pattern was here ####
+2011-05-11	2011-04-26
+PREHOOK: query: select datediff(d, d), datediff(d, '2002-03-21'), datediff('2002-03-21', d),
+    datediff('2002-03-21 00:00:00', d),
+    datediff(d, '2002-03-21 00:00:00')
+  from date_udf_string
+PREHOOK: type: QUERY
+PREHOOK: Input: default@date_udf_string
+#### A masked pattern was here ####
+POSTHOOK: query: select datediff(d, d), datediff(d, '2002-03-21'), datediff('2002-03-21', d),
+    datediff('2002-03-21 00:00:00', d),
+    datediff(d, '2002-03-21 00:00:00')
+  from date_udf_string
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@date_udf_string
+#### A masked pattern was here ####
+0	3333	-3333	-3333	3333
+PREHOOK: query: select 
+    to_utc_timestamp(date '1970-01-01', 'America/Los_Angeles'),
+    from_utc_timestamp(date '1970-01-01', 'America/Los_Angeles'),
+    to_utc_timestamp(date '2013-06-19', 'America/Los_Angeles'),
+    from_utc_timestamp(date '2013-06-19', 'America/Los_Angeles')
+  from date_udf
+PREHOOK: type: QUERY
+PREHOOK: Input: default@date_udf
+#### A masked pattern was here ####
+POSTHOOK: query: select 
+    to_utc_timestamp(date '1970-01-01', 'America/Los_Angeles'),
+    from_utc_timestamp(date '1970-01-01', 'America/Los_Angeles'),
+    to_utc_timestamp(date '2013-06-19', 'America/Los_Angeles'),
+    from_utc_timestamp(date '2013-06-19', 'America/Los_Angeles')
+  from date_udf
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@date_udf
+#### A masked pattern was here ####
+1970-01-01 08:00:00	1969-12-31 16:00:00	2013-06-19 07:00:00	2013-06-18 17:00:00
+PREHOOK: query: -- should all be true
+select 
+    to_utc_timestamp(date '1970-01-01', 'America/Los_Angeles') = to_utc_timestamp(timestamp('1970-01-01 00:00:00'), 'America/Los_Angeles'),
+    from_utc_timestamp(date '1970-01-01', 'America/Los_Angeles') = from_utc_timestamp(timestamp('1970-01-01 00:00:00'), 'America/Los_Angeles'),
+    to_utc_timestamp(date '2013-06-19', 'America/Los_Angeles') = to_utc_timestamp(timestamp('2013-06-19 00:00:00'), 'America/Los_Angeles'),
+    from_utc_timestamp(date '2013-06-19', 'America/Los_Angeles') = from_utc_timestamp(timestamp('2013-06-19 00:00:00'), 'America/Los_Angeles')
+  from date_udf
+PREHOOK: type: QUERY
+PREHOOK: Input: default@date_udf
+#### A masked pattern was here ####
+POSTHOOK: query: -- should all be true
+select 
+    to_utc_timestamp(date '1970-01-01', 'America/Los_Angeles') = to_utc_timestamp(timestamp('1970-01-01 00:00:00'), 'America/Los_Angeles'),
+    from_utc_timestamp(date '1970-01-01', 'America/Los_Angeles') = from_utc_timestamp(timestamp('1970-01-01 00:00:00'), 'America/Los_Angeles'),
+    to_utc_timestamp(date '2013-06-19', 'America/Los_Angeles') = to_utc_timestamp(timestamp('2013-06-19 00:00:00'), 'America/Los_Angeles'),
+    from_utc_timestamp(date '2013-06-19', 'America/Los_Angeles') = from_utc_timestamp(timestamp('2013-06-19 00:00:00'), 'America/Los_Angeles')
+  from date_udf
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@date_udf
+#### A masked pattern was here ####
+true	true	true	true
+PREHOOK: query: -- Aggregation functions (min/max)
+select min(fl_date) from date_udf_flight
+PREHOOK: type: QUERY
+PREHOOK: Input: default@date_udf_flight
+#### A masked pattern was here ####
+POSTHOOK: query: -- Aggregation functions (min/max)
+select min(fl_date) from date_udf_flight
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@date_udf_flight
+#### A masked pattern was here ####
+2010-10-20
+PREHOOK: query: select max(fl_date) from date_udf_flight
+PREHOOK: type: QUERY
+PREHOOK: Input: default@date_udf_flight
+#### A masked pattern was here ####
+POSTHOOK: query: select max(fl_date) from date_udf_flight
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@date_udf_flight
+#### A masked pattern was here ####
+2010-10-31
+PREHOOK: query: drop table date_udf
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@date_udf
+PREHOOK: Output: default@date_udf
+POSTHOOK: query: drop table date_udf
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@date_udf
+POSTHOOK: Output: default@date_udf
+PREHOOK: query: drop table date_udf_string
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@date_udf_string
+PREHOOK: Output: default@date_udf_string
+POSTHOOK: query: drop table date_udf_string
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@date_udf_string
+POSTHOOK: Output: default@date_udf_string
+PREHOOK: query: drop table date_udf_flight
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@date_udf_flight
+PREHOOK: Output: default@date_udf_flight
+POSTHOOK: query: drop table date_udf_flight
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@date_udf_flight
+POSTHOOK: Output: default@date_udf_flight

Added: hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby3_map.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby3_map.q.out?rev=1627210&view=auto
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby3_map.q.out (added)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby3_map.q.out Wed Sep 24 03:16:25 2014
@@ -0,0 +1,150 @@
+PREHOOK: query: CREATE TABLE dest1(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@dest1
+POSTHOOK: query: CREATE TABLE dest1(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@dest1
+PREHOOK: query: EXPLAIN
+FROM src
+INSERT OVERWRITE TABLE dest1 SELECT
+  sum(substr(src.value,5)),
+  avg(substr(src.value,5)),
+  avg(DISTINCT substr(src.value,5)),
+  max(substr(src.value,5)),
+  min(substr(src.value,5)),
+  std(substr(src.value,5)),
+  stddev_samp(substr(src.value,5)),
+  variance(substr(src.value,5)),
+  var_samp(substr(src.value,5))
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+FROM src
+INSERT OVERWRITE TABLE dest1 SELECT
+  sum(substr(src.value,5)),
+  avg(substr(src.value,5)),
+  avg(DISTINCT substr(src.value,5)),
+  max(substr(src.value,5)),
+  min(substr(src.value,5)),
+  std(substr(src.value,5)),
+  stddev_samp(substr(src.value,5)),
+  variance(substr(src.value,5)),
+  var_samp(substr(src.value,5))
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP SORT)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: value (type: string)
+                    outputColumnNames: value
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      aggregations: sum(substr(value, 5)), avg(substr(value, 5)), avg(DISTINCT substr(value, 5)), max(substr(value, 5)), min(substr(value, 5)), std(substr(value, 5)), stddev_samp(substr(value, 5)), variance(substr(value, 5)), var_samp(substr(value, 5))
+                      keys: substr(value, 5) (type: string)
+                      mode: hash
+                      outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9
+                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col1 (type: double), _col2 (type: struct<count:bigint,sum:double,input:string>), _col4 (type: string), _col5 (type: string), _col6 (type: struct<count:bigint,sum:double,variance:double>), _col7 (type: struct<count:bigint,sum:double,variance:double>), _col8 (type: struct<count:bigint,sum:double,variance:double>), _col9 (type: struct<count:bigint,sum:double,variance:double>)
+        Reducer 2 
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: sum(VALUE._col0), avg(VALUE._col1), avg(DISTINCT KEY._col0:0._col0), max(VALUE._col3), min(VALUE._col4), std(VALUE._col5), stddev_samp(VALUE._col6), variance(VALUE._col7), var_samp(VALUE._col8)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
+                Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: double), _col1 (type: double), _col2 (type: double), UDFToDouble(_col3) (type: double), UDFToDouble(_col4) (type: double), _col5 (type: double), _col6 (type: double), _col7 (type: double), _col8 (type: double)
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
+                  Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        name: default.dest1
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.dest1
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+
+PREHOOK: query: FROM src
+INSERT OVERWRITE TABLE dest1 SELECT
+  sum(substr(src.value,5)),
+  avg(substr(src.value,5)),
+  avg(DISTINCT substr(src.value,5)),
+  max(substr(src.value,5)),
+  min(substr(src.value,5)),
+  std(substr(src.value,5)),
+  stddev_samp(substr(src.value,5)),
+  variance(substr(src.value,5)),
+  var_samp(substr(src.value,5))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@dest1
+[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
+POSTHOOK: query: FROM src
+INSERT OVERWRITE TABLE dest1 SELECT
+  sum(substr(src.value,5)),
+  avg(substr(src.value,5)),
+  avg(DISTINCT substr(src.value,5)),
+  max(substr(src.value,5)),
+  min(substr(src.value,5)),
+  std(substr(src.value,5)),
+  stddev_samp(substr(src.value,5)),
+  variance(substr(src.value,5)),
+  var_samp(substr(src.value,5))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@dest1
+POSTHOOK: Lineage: dest1.c1 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.c2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.c3 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.c4 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.c5 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.c6 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.c7 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.c8 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.c9 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT dest1.* FROM dest1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@dest1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@dest1
+#### A masked pattern was here ####
+130091.0	260.182	256.10355987055016	98.0	0.0	142.9268095075238	143.06995106518906	20428.072876	20469.01089779559
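
This is the map-side-aggregation variant: the mapper runs a hash-mode Group By Operator and the single DISTINCT expression doubles as the shuffle key, so Reducer 2 only merges partials (mode: mergepartial); the corresponding .q file, not reproduced in this mail, is expected to enable hive.map.aggr. Since std/variance are the population forms and stddev_samp/var_samp the sample forms, the stored columns can be cross-checked against each other; a sketch against the dest1 contents above (c6/c7 hold the standard deviations, c8/c9 the variances, per the SELECT order):

    -- both differences should be ~0, up to floating-point rounding
    SELECT c8 - c6 * c6 AS pop_variance_check,
           c9 - c7 * c7 AS sample_variance_check
    FROM dest1;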

Added: hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby3_map_multi_distinct.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby3_map_multi_distinct.q.out?rev=1627210&view=auto
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby3_map_multi_distinct.q.out (added)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby3_map_multi_distinct.q.out Wed Sep 24 03:16:25 2014
@@ -0,0 +1,160 @@
+PREHOOK: query: CREATE TABLE dest1(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE, c10 DOUBLE, c11 DOUBLE) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@dest1
+POSTHOOK: query: CREATE TABLE dest1(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE, c10 DOUBLE, c11 DOUBLE) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@dest1
+PREHOOK: query: EXPLAIN
+FROM src
+INSERT OVERWRITE TABLE dest1 SELECT
+  sum(substr(src.value,5)),
+  avg(substr(src.value,5)),
+  avg(DISTINCT substr(src.value,5)),
+  max(substr(src.value,5)),
+  min(substr(src.value,5)),
+  std(substr(src.value,5)),
+  stddev_samp(substr(src.value,5)),
+  variance(substr(src.value,5)),
+  var_samp(substr(src.value,5)),
+  sum(DISTINCT substr(src.value, 5)),
+  count(DISTINCT substr(src.value, 5))
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+FROM src
+INSERT OVERWRITE TABLE dest1 SELECT
+  sum(substr(src.value,5)),
+  avg(substr(src.value,5)),
+  avg(DISTINCT substr(src.value,5)),
+  max(substr(src.value,5)),
+  min(substr(src.value,5)),
+  std(substr(src.value,5)),
+  stddev_samp(substr(src.value,5)),
+  variance(substr(src.value,5)),
+  var_samp(substr(src.value,5)),
+  sum(DISTINCT substr(src.value, 5)),
+  count(DISTINCT substr(src.value, 5))
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP SORT)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: value (type: string)
+                    outputColumnNames: value
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      aggregations: sum(substr(value, 5)), avg(substr(value, 5)), avg(DISTINCT substr(value, 5)), max(substr(value, 5)), min(substr(value, 5)), std(substr(value, 5)), stddev_samp(substr(value, 5)), variance(substr(value, 5)), var_samp(substr(value, 5)), sum(DISTINCT substr(value, 5)), count(DISTINCT substr(value, 5))
+                      keys: substr(value, 5) (type: string)
+                      mode: hash
+                      outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11
+                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col1 (type: double), _col2 (type: struct<count:bigint,sum:double,input:string>), _col4 (type: string), _col5 (type: string), _col6 (type: struct<count:bigint,sum:double,variance:double>), _col7 (type: struct<count:bigint,sum:double,variance:double>), _col8 (type: struct<count:bigint,sum:double,variance:double>), _col9 (type: struct<count:bigint,sum:double,variance:double>)
+        Reducer 2 
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: sum(VALUE._col0), avg(VALUE._col1), avg(DISTINCT KEY._col0:0._col0), max(VALUE._col3), min(VALUE._col4), std(VALUE._col5), stddev_samp(VALUE._col6), variance(VALUE._col7), var_samp(VALUE._col8), sum(DISTINCT KEY._col0:1._col0), count(DISTINCT KEY._col0:2._col0)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
+                Statistics: Num rows: 1 Data size: 248 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: double), _col1 (type: double), _col2 (type: double), UDFToDouble(_col3) (type: double), UDFToDouble(_col4) (type: double), _col5 (type: double), _col6 (type: double), _col7 (type: double), _col8 (type: double), _col9 (type: double), UDFToDouble(_col10) (type: double)
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
+                  Statistics: Num rows: 1 Data size: 248 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 1 Data size: 248 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        name: default.dest1
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.dest1
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+
+PREHOOK: query: FROM src
+INSERT OVERWRITE TABLE dest1 SELECT
+  sum(substr(src.value,5)),
+  avg(substr(src.value,5)),
+  avg(DISTINCT substr(src.value,5)),
+  max(substr(src.value,5)),
+  min(substr(src.value,5)),
+  std(substr(src.value,5)),
+  stddev_samp(substr(src.value,5)),
+  variance(substr(src.value,5)),
+  var_samp(substr(src.value,5)),
+  sum(DISTINCT substr(src.value, 5)),
+  count(DISTINCT substr(src.value, 5))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@dest1
+[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
+POSTHOOK: query: FROM src
+INSERT OVERWRITE TABLE dest1 SELECT
+  sum(substr(src.value,5)),
+  avg(substr(src.value,5)),
+  avg(DISTINCT substr(src.value,5)),
+  max(substr(src.value,5)),
+  min(substr(src.value,5)),
+  std(substr(src.value,5)),
+  stddev_samp(substr(src.value,5)),
+  variance(substr(src.value,5)),
+  var_samp(substr(src.value,5)),
+  sum(DISTINCT substr(src.value, 5)),
+  count(DISTINCT substr(src.value, 5))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@dest1
+POSTHOOK: Lineage: dest1.c1 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.c10 EXPRESSION [(src)src.null, ]
+POSTHOOK: Lineage: dest1.c11 EXPRESSION [(src)src.null, ]
+POSTHOOK: Lineage: dest1.c2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.c3 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.c4 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.c5 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.c6 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.c7 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.c8 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.c9 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT dest1.* FROM dest1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@dest1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@dest1
+#### A masked pattern was here ####
+130091.0	260.182	256.10355987055016	98.0	0.0	142.9268095075238	143.06995106518906	20428.072876	20469.01089779559	79136.0	309.0
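
The multi-distinct variant still shuffles on one key and lets the reducer address each DISTINCT stream separately (KEY._col0:0, :1 and :2 in the plan above). The two extra result columns (79136.0 and 309.0) can be cross-checked by deduplicating first, which sidesteps the multi-distinct code path entirely; an equivalent sketch, not what the test itself runs:

    SELECT sum(v)   AS sum_distinct,    -- expected 79136.0
           count(v) AS count_distinct   -- expected 309
    FROM (SELECT DISTINCT substr(value, 5) AS v FROM src) t;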

Added: hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby3_noskew.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby3_noskew.q.out?rev=1627210&view=auto
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby3_noskew.q.out (added)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby3_noskew.q.out Wed Sep 24 03:16:25 2014
@@ -0,0 +1,143 @@
+PREHOOK: query: CREATE TABLE dest1(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@dest1
+POSTHOOK: query: CREATE TABLE dest1(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@dest1
+PREHOOK: query: EXPLAIN
+FROM src
+INSERT OVERWRITE TABLE dest1 SELECT
+  sum(substr(src.value,5)),
+  avg(substr(src.value,5)),
+  avg(DISTINCT substr(src.value,5)),
+  max(substr(src.value,5)),
+  min(substr(src.value,5)),
+  std(substr(src.value,5)),
+  stddev_samp(substr(src.value,5)),
+  variance(substr(src.value,5)),
+  var_samp(substr(src.value,5))
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+FROM src
+INSERT OVERWRITE TABLE dest1 SELECT
+  sum(substr(src.value,5)),
+  avg(substr(src.value,5)),
+  avg(DISTINCT substr(src.value,5)),
+  max(substr(src.value,5)),
+  min(substr(src.value,5)),
+  std(substr(src.value,5)),
+  stddev_samp(substr(src.value,5)),
+  variance(substr(src.value,5)),
+  var_samp(substr(src.value,5))
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP SORT)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: value (type: string)
+                    outputColumnNames: value
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: substr(value, 5) (type: string)
+                      sort order: +
+                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+        Reducer 2 
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: sum(KEY._col0:0._col0), avg(KEY._col0:0._col0), avg(DISTINCT KEY._col0:0._col0), max(KEY._col0:0._col0), min(KEY._col0:0._col0), std(KEY._col0:0._col0), stddev_samp(KEY._col0:0._col0), variance(KEY._col0:0._col0), var_samp(KEY._col0:0._col0)
+                mode: complete
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
+                Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: double), _col1 (type: double), _col2 (type: double), UDFToDouble(_col3) (type: double), UDFToDouble(_col4) (type: double), _col5 (type: double), _col6 (type: double), _col7 (type: double), _col8 (type: double)
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
+                  Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        name: default.dest1
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.dest1
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+
+PREHOOK: query: FROM src
+INSERT OVERWRITE TABLE dest1 SELECT
+  sum(substr(src.value,5)),
+  avg(substr(src.value,5)),
+  avg(DISTINCT substr(src.value,5)),
+  max(substr(src.value,5)),
+  min(substr(src.value,5)),
+  std(substr(src.value,5)),
+  stddev_samp(substr(src.value,5)),
+  variance(substr(src.value,5)),
+  var_samp(substr(src.value,5))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@dest1
+[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
+POSTHOOK: query: FROM src
+INSERT OVERWRITE TABLE dest1 SELECT
+  sum(substr(src.value,5)),
+  avg(substr(src.value,5)),
+  avg(DISTINCT substr(src.value,5)),
+  max(substr(src.value,5)),
+  min(substr(src.value,5)),
+  std(substr(src.value,5)),
+  stddev_samp(substr(src.value,5)),
+  variance(substr(src.value,5)),
+  var_samp(substr(src.value,5))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@dest1
+POSTHOOK: Lineage: dest1.c1 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.c2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.c3 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.c4 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.c5 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.c6 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.c7 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.c8 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.c9 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT dest1.* FROM dest1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@dest1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@dest1
+#### A masked pattern was here ####
+130091.0	260.182	256.10355987055016	98.0	0.0	142.92680950752379	143.06995106518903	20428.07287599999	20469.010897795582
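
The noskew run differs from groupby3_map.q.out only on the map side: with map-side aggregation off (the .q file is expected to set hive.map.aggr=false) the mapper ships raw rows through the Reduce Output Operator and Reducer 2 aggregates in a single pass (mode: complete) instead of merging partials. The changed summation order is also why the trailing digits of the std/variance columns differ slightly from the map-side run above. A sketch of the toggle, assuming a session with the standard src table:

    SET hive.map.aggr=false;  -- mapper emits raw rows; reducer runs in 'complete' mode
    EXPLAIN SELECT std(substr(value, 5)) FROM src;
    SET hive.map.aggr=true;   -- mapper pre-aggregates in 'hash' mode; reducer merges partials
    EXPLAIN SELECT std(substr(value, 5)) FROM src;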

Added: hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby3_noskew_multi_distinct.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby3_noskew_multi_distinct.q.out?rev=1627210&view=auto
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby3_noskew_multi_distinct.q.out (added)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby3_noskew_multi_distinct.q.out Wed Sep 24 03:16:25 2014
@@ -0,0 +1,153 @@
+PREHOOK: query: CREATE TABLE dest1(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE, c10 DOUBLE, c11 DOUBLE) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@dest1
+POSTHOOK: query: CREATE TABLE dest1(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE, c10 DOUBLE, c11 DOUBLE) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@dest1
+PREHOOK: query: EXPLAIN
+FROM src
+INSERT OVERWRITE TABLE dest1 SELECT
+  sum(substr(src.value,5)),
+  avg(substr(src.value,5)),
+  avg(DISTINCT substr(src.value,5)),
+  max(substr(src.value,5)),
+  min(substr(src.value,5)),
+  std(substr(src.value,5)),
+  stddev_samp(substr(src.value,5)),
+  variance(substr(src.value,5)),
+  var_samp(substr(src.value,5)),
+  sum(DISTINCT substr(src.value, 5)),
+  count(DISTINCT substr(src.value, 5))
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+FROM src
+INSERT OVERWRITE TABLE dest1 SELECT
+  sum(substr(src.value,5)),
+  avg(substr(src.value,5)),
+  avg(DISTINCT substr(src.value,5)),
+  max(substr(src.value,5)),
+  min(substr(src.value,5)),
+  std(substr(src.value,5)),
+  stddev_samp(substr(src.value,5)),
+  variance(substr(src.value,5)),
+  var_samp(substr(src.value,5)),
+  sum(DISTINCT substr(src.value, 5)),
+  count(DISTINCT substr(src.value, 5))
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP SORT)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: value (type: string)
+                    outputColumnNames: value
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: substr(value, 5) (type: string)
+                      sort order: +
+                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+        Reducer 2 
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: sum(KEY._col0:0._col0), avg(KEY._col0:0._col0), avg(DISTINCT KEY._col0:0._col0), max(KEY._col0:0._col0), min(KEY._col0:0._col0), std(KEY._col0:0._col0), stddev_samp(KEY._col0:0._col0), variance(KEY._col0:0._col0), var_samp(KEY._col0:0._col0), sum(DISTINCT KEY._col0:1._col0), count(DISTINCT KEY._col0:2._col0)
+                mode: complete
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
+                Statistics: Num rows: 1 Data size: 240 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: double), _col1 (type: double), _col2 (type: double), UDFToDouble(_col3) (type: double), UDFToDouble(_col4) (type: double), _col5 (type: double), _col6 (type: double), _col7 (type: double), _col8 (type: double), _col9 (type: double), UDFToDouble(_col10) (type: double)
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
+                  Statistics: Num rows: 1 Data size: 240 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 1 Data size: 240 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        name: default.dest1
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.dest1
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+
+PREHOOK: query: FROM src
+INSERT OVERWRITE TABLE dest1 SELECT
+  sum(substr(src.value,5)),
+  avg(substr(src.value,5)),
+  avg(DISTINCT substr(src.value,5)),
+  max(substr(src.value,5)),
+  min(substr(src.value,5)),
+  std(substr(src.value,5)),
+  stddev_samp(substr(src.value,5)),
+  variance(substr(src.value,5)),
+  var_samp(substr(src.value,5)),
+  sum(DISTINCT substr(src.value, 5)),
+  count(DISTINCT substr(src.value, 5))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@dest1
+[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
+POSTHOOK: query: FROM src
+INSERT OVERWRITE TABLE dest1 SELECT
+  sum(substr(src.value,5)),
+  avg(substr(src.value,5)),
+  avg(DISTINCT substr(src.value,5)),
+  max(substr(src.value,5)),
+  min(substr(src.value,5)),
+  std(substr(src.value,5)),
+  stddev_samp(substr(src.value,5)),
+  variance(substr(src.value,5)),
+  var_samp(substr(src.value,5)),
+  sum(DISTINCT substr(src.value, 5)),
+  count(DISTINCT substr(src.value, 5))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@dest1
+POSTHOOK: Lineage: dest1.c1 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.c10 EXPRESSION [(src)src.null, ]
+POSTHOOK: Lineage: dest1.c11 EXPRESSION [(src)src.null, ]
+POSTHOOK: Lineage: dest1.c2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.c3 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.c4 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.c5 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.c6 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.c7 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.c8 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.c9 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT dest1.* FROM dest1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@dest1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@dest1
+#### A masked pattern was here ####
+130091.0	260.182	256.10355987055016	98.0	0.0	142.92680950752379	143.06995106518903	20428.07287599999	20469.010897795582	79136.0	309.0
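
As the plan above shows, count(DISTINCT ...) produces a bigint that is wrapped in UDFToDouble before landing in the DOUBLE column c11, which is why the result prints 309.0 rather than 309. A sketch showing the raw and stored forms side by side:

    SELECT count(DISTINCT substr(value, 5))                 AS raw_count,    -- bigint: 309
           cast(count(DISTINCT substr(value, 5)) AS DOUBLE) AS stored_form   -- double: 309.0
    FROM src;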