Posted to commits@hive.apache.org by ha...@apache.org on 2013/04/10 02:07:09 UTC

svn commit: r1466300 [5/13] - in /hive/trunk: cli/src/java/org/apache/hadoop/hive/cli/ ql/src/test/queries/clientpositive/ ql/src/test/results/clientpositive/

Modified: hive/trunk/ql/src/test/results/clientpositive/escape_orderby1.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/escape_orderby1.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/escape_orderby1.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/escape_orderby1.q.out Wed Apr 10 00:06:55 2013
@@ -1,7 +1,9 @@
-PREHOOK: query: explain
+PREHOOK: query: -- escaped column names in order by are not working jira 3267
+explain
 select key, value from src order by key, value
 PREHOOK: type: QUERY
-POSTHOOK: query: explain
+POSTHOOK: query: -- escaped column names in order by are not working jira 3267
+explain
 select key, value from src order by key, value
 POSTHOOK: type: QUERY
 ABSTRACT SYNTAX TREE:

Modified: hive/trunk/ql/src/test/results/clientpositive/escape_sortby1.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/escape_sortby1.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/escape_sortby1.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/escape_sortby1.q.out Wed Apr 10 00:06:55 2013
@@ -1,7 +1,9 @@
-PREHOOK: query: explain
+PREHOOK: query: -- escaped column names in sort by are not working jira 3267
+explain
 select key, value from src sort by key, value
 PREHOOK: type: QUERY
-POSTHOOK: query: explain
+POSTHOOK: query: -- escaped column names in sort by are not working jira 3267
+explain
 select key, value from src sort by key, value
 POSTHOOK: type: QUERY
 ABSTRACT SYNTAX TREE:

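Both tests above reference Hive JIRA 3267 (HIVE-3267: escaped column names in ORDER BY / SORT BY are not working), which is why the committed queries use plain identifiers. For context, a minimal sketch of the escaped-identifier form that JIRA is about, assuming Hive's backtick quoting and the standard src test table:

    -- hypothetical escaped variant; not part of this commit (see HIVE-3267)
    SELECT `key`, `value` FROM src ORDER BY `key`, `value`;
    SELECT `key`, `value` FROM src SORT BY `key`, `value`;
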
Modified: hive/trunk/ql/src/test/results/clientpositive/explain_dependency.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/explain_dependency.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/explain_dependency.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/explain_dependency.q.out Wed Apr 10 00:06:55 2013
@@ -1,6 +1,12 @@
-PREHOOK: query: CREATE VIEW V1 AS SELECT key, value from src
+PREHOOK: query: -- This test is used for testing EXPLAIN DEPENDENCY command
+
+-- Create some views
+CREATE VIEW V1 AS SELECT key, value from src
 PREHOOK: type: CREATEVIEW
-POSTHOOK: query: CREATE VIEW V1 AS SELECT key, value from src
+POSTHOOK: query: -- This test is used for testing EXPLAIN DEPENDENCY command
+
+-- Create some views
+CREATE VIEW V1 AS SELECT key, value from src
 POSTHOOK: type: CREATEVIEW
 POSTHOOK: Output: default@V1
 PREHOOK: query: CREATE VIEW V2 AS SELECT ds, key, value FROM srcpart WHERE ds IS NOT NULL
@@ -32,10 +38,12 @@ POSTHOOK: type: CREATEVIEW
 POSTHOOK: Input: default@v1
 POSTHOOK: Input: default@v2
 POSTHOOK: Output: default@V4
-PREHOOK: query: EXPLAIN DEPENDENCY 
+PREHOOK: query: -- Simple select queries, union queries and join queries
+EXPLAIN DEPENDENCY 
   SELECT key, count(1) FROM srcpart WHERE ds IS NOT NULL GROUP BY key
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN DEPENDENCY 
+POSTHOOK: query: -- Simple select queries, union queries and join queries
+EXPLAIN DEPENDENCY 
   SELECT key, count(1) FROM srcpart WHERE ds IS NOT NULL GROUP BY key
 POSTHOOK: type: QUERY
 {"input_partitions":[{"partitionName":"default@srcpart@ds=2008-04-08/hr=11"},{"partitionName":"default@srcpart@ds=2008-04-08/hr=12"},{"partitionName":"default@srcpart@ds=2008-04-09/hr=11"},{"partitionName":"default@srcpart@ds=2008-04-09/hr=12"}],"input_tables":[{"tablename":"default@srcpart","tabletype":"MANAGED_TABLE"}]}
@@ -64,9 +72,11 @@ POSTHOOK: query: EXPLAIN DEPENDENCY 
   SELECT S1.key, S2.value FROM src S1 JOIN srcpart S2 ON S1.key = S2.key WHERE ds IS NOT NULL
 POSTHOOK: type: QUERY
 {"input_partitions":[{"partitionName":"default@srcpart@ds=2008-04-08/hr=11"},{"partitionName":"default@srcpart@ds=2008-04-08/hr=12"},{"partitionName":"default@srcpart@ds=2008-04-09/hr=11"},{"partitionName":"default@srcpart@ds=2008-04-09/hr=12"}],"input_tables":[{"tablename":"default@srcpart","tabletype":"MANAGED_TABLE"},{"tablename":"default@src","tabletype":"MANAGED_TABLE"}]}
-PREHOOK: query: EXPLAIN DEPENDENCY SELECT * FROM V1
+PREHOOK: query: -- With views
+EXPLAIN DEPENDENCY SELECT * FROM V1
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN DEPENDENCY SELECT * FROM V1
+POSTHOOK: query: -- With views
+EXPLAIN DEPENDENCY SELECT * FROM V1
 POSTHOOK: type: QUERY
 {"input_partitions":[],"input_tables":[{"tablename":"default@v1","tabletype":"VIRTUAL_VIEW"},{"tablename":"default@src","tabletype":"MANAGED_TABLE","tableParents":"[default@v1]"}]}
 PREHOOK: query: EXPLAIN DEPENDENCY SELECT * FROM V2
@@ -84,9 +94,13 @@ PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN DEPENDENCY SELECT * FROM V4
 POSTHOOK: type: QUERY
 {"input_partitions":[{"partitionParents":"[default@v2]","partitionName":"default@srcpart@ds=2008-04-08/hr=11"},{"partitionParents":"[default@v2]","partitionName":"default@srcpart@ds=2008-04-08/hr=12"},{"partitionParents":"[default@v2]","partitionName":"default@srcpart@ds=2008-04-09/hr=11"},{"partitionParents":"[default@v2]","partitionName":"default@srcpart@ds=2008-04-09/hr=12"}],"input_tables":[{"tablename":"default@v4","tabletype":"VIRTUAL_VIEW"},{"tablename":"default@v2","tabletype":"VIRTUAL_VIEW","tableParents":"[default@v4]"},{"tablename":"default@v1","tabletype":"VIRTUAL_VIEW","tableParents":"[default@v4]"},{"tablename":"default@src","tabletype":"MANAGED_TABLE","tableParents":"[default@v4, default@v1]"},{"tablename":"default@srcpart","tabletype":"MANAGED_TABLE","tableParents":"[default@v2]"}]}
-PREHOOK: query: CREATE VIEW V5 as SELECT * FROM srcpart where ds = '10'
+PREHOOK: query: -- The table should show up in the explain dependency even if none
+-- of the partitions are selected.
+CREATE VIEW V5 as SELECT * FROM srcpart where ds = '10'
 PREHOOK: type: CREATEVIEW
-POSTHOOK: query: CREATE VIEW V5 as SELECT * FROM srcpart where ds = '10'
+POSTHOOK: query: -- The table should show up in the explain dependency even if none
+-- of the partitions are selected.
+CREATE VIEW V5 as SELECT * FROM srcpart where ds = '10'
 POSTHOOK: type: CREATEVIEW
 POSTHOOK: Output: default@V5
 PREHOOK: query: EXPLAIN DEPENDENCY SELECT * FROM V5

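For readers who have not used it, EXPLAIN DEPENDENCY emits a JSON document listing input_tables and input_partitions (with tableParents/partitionParents entries for views) instead of an operator plan, which is exactly what the outputs above show. A minimal sketch, reusing queries from this test:

    -- prints {"input_partitions":[...],"input_tables":[...]} instead of a plan
    EXPLAIN DEPENDENCY
      SELECT key, count(1) FROM srcpart WHERE ds IS NOT NULL GROUP BY key;

    -- for a view, the underlying tables are reported with a "tableParents" entry
    EXPLAIN DEPENDENCY SELECT * FROM V1;
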
Modified: hive/trunk/ql/src/test/results/clientpositive/explain_dependency2.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/explain_dependency2.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/explain_dependency2.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/explain_dependency2.q.out Wed Apr 10 00:06:55 2013
@@ -1,31 +1,55 @@
-PREHOOK: query: EXPLAIN DEPENDENCY SELECT * FROM src
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN DEPENDENCY SELECT * FROM src
+PREHOOK: query: -- This test is used for testing EXPLAIN DEPENDENCY command
+
+-- select from a table which does not involve a map-reduce job
+EXPLAIN DEPENDENCY SELECT * FROM src
+PREHOOK: type: QUERY
+POSTHOOK: query: -- This test is used for testing EXPLAIN DEPENDENCY command
+
+-- select from a table which does not involve a map-reduce job
+EXPLAIN DEPENDENCY SELECT * FROM src
 POSTHOOK: type: QUERY
 {"input_partitions":[],"input_tables":[{"tablename":"default@src","tabletype":"MANAGED_TABLE"}]}
-PREHOOK: query: EXPLAIN DEPENDENCY SELECT count(*) FROM src
+PREHOOK: query: -- select from a table which involves a map-reduce job
+EXPLAIN DEPENDENCY SELECT count(*) FROM src
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN DEPENDENCY SELECT count(*) FROM src
+POSTHOOK: query: -- select from a table which involves a map-reduce job
+EXPLAIN DEPENDENCY SELECT count(*) FROM src
 POSTHOOK: type: QUERY
 {"input_partitions":[],"input_tables":[{"tablename":"default@src","tabletype":"MANAGED_TABLE"}]}
-PREHOOK: query: EXPLAIN DEPENDENCY SELECT * FROM srcpart where ds is not null
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN DEPENDENCY SELECT * FROM srcpart where ds is not null
+PREHOOK: query: -- select from a partitioned table which does not involve a map-reduce job
+-- and some partitions are being selected
+EXPLAIN DEPENDENCY SELECT * FROM srcpart where ds is not null
+PREHOOK: type: QUERY
+POSTHOOK: query: -- select from a partitioned table which does not involve a map-reduce job
+-- and some partitions are being selected
+EXPLAIN DEPENDENCY SELECT * FROM srcpart where ds is not null
 POSTHOOK: type: QUERY
 {"input_partitions":[{"partitionName":"default@srcpart@ds=2008-04-08/hr=11"},{"partitionName":"default@srcpart@ds=2008-04-08/hr=12"},{"partitionName":"default@srcpart@ds=2008-04-09/hr=11"},{"partitionName":"default@srcpart@ds=2008-04-09/hr=12"}],"input_tables":[{"tablename":"default@srcpart","tabletype":"MANAGED_TABLE"}]}
-PREHOOK: query: EXPLAIN DEPENDENCY SELECT * FROM srcpart where ds = '1'
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN DEPENDENCY SELECT * FROM srcpart where ds = '1'
+PREHOOK: query: -- select from a partitioned table which does not involve a map-reduce job
+-- and none of the partitions are being selected
+EXPLAIN DEPENDENCY SELECT * FROM srcpart where ds = '1'
+PREHOOK: type: QUERY
+POSTHOOK: query: -- select from a partitioned table which does not involve a map-reduce job
+-- and none of the partitions are being selected
+EXPLAIN DEPENDENCY SELECT * FROM srcpart where ds = '1'
 POSTHOOK: type: QUERY
 {"input_partitions":[],"input_tables":[{"tablename":"default@srcpart","tabletype":"MANAGED_TABLE"}]}
-PREHOOK: query: EXPLAIN DEPENDENCY SELECT count(*) FROM srcpart where ds is not null
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN DEPENDENCY SELECT count(*) FROM srcpart where ds is not null
+PREHOOK: query: -- select from a partitioned table which involves a map-reduce job
+-- and some partitions are being selected
+EXPLAIN DEPENDENCY SELECT count(*) FROM srcpart where ds is not null
+PREHOOK: type: QUERY
+POSTHOOK: query: -- select from a partitioned table which involves a map-reduce job
+-- and some partitions are being selected
+EXPLAIN DEPENDENCY SELECT count(*) FROM srcpart where ds is not null
 POSTHOOK: type: QUERY
 {"input_partitions":[{"partitionName":"default@srcpart@ds=2008-04-08/hr=11"},{"partitionName":"default@srcpart@ds=2008-04-08/hr=12"},{"partitionName":"default@srcpart@ds=2008-04-09/hr=11"},{"partitionName":"default@srcpart@ds=2008-04-09/hr=12"}],"input_tables":[{"tablename":"default@srcpart","tabletype":"MANAGED_TABLE"}]}
-PREHOOK: query: EXPLAIN DEPENDENCY SELECT count(*) FROM srcpart where ds = '1'
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN DEPENDENCY SELECT count(*) FROM srcpart where ds = '1'
+PREHOOK: query: -- select from a partitioned table which involves a map-reduce job
+-- and none of the partitions are being selected
+EXPLAIN DEPENDENCY SELECT count(*) FROM srcpart where ds = '1'
+PREHOOK: type: QUERY
+POSTHOOK: query: -- select from a partitioned table which involves a map-reduce job
+-- and none of the partitions are being selected
+EXPLAIN DEPENDENCY SELECT count(*) FROM srcpart where ds = '1'
 POSTHOOK: type: QUERY
 {"input_partitions":[],"input_tables":[{"tablename":"default@srcpart","tabletype":"MANAGED_TABLE"}]}
 PREHOOK: query: create table tstsrcpart like srcpart
@@ -33,13 +57,17 @@ PREHOOK: type: CREATETABLE
 POSTHOOK: query: create table tstsrcpart like srcpart
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: default@tstsrcpart
-PREHOOK: query: EXPLAIN DEPENDENCY SELECT * FROM tstsrcpart where ds is not null
+PREHOOK: query: -- select from a partitioned table with no partitions which does not involve a map-reduce job
+EXPLAIN DEPENDENCY SELECT * FROM tstsrcpart where ds is not null
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN DEPENDENCY SELECT * FROM tstsrcpart where ds is not null
+POSTHOOK: query: -- select from a partitioned table with no partitions which does not involve a map-reduce job
+EXPLAIN DEPENDENCY SELECT * FROM tstsrcpart where ds is not null
 POSTHOOK: type: QUERY
 {"input_partitions":[],"input_tables":[{"tablename":"default@tstsrcpart","tabletype":"MANAGED_TABLE"}]}
-PREHOOK: query: EXPLAIN DEPENDENCY SELECT count(*) FROM tstsrcpart where ds is not null
+PREHOOK: query: -- select from a partitioned table with no partitions which involves a map-reduce job
+EXPLAIN DEPENDENCY SELECT count(*) FROM tstsrcpart where ds is not null
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN DEPENDENCY SELECT count(*) FROM tstsrcpart where ds is not null
+POSTHOOK: query: -- select from a partitioned table with no partitions which involves a map-reduce job
+EXPLAIN DEPENDENCY SELECT count(*) FROM tstsrcpart where ds is not null
 POSTHOOK: type: QUERY
 {"input_partitions":[],"input_tables":[{"tablename":"default@tstsrcpart","tabletype":"MANAGED_TABLE"}]}

Modified: hive/trunk/ql/src/test/results/clientpositive/global_limit.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/global_limit.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/global_limit.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/global_limit.q.out Wed Apr 10 00:06:55 2013
@@ -37,10 +37,12 @@ PREHOOK: Output: default@gl_src1
 POSTHOOK: query: load data local inpath '../data/files/srcbucket20.txt' INTO TABLE gl_src1
 POSTHOOK: type: LOAD
 POSTHOOK: Output: default@gl_src1
-PREHOOK: query: create table gl_tgt as select key from gl_src1 limit 1
+PREHOOK: query: -- need one file
+create table gl_tgt as select key from gl_src1 limit 1
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@gl_src1
-POSTHOOK: query: create table gl_tgt as select key from gl_src1 limit 1
+POSTHOOK: query: -- need one file
+create table gl_tgt as select key from gl_src1 limit 1
 POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@gl_src1
 POSTHOOK: Output: default@gl_tgt
@@ -53,11 +55,13 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@gl_tgt
 #### A masked pattern was here ####
 165
-PREHOOK: query: select 'x' as key_new , split(value,',') as value_new from gl_src1 ORDER BY key_new ASC, value_new[0] ASC limit 20
+PREHOOK: query: -- need two files
+select 'x' as key_new , split(value,',') as value_new from gl_src1 ORDER BY key_new ASC, value_new[0] ASC limit 20
 PREHOOK: type: QUERY
 PREHOOK: Input: default@gl_src1
 #### A masked pattern was here ####
-POSTHOOK: query: select 'x' as key_new , split(value,',') as value_new from gl_src1 ORDER BY key_new ASC, value_new[0] ASC limit 20
+POSTHOOK: query: -- need two files
+select 'x' as key_new , split(value,',') as value_new from gl_src1 ORDER BY key_new ASC, value_new[0] ASC limit 20
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@gl_src1
 #### A masked pattern was here ####
@@ -81,11 +85,13 @@ x	["val_11"]
 x	["val_11"]
 x	["val_114"]
 x	["val_114"]
-PREHOOK: query: select key, value, split(value,',') as value_new from gl_src1 ORDER BY key ASC, value ASC, value_new[0] ASC limit 30
+PREHOOK: query: -- no sufficient files
+select key, value, split(value,',') as value_new from gl_src1 ORDER BY key ASC, value ASC, value_new[0] ASC limit 30
 PREHOOK: type: QUERY
 PREHOOK: Input: default@gl_src1
 #### A masked pattern was here ####
-POSTHOOK: query: select key, value, split(value,',') as value_new from gl_src1 ORDER BY key ASC, value ASC, value_new[0] ASC limit 30
+POSTHOOK: query: -- no sufficient files
+select key, value, split(value,',') as value_new from gl_src1 ORDER BY key ASC, value ASC, value_new[0] ASC limit 30
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@gl_src1
 #### A masked pattern was here ####
@@ -119,11 +125,13 @@ POSTHOOK: Input: default@gl_src1
 26	val_26	["val_26"]
 26	val_26	["val_26"]
 26	val_26	["val_26"]
-PREHOOK: query: select key from gl_src1 ORDER BY key ASC limit 100
+PREHOOK: query: -- need all files
+select key from gl_src1 ORDER BY key ASC limit 100
 PREHOOK: type: QUERY
 PREHOOK: Input: default@gl_src1
 #### A masked pattern was here ####
-POSTHOOK: query: select key from gl_src1 ORDER BY key ASC limit 100
+POSTHOOK: query: -- need all files
+select key from gl_src1 ORDER BY key ASC limit 100
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@gl_src1
 #### A masked pattern was here ####
@@ -265,11 +273,13 @@ POSTHOOK: Input: default@gl_src1
 26
 26
 26
-PREHOOK: query: select key, count(1) from gl_src1 group by key ORDER BY key ASC limit 5
+PREHOOK: query: -- not qualified cases
+select key, count(1) from gl_src1 group by key ORDER BY key ASC limit 5
 PREHOOK: type: QUERY
 PREHOOK: Input: default@gl_src1
 #### A masked pattern was here ####
-POSTHOOK: query: select key, count(1) from gl_src1 group by key ORDER BY key ASC limit 5
+POSTHOOK: query: -- not qualified cases
+select key, count(1) from gl_src1 group by key ORDER BY key ASC limit 5
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@gl_src1
 #### A masked pattern was here ####
@@ -1033,11 +1043,13 @@ POSTHOOK: Input: default@gl_src1
 374
 484
 495
-PREHOOK: query: select key from (select * from (select key,value from gl_src1 limit 10)t1 )t2 ORDER BY key ASC
+PREHOOK: query: -- complicated queries
+select key from (select * from (select key,value from gl_src1 limit 10)t1 )t2 ORDER BY key ASC
 PREHOOK: type: QUERY
 PREHOOK: Input: default@gl_src1
 #### A masked pattern was here ####
-POSTHOOK: query: select key from (select * from (select key,value from gl_src1 limit 10)t1 )t2 ORDER BY key ASC
+POSTHOOK: query: -- complicated queries
+select key from (select * from (select key,value from gl_src1 limit 10)t1 )t2 ORDER BY key ASC
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@gl_src1
 #### A masked pattern was here ####
@@ -1097,9 +1109,11 @@ POSTHOOK: Lineage: gl_tgt.key EXPRESSION
 375
 485
 496
-PREHOOK: query: create table gl_src2 (key int, value string) stored as textfile
+PREHOOK: query: -- empty table
+create table gl_src2 (key int, value string) stored as textfile
 PREHOOK: type: CREATETABLE
-POSTHOOK: query: create table gl_src2 (key int, value string) stored as textfile
+POSTHOOK: query: -- empty table
+create table gl_src2 (key int, value string) stored as textfile
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: default@gl_src2
 POSTHOOK: Lineage: gl_tgt.key EXPRESSION [(gl_src1)gl_src1.FieldSchema(name:key, type:int, comment:null), ]
@@ -1112,9 +1126,11 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@gl_src2
 #### A masked pattern was here ####
 POSTHOOK: Lineage: gl_tgt.key EXPRESSION [(gl_src1)gl_src1.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: create table gl_src_part1 (key int, value string) partitioned by (p string) stored as textfile
+PREHOOK: query: -- partition
+create table gl_src_part1 (key int, value string) partitioned by (p string) stored as textfile
 PREHOOK: type: CREATETABLE
-POSTHOOK: query: create table gl_src_part1 (key int, value string) partitioned by (p string) stored as textfile
+POSTHOOK: query: -- partition
+create table gl_src_part1 (key int, value string) partitioned by (p string) stored as textfile
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: default@gl_src_part1
 POSTHOOK: Lineage: gl_tgt.key EXPRESSION [(gl_src1)gl_src1.FieldSchema(name:key, type:int, comment:null), ]

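The "need one file" / "need two files" / "need all files" comments above describe Hive's global LIMIT optimization, which tries to feed only as many input files into the job as the LIMIT requires. A minimal sketch of how such a run is typically configured (the set statements and their values are illustrative assumptions, not taken from this commit):

    -- enable the global limit optimization (values are illustrative)
    set hive.limit.optimize.enable=true;
    set hive.limit.row.max.size=12;
    set hive.limit.optimize.limit.file=2;

    -- with a small LIMIT, only a subset of gl_src1's files should be scanned
    create table gl_tgt as select key from gl_src1 limit 1;
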
Modified: hive/trunk/ql/src/test/results/clientpositive/groupby10.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/groupby10.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/groupby10.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/groupby10.q.out Wed Apr 10 00:06:55 2013
@@ -613,12 +613,14 @@ POSTHOOK: Lineage: dest2.val2 EXPRESSION
 401	401	401
 409	409	409
 484	484	484
-PREHOOK: query: EXPLAIN
+PREHOOK: query: -- HIVE-3852 Multi-groupby optimization fails when same distinct column is used twice or more
+EXPLAIN
 FROM INPUT
 INSERT OVERWRITE TABLE dest1 SELECT INPUT.key, sum(distinct substr(INPUT.value,5)), count(distinct substr(INPUT.value,5)) GROUP BY INPUT.key
 INSERT OVERWRITE TABLE dest2 SELECT INPUT.key, sum(distinct substr(INPUT.value,5)), avg(distinct substr(INPUT.value,5)) GROUP BY INPUT.key
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN
+POSTHOOK: query: -- HIVE-3852 Multi-groupby optimization fails when same distinct column is used twice or more
+EXPLAIN
 FROM INPUT
 INSERT OVERWRITE TABLE dest1 SELECT INPUT.key, sum(distinct substr(INPUT.value,5)), count(distinct substr(INPUT.value,5)) GROUP BY INPUT.key
 INSERT OVERWRITE TABLE dest2 SELECT INPUT.key, sum(distinct substr(INPUT.value,5)), avg(distinct substr(INPUT.value,5)) GROUP BY INPUT.key

Modified: hive/trunk/ql/src/test/results/clientpositive/groupby_distinct_samekey.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/groupby_distinct_samekey.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/groupby_distinct_samekey.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/groupby_distinct_samekey.q.out Wed Apr 10 00:06:55 2013
@@ -1,6 +1,10 @@
-PREHOOK: query: create table t1 (int1 int, int2 int, str1 string, str2 string)
+PREHOOK: query: -- This test covers HIVE-2332
+
+create table t1 (int1 int, int2 int, str1 string, str2 string)
 PREHOOK: type: CREATETABLE
-POSTHOOK: query: create table t1 (int1 int, int2 int, str1 string, str2 string)
+POSTHOOK: query: -- This test covers HIVE-2332
+
+create table t1 (int1 int, int2 int, str1 string, str2 string)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: default@t1
 PREHOOK: query: --disabled RS-dedup for keeping intention of test

Modified: hive/trunk/ql/src/test/results/clientpositive/groupby_grouping_sets2.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/groupby_grouping_sets2.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/groupby_grouping_sets2.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/groupby_grouping_sets2.q.out Wed Apr 10 00:06:55 2013
@@ -9,10 +9,12 @@ PREHOOK: Output: default@t1
 POSTHOOK: query: LOAD DATA LOCAL INPATH '../data/files/grouping_sets.txt' INTO TABLE T1
 POSTHOOK: type: LOAD
 POSTHOOK: Output: default@t1
-PREHOOK: query: EXPLAIN
+PREHOOK: query: -- Since 4 grouping sets would be generated for the query below, an additional MR job should be created
+EXPLAIN
 SELECT a, b, count(*) from T1 group by a, b with cube
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN
+POSTHOOK: query: -- Since 4 grouping sets would be generated for the query below, an additional MR job should be created
+EXPLAIN
 SELECT a, b, count(*) from T1 group by a, b with cube
 POSTHOOK: type: QUERY
 ABSTRACT SYNTAX TREE:

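As a reminder of where the "4 grouping sets" in the comment come from: GROUP BY a, b WITH CUBE groups by every subset of (a, b). A sketch of the equivalent GROUPING SETS spelling (standard Hive syntax, not part of this commit):

    -- WITH CUBE on (a, b) expands to these four grouping sets
    SELECT a, b, count(*)
    FROM T1
    GROUP BY a, b
    GROUPING SETS ((a, b), a, b, ());
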
Modified: hive/trunk/ql/src/test/results/clientpositive/groupby_grouping_sets3.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/groupby_grouping_sets3.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/groupby_grouping_sets3.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/groupby_grouping_sets3.q.out Wed Apr 10 00:06:55 2013
@@ -1,6 +1,14 @@
-PREHOOK: query: CREATE TABLE T1(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE
+PREHOOK: query: -- In this test, 2 files are loaded into table T1. The data contains rows with the same value of a and b,
+-- with different number of rows for a and b in each file. Since bucketizedHiveInputFormat is used, 
+-- this tests that the aggregate function stores the partial aggregate state correctly even if an 
+-- additional MR job is created for processing the grouping sets.
+CREATE TABLE T1(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
-POSTHOOK: query: CREATE TABLE T1(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE
+POSTHOOK: query: -- In this test, 2 files are loaded into table T1. The data contains rows with the same value of a and b,
+-- with different number of rows for a and b in each file. Since bucketizedHiveInputFormat is used, 
+-- this tests that the aggregate function stores the partial aggregate state correctly even if an 
+-- additional MR job is created for processing the grouping sets.
+CREATE TABLE T1(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: default@T1
 PREHOOK: query: LOAD DATA LOCAL INPATH '../data/files/grouping_sets1.txt' INTO TABLE T1
@@ -15,10 +23,16 @@ PREHOOK: Output: default@t1
 POSTHOOK: query: LOAD DATA LOCAL INPATH '../data/files/grouping_sets2.txt' INTO TABLE T1
 POSTHOOK: type: LOAD
 POSTHOOK: Output: default@t1
-PREHOOK: query: EXPLAIN
+PREHOOK: query: -- The query below will execute in a single MR job, since 4 rows are generated per input row
+-- (cube of a,b will lead to (a,b), (a, null), (null, b) and (null, null) and 
+-- hive.new.job.grouping.set.cardinality is more than 4.
+EXPLAIN
 SELECT a, b, avg(c), count(*) from T1 group by a, b with cube
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN
+POSTHOOK: query: -- The query below will execute in a single MR job, since 4 rows are generated per input row
+-- (cube of a,b will lead to (a,b), (a, null), (null, b) and (null, null) and 
+-- hive.new.job.grouping.set.cardinality is more than 4.
+EXPLAIN
 SELECT a, b, avg(c), count(*) from T1 group by a, b with cube
 POSTHOOK: type: QUERY
 ABSTRACT SYNTAX TREE:
@@ -142,10 +156,14 @@ NULL	3	5.0	2
 5	1	2.0	1
 8	NULL	1.0	2
 8	1	1.0	2
-PREHOOK: query: EXPLAIN
+PREHOOK: query: -- The query below will execute in 2 MR jobs, since hive.new.job.grouping.set.cardinality is set to 2.
+-- The partial aggregation state should be maintained correctly across MR jobs.
+EXPLAIN
 SELECT a, b, avg(c), count(*) from T1 group by a, b with cube
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN
+POSTHOOK: query: -- The query below will execute in 2 MR jobs, since hive.new.job.grouping.set.cardinality is set to 2.
+-- The partial aggregation state should be maintained correctly across MR jobs.
+EXPLAIN
 SELECT a, b, avg(c), count(*) from T1 group by a, b with cube
 POSTHOOK: type: QUERY
 ABSTRACT SYNTAX TREE:

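The two otherwise identical EXPLAINs above differ only in hive.new.job.grouping.set.cardinality: when the number of grouping sets (4 for this cube) exceeds it, the cube is evaluated with an additional MR job. A minimal sketch of the switch being exercised (the set statements are assumptions about the .q file, which is not part of this diff; the first value is illustrative):

    -- cardinality above 4: the cube runs in a single MR job
    set hive.new.job.grouping.set.cardinality = 30;
    EXPLAIN SELECT a, b, avg(c), count(*) from T1 group by a, b with cube;

    -- cardinality below 4: an additional MR job carries the partial aggregates
    set hive.new.job.grouping.set.cardinality = 2;
    EXPLAIN SELECT a, b, avg(c), count(*) from T1 group by a, b with cube;
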
Modified: hive/trunk/ql/src/test/results/clientpositive/groupby_grouping_sets4.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/groupby_grouping_sets4.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/groupby_grouping_sets4.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/groupby_grouping_sets4.q.out Wed Apr 10 00:06:55 2013
@@ -1,6 +1,10 @@
-PREHOOK: query: CREATE TABLE T1(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE
+PREHOOK: query: -- Set merging to false above to make the explain more readable
+
+CREATE TABLE T1(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
-POSTHOOK: query: CREATE TABLE T1(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE
+POSTHOOK: query: -- Set merging to false above to make the explain more readable
+
+CREATE TABLE T1(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: default@T1
 PREHOOK: query: LOAD DATA LOCAL INPATH '../data/files/grouping_sets.txt' INTO TABLE T1
@@ -9,14 +13,16 @@ PREHOOK: Output: default@t1
 POSTHOOK: query: LOAD DATA LOCAL INPATH '../data/files/grouping_sets.txt' INTO TABLE T1
 POSTHOOK: type: LOAD
 POSTHOOK: Output: default@t1
-PREHOOK: query: EXPLAIN
+PREHOOK: query: -- This tests that cubes and rollups work fine inside sub-queries.
+EXPLAIN
 SELECT * FROM
 (SELECT a, b, count(*) from T1 where a < 3 group by a, b with cube) subq1
 join
 (SELECT a, b, count(*) from T1 where a < 3 group by a, b with cube) subq2
 on subq1.a = subq2.a
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN
+POSTHOOK: query: -- This tests that cubes and rollups work fine inside sub-queries.
+EXPLAIN
 SELECT * FROM
 (SELECT a, b, count(*) from T1 where a < 3 group by a, b with cube) subq1
 join
@@ -295,14 +301,18 @@ POSTHOOK: Input: default@t1
 2	3	1	2	NULL	2
 2	3	1	2	2	1
 2	3	1	2	3	1
-PREHOOK: query: EXPLAIN
+PREHOOK: query: -- Since 4 grouping sets would be generated for each sub-query, an additional MR job should be created
+-- for each of them
+EXPLAIN
 SELECT * FROM
 (SELECT a, b, count(*) from T1 where a < 3 group by a, b with cube) subq1
 join
 (SELECT a, b, count(*) from T1 where a < 3 group by a, b with cube) subq2
 on subq1.a = subq2.a
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN
+POSTHOOK: query: -- Since 4 grouping sets would be generated for each sub-query, an additional MR job should be created
+-- for each of them
+EXPLAIN
 SELECT * FROM
 (SELECT a, b, count(*) from T1 where a < 3 group by a, b with cube) subq1
 join

Modified: hive/trunk/ql/src/test/results/clientpositive/groupby_grouping_sets5.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/groupby_grouping_sets5.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/groupby_grouping_sets5.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/groupby_grouping_sets5.q.out Wed Apr 10 00:06:55 2013
@@ -1,6 +1,10 @@
-PREHOOK: query: CREATE TABLE T1(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE
+PREHOOK: query: -- Set merging to false above to make the explain more readable
+
+CREATE TABLE T1(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
-POSTHOOK: query: CREATE TABLE T1(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE
+POSTHOOK: query: -- Set merging to false above to make the explain more readable
+
+CREATE TABLE T1(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: default@T1
 PREHOOK: query: LOAD DATA LOCAL INPATH '../data/files/grouping_sets.txt' INTO TABLE T1
@@ -9,11 +13,13 @@ PREHOOK: Output: default@t1
 POSTHOOK: query: LOAD DATA LOCAL INPATH '../data/files/grouping_sets.txt' INTO TABLE T1
 POSTHOOK: type: LOAD
 POSTHOOK: Output: default@t1
-PREHOOK: query: EXPLAIN
+PREHOOK: query: -- This tests that cubes and rollups work fine where the source is a sub-query
+EXPLAIN
 SELECT a, b, count(*) FROM
 (SELECT a, b, count(1) from T1 group by a, b) subq1 group by a, b with cube
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN
+POSTHOOK: query: -- This tests that cubes and rollups work fine where the source is a sub-query
+EXPLAIN
 SELECT a, b, count(*) FROM
 (SELECT a, b, count(1) from T1 group by a, b) subq1 group by a, b with cube
 POSTHOOK: type: QUERY
@@ -189,11 +195,13 @@ NULL	3	1
 5	2	1
 8	NULL	1
 8	1	1
-PREHOOK: query: EXPLAIN
+PREHOOK: query: -- Since 4 grouping sets would be generated for the cube, an additional MR job should be created
+EXPLAIN
 SELECT a, b, count(*) FROM
 (SELECT a, b, count(1) from T1 group by a, b) subq1 group by a, b with cube
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN
+POSTHOOK: query: -- Since 4 grouping sets would be generated for the cube, an additional MR job should be created
+EXPLAIN
 SELECT a, b, count(*) FROM
 (SELECT a, b, count(1) from T1 group by a, b) subq1 group by a, b with cube
 POSTHOOK: type: QUERY

Modified: hive/trunk/ql/src/test/results/clientpositive/groupby_multi_insert_common_distinct.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/groupby_multi_insert_common_distinct.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/groupby_multi_insert_common_distinct.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/groupby_multi_insert_common_distinct.q.out Wed Apr 10 00:06:55 2013
@@ -249,12 +249,14 @@ POSTHOOK: Lineage: dest2.key EXPRESSION 
 10	1
 16	1
 18	1
-PREHOOK: query: explain
+PREHOOK: query: -- no need to spray by distinct key first
+explain
 from src
 insert overwrite table dest1 select key, count(distinct value) group by key
 insert overwrite table dest2 select key+key, count(distinct value) group by key+key
 PREHOOK: type: QUERY
-POSTHOOK: query: explain
+POSTHOOK: query: -- no need to spray by distinct key first
+explain
 from src
 insert overwrite table dest1 select key, count(distinct value) group by key
 insert overwrite table dest2 select key+key, count(distinct value) group by key+key

Modified: hive/trunk/ql/src/test/results/clientpositive/groupby_multi_single_reducer3.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/groupby_multi_single_reducer3.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/groupby_multi_single_reducer3.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/groupby_multi_single_reducer3.q.out Wed Apr 10 00:06:55 2013
@@ -1,6 +1,8 @@
-PREHOOK: query: create table e1 (key string, count int)
+PREHOOK: query: -- HIVE-3849 Aliased column in where clause for multi-groupby single reducer cannot be resolved
+create table e1 (key string, count int)
 PREHOOK: type: CREATETABLE
-POSTHOOK: query: create table e1 (key string, count int)
+POSTHOOK: query: -- HIVE-3849 Aliased column in where clause for multi-groupby single reducer cannot be resolved
+create table e1 (key string, count int)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: default@e1
 PREHOOK: query: create table e2 (key string, count int)

Modified: hive/trunk/ql/src/test/results/clientpositive/groupby_position.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/groupby_position.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/groupby_position.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/groupby_position.q.out Wed Apr 10 00:06:55 2013
@@ -8,12 +8,16 @@ PREHOOK: type: CREATETABLE
 POSTHOOK: query: CREATE TABLE testTable2(key INT, val1 STRING, val2 STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: default@testTable2
-PREHOOK: query: EXPLAIN
+PREHOOK: query: -- Position Alias in GROUP BY and ORDER BY
+
+EXPLAIN
 FROM SRC
 INSERT OVERWRITE TABLE testTable1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) WHERE SRC.key < 20 GROUP BY 1 
 INSERT OVERWRITE TABLE testTable2 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) WHERE SRC.key < 20 GROUP BY 1, 2
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN
+POSTHOOK: query: -- Position Alias in GROUP BY and ORDER BY
+
+EXPLAIN
 FROM SRC
 INSERT OVERWRITE TABLE testTable1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) WHERE SRC.key < 20 GROUP BY 1 
 INSERT OVERWRITE TABLE testTable2 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) WHERE SRC.key < 20 GROUP BY 1, 2
@@ -568,12 +572,16 @@ POSTHOOK: Lineage: testtable2.val2 EXPRE
 17	val_17	1
 18	val_18	1
 19	val_19	1
-PREHOOK: query: EXPLAIN
+PREHOOK: query: -- Position Alias in subquery
+
+EXPLAIN
 SELECT t.key, t.value
 FROM (SELECT b.key as key, count(1) as value FROM src b WHERE b.key <= 20 GROUP BY 1) t
 ORDER BY 2 DESC, 1 ASC
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN
+POSTHOOK: query: -- Position Alias in subquery
+
+EXPLAIN
 SELECT t.key, t.value
 FROM (SELECT b.key as key, count(1) as value FROM src b WHERE b.key <= 20 GROUP BY 1) t
 ORDER BY 2 DESC, 1 ASC

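The GROUP BY 1 and ORDER BY 2 DESC, 1 ASC forms above are position aliases, which Hive of this vintage only accepts when the feature is switched on. A minimal sketch (the property name is my assumption about what the .q file sets; it does not appear in this diff):

    -- position aliases must be enabled before GROUP BY 1 / ORDER BY 2 parse as positions
    set hive.groupby.orderby.position.alias=true;

    SELECT t.key, t.value
    FROM (SELECT b.key as key, count(1) as value FROM src b WHERE b.key <= 20 GROUP BY 1) t
    ORDER BY 2 DESC, 1 ASC;
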
Modified: hive/trunk/ql/src/test/results/clientpositive/groupby_ppd.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/groupby_ppd.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/groupby_ppd.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/groupby_ppd.q.out Wed Apr 10 00:06:55 2013
@@ -1,6 +1,8 @@
-PREHOOK: query: create table invites (id int, foo int, bar int)
+PREHOOK: query: -- see HIVE-2382
+create table invites (id int, foo int, bar int)
 PREHOOK: type: CREATETABLE
-POSTHOOK: query: create table invites (id int, foo int, bar int)
+POSTHOOK: query: -- see HIVE-2382
+create table invites (id int, foo int, bar int)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: default@invites
 PREHOOK: query: explain select * from (select foo, bar from (select bar, foo from invites c union all select bar, foo from invites d) b) a group by bar, foo having bar=1

Modified: hive/trunk/ql/src/test/results/clientpositive/groupby_sort_1.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/groupby_sort_1.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/groupby_sort_1.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/groupby_sort_1.q.out Wed Apr 10 00:06:55 2013
@@ -11,11 +11,13 @@ PREHOOK: Output: default@t1
 POSTHOOK: query: LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1
 POSTHOOK: type: LOAD
 POSTHOOK: Output: default@t1
-PREHOOK: query: INSERT OVERWRITE TABLE T1 select key, val from T1
+PREHOOK: query: -- perform an insert to make sure there are 2 files
+INSERT OVERWRITE TABLE T1 select key, val from T1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t1
 PREHOOK: Output: default@t1
-POSTHOOK: query: INSERT OVERWRITE TABLE T1 select key, val from T1
+POSTHOOK: query: -- perform an insert to make sure there are 2 files
+INSERT OVERWRITE TABLE T1 select key, val from T1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t1
 POSTHOOK: Output: default@t1
@@ -28,11 +30,17 @@ POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: default@outputTbl1
 POSTHOOK: Lineage: t1.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: t1.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
-PREHOOK: query: EXPLAIN EXTENDED
+PREHOOK: query: -- The plan should be converted to a map-side group by if the group by key
+-- matches the sorted key
+-- adding an order by at the end to make the test results deterministic
+EXPLAIN EXTENDED
 INSERT OVERWRITE TABLE outputTbl1
 SELECT key, count(1) FROM T1 GROUP BY key
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN EXTENDED
+POSTHOOK: query: -- The plan should be converted to a map-side group by if the group by key
+-- matches the sorted key
+-- adding an order by at the end to make the test results deterministic
+EXPLAIN EXTENDED
 INSERT OVERWRITE TABLE outputTbl1
 SELECT key, count(1) FROM T1 GROUP BY key
 POSTHOOK: type: QUERY
@@ -371,11 +379,13 @@ POSTHOOK: Lineage: outputtbl1.cnt EXPRES
 POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: t1.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: t1.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
-PREHOOK: query: EXPLAIN EXTENDED
+PREHOOK: query: -- no map-side group by even if the group by key is a superset of sorted key
+EXPLAIN EXTENDED
 INSERT OVERWRITE TABLE outputTbl2
 SELECT key, val, count(1) FROM T1 GROUP BY key, val
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN EXTENDED
+POSTHOOK: query: -- no map-side group by even if the group by key is a superset of sorted key
+EXPLAIN EXTENDED
 INSERT OVERWRITE TABLE outputTbl2
 SELECT key, val, count(1) FROM T1 GROUP BY key, val
 POSTHOOK: type: QUERY
@@ -596,11 +606,13 @@ POSTHOOK: Lineage: t1.val SIMPLE [(t1)t1
 7	17	1
 8	18	1
 8	28	1
-PREHOOK: query: EXPLAIN EXTENDED 
+PREHOOK: query: -- It should work for sub-queries
+EXPLAIN EXTENDED 
 INSERT OVERWRITE TABLE outputTbl1
 SELECT key, count(1) FROM (SELECT key, val FROM T1) subq1 GROUP BY key
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN EXTENDED 
+POSTHOOK: query: -- It should work for sub-queries
+EXPLAIN EXTENDED 
 INSERT OVERWRITE TABLE outputTbl1
 SELECT key, count(1) FROM (SELECT key, val FROM T1) subq1 GROUP BY key
 POSTHOOK: type: QUERY
@@ -985,11 +997,13 @@ POSTHOOK: Lineage: t1.val SIMPLE [(t1)t1
 3	1
 7	1
 8	2
-PREHOOK: query: EXPLAIN EXTENDED
+PREHOOK: query: -- It should work for sub-queries with column aliases
+EXPLAIN EXTENDED
 INSERT OVERWRITE TABLE outputTbl1
 SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1) subq1 GROUP BY k
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN EXTENDED
+POSTHOOK: query: -- It should work for sub-queries with column aliases
+EXPLAIN EXTENDED
 INSERT OVERWRITE TABLE outputTbl1
 SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1) subq1 GROUP BY k
 POSTHOOK: type: QUERY
@@ -1396,11 +1410,15 @@ POSTHOOK: Lineage: outputtbl2.key1 EXPRE
 POSTHOOK: Lineage: outputtbl2.key2 SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
 POSTHOOK: Lineage: t1.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: t1.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
-PREHOOK: query: EXPLAIN EXTENDED 
+PREHOOK: query: -- The plan should be converted to a map-side group by if the group by key contains a constant followed
+-- by a match to the sorted key
+EXPLAIN EXTENDED 
 INSERT OVERWRITE TABLE outputTbl3
 SELECT 1, key, count(1) FROM T1 GROUP BY 1, key
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN EXTENDED 
+POSTHOOK: query: -- The plan should be converted to a map-side group by if the group by key contains a constant followed
+-- by a match to the sorted key
+EXPLAIN EXTENDED 
 INSERT OVERWRITE TABLE outputTbl3
 SELECT 1, key, count(1) FROM T1 GROUP BY 1, key
 POSTHOOK: type: QUERY
@@ -1782,11 +1800,13 @@ POSTHOOK: Lineage: outputtbl3.key1 SIMPL
 POSTHOOK: Lineage: outputtbl3.key2 EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: t1.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: t1.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
-PREHOOK: query: EXPLAIN EXTENDED 
+PREHOOK: query: -- no map-side group by if the group by key contains a constant followed by another column
+EXPLAIN EXTENDED 
 INSERT OVERWRITE TABLE outputTbl4
 SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN EXTENDED 
+POSTHOOK: query: -- no map-side group by if the group by key contains a constant followed by another column
+EXPLAIN EXTENDED 
 INSERT OVERWRITE TABLE outputTbl4
 SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val
 POSTHOOK: type: QUERY
@@ -2049,11 +2069,13 @@ POSTHOOK: Lineage: t1.val SIMPLE [(t1)t1
 7	1	17	1
 8	1	18	1
 8	1	28	1
-PREHOOK: query: EXPLAIN EXTENDED 
+PREHOOK: query: -- no map-side group by if the group by key contains a function
+EXPLAIN EXTENDED 
 INSERT OVERWRITE TABLE outputTbl3
 SELECT key, key + 1, count(1) FROM T1 GROUP BY key, key + 1
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN EXTENDED 
+POSTHOOK: query: -- no map-side group by if the group by key contains a function
+EXPLAIN EXTENDED 
 INSERT OVERWRITE TABLE outputTbl3
 SELECT key, key + 1, count(1) FROM T1 GROUP BY key, key + 1
 POSTHOOK: type: QUERY
@@ -2323,13 +2345,21 @@ POSTHOOK: Lineage: t1.val SIMPLE [(t1)t1
 3	4	1
 7	8	1
 8	9	2
-PREHOOK: query: EXPLAIN EXTENDED 
+PREHOOK: query: -- it should not matter what follows the group by
+-- test various cases
+
+-- group by followed by another group by
+EXPLAIN EXTENDED 
 INSERT OVERWRITE TABLE outputTbl1
 SELECT key + key, sum(cnt) from
 (SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1
 group by key + key
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN EXTENDED 
+POSTHOOK: query: -- it should not matter what follows the group by
+-- test various cases
+
+-- group by followed by another group by
+EXPLAIN EXTENDED 
 INSERT OVERWRITE TABLE outputTbl1
 SELECT key + key, sum(cnt) from
 (SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1
@@ -2618,7 +2648,8 @@ POSTHOOK: Lineage: t1.val SIMPLE [(t1)t1
 6	1
 14	1
 16	2
-PREHOOK: query: EXPLAIN EXTENDED 
+PREHOOK: query: -- group by followed by a union
+EXPLAIN EXTENDED 
 INSERT OVERWRITE TABLE outputTbl1
 SELECT * FROM (
 SELECT key, count(1) FROM T1 GROUP BY key
@@ -2626,7 +2657,8 @@ SELECT key, count(1) FROM T1 GROUP BY ke
 SELECT key, count(1) FROM T1 GROUP BY key
 ) subq1
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN EXTENDED 
+POSTHOOK: query: -- group by followed by a union
+EXPLAIN EXTENDED 
 INSERT OVERWRITE TABLE outputTbl1
 SELECT * FROM (
 SELECT key, count(1) FROM T1 GROUP BY key
@@ -3146,7 +3178,8 @@ POSTHOOK: Lineage: t1.val SIMPLE [(t1)t1
 7	1
 8	2
 8	2
-PREHOOK: query: EXPLAIN EXTENDED
+PREHOOK: query: -- group by followed by a union where one of the sub-queries is map-side group by
+EXPLAIN EXTENDED
 INSERT OVERWRITE TABLE outputTbl1
 SELECT * FROM (
 SELECT key, count(1) FROM T1 GROUP BY key
@@ -3154,7 +3187,8 @@ SELECT key, count(1) FROM T1 GROUP BY ke
 SELECT key + key as key, count(1) FROM T1 GROUP BY key + key
 ) subq1
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN EXTENDED
+POSTHOOK: query: -- group by followed by a union where one of the sub-queries is map-side group by
+EXPLAIN EXTENDED
 INSERT OVERWRITE TABLE outputTbl1
 SELECT * FROM (
 SELECT key, count(1) FROM T1 GROUP BY key
@@ -3794,7 +3828,8 @@ POSTHOOK: Lineage: t1.val SIMPLE [(t1)t1
 8	2
 14	1
 16	2
-PREHOOK: query: EXPLAIN EXTENDED 
+PREHOOK: query: -- group by followed by a join
+EXPLAIN EXTENDED 
 INSERT OVERWRITE TABLE outputTbl1
 SELECT subq1.key, subq1.cnt+subq2.cnt FROM 
 (SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1
@@ -3802,7 +3837,8 @@ JOIN
 (SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq2
 ON subq1.key = subq2.key
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN EXTENDED 
+POSTHOOK: query: -- group by followed by a join
+EXPLAIN EXTENDED 
 INSERT OVERWRITE TABLE outputTbl1
 SELECT subq1.key, subq1.cnt+subq2.cnt FROM 
 (SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1
@@ -4144,14 +4180,16 @@ POSTHOOK: Lineage: t1.val SIMPLE [(t1)t1
 3	2
 7	2
 8	4
-PREHOOK: query: EXPLAIN EXTENDED 
+PREHOOK: query: -- group by followed by a join where one of the sub-queries can be performed in the mapper
+EXPLAIN EXTENDED 
 SELECT * FROM 
 (SELECT key, count(1) FROM T1 GROUP BY key) subq1
 JOIN
 (SELECT key, val, count(1) FROM T1 GROUP BY key, val) subq2
 ON subq1.key = subq2.key
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN EXTENDED 
+POSTHOOK: query: -- group by followed by a join where one of the sub-queries can be performed in the mapper
+EXPLAIN EXTENDED 
 SELECT * FROM 
 (SELECT key, count(1) FROM T1 GROUP BY key) subq1
 JOIN
@@ -4535,11 +4573,13 @@ POSTHOOK: Lineage: outputtbl4.key2 SIMPL
 POSTHOOK: Lineage: outputtbl4.key3 SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
 POSTHOOK: Lineage: t1.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: t1.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
-PREHOOK: query: INSERT OVERWRITE TABLE T2 select key, val from T1
+PREHOOK: query: -- perform an insert to make sure there are 2 files
+INSERT OVERWRITE TABLE T2 select key, val from T1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t1
 PREHOOK: Output: default@t2
-POSTHOOK: query: INSERT OVERWRITE TABLE T2 select key, val from T1
+POSTHOOK: query: -- perform an insert to make sure there are 2 files
+INSERT OVERWRITE TABLE T2 select key, val from T1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t1
 POSTHOOK: Output: default@t2
@@ -4574,11 +4614,13 @@ POSTHOOK: Lineage: t1.key SIMPLE [(t1)t1
 POSTHOOK: Lineage: t1.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
 POSTHOOK: Lineage: t2.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: t2.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
-PREHOOK: query: EXPLAIN EXTENDED 
+PREHOOK: query: -- no mapside sort group by if the group by is a prefix of the sorted key
+EXPLAIN EXTENDED 
 INSERT OVERWRITE TABLE outputTbl1
 SELECT key, count(1) FROM T2 GROUP BY key
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN EXTENDED 
+POSTHOOK: query: -- no mapside sort group by if the group by is a prefix of the sorted key
+EXPLAIN EXTENDED 
 INSERT OVERWRITE TABLE outputTbl1
 SELECT key, count(1) FROM T2 GROUP BY key
 POSTHOOK: type: QUERY
@@ -4875,11 +4917,15 @@ POSTHOOK: Lineage: t2.val SIMPLE [(t1)t1
 3	1
 7	1
 8	2
-PREHOOK: query: EXPLAIN EXTENDED 
+PREHOOK: query: -- The plan should be converted to a map-side group by if the group by key contains a constant in between the
+-- sorted keys
+EXPLAIN EXTENDED 
 INSERT OVERWRITE TABLE outputTbl4
 SELECT key, 1, val, count(1) FROM T2 GROUP BY key, 1, val
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN EXTENDED 
+POSTHOOK: query: -- The plan should be converted to a map-side group by if the group by key contains a constant in between the
+-- sorted keys
+EXPLAIN EXTENDED 
 INSERT OVERWRITE TABLE outputTbl4
 SELECT key, 1, val, count(1) FROM T2 GROUP BY key, 1, val
 POSTHOOK: type: QUERY
@@ -5399,11 +5445,15 @@ POSTHOOK: Lineage: t1.key SIMPLE [(t1)t1
 POSTHOOK: Lineage: t1.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
 POSTHOOK: Lineage: t2.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: t2.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
-PREHOOK: query: EXPLAIN EXTENDED 
+PREHOOK: query: -- The plan should be converted to a map-side group by if the group by key contains a constant in between the
+-- sorted keys followed by anything
+EXPLAIN EXTENDED 
 INSERT OVERWRITE TABLE outputTbl5
 SELECT key, 1, val, 2, count(1) FROM T2 GROUP BY key, 1, val, 2
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN EXTENDED 
+POSTHOOK: query: -- The plan should be converted to a map-side group by if the group by key contains a constant in between the
+-- sorted keys followed by anything
+EXPLAIN EXTENDED 
 INSERT OVERWRITE TABLE outputTbl5
 SELECT key, 1, val, 2, count(1) FROM T2 GROUP BY key, 1, val, 2
 POSTHOOK: type: QUERY
@@ -5861,13 +5911,15 @@ POSTHOOK: Lineage: t2.val SIMPLE [(t1)t1
 7	1	17	2	1
 8	1	18	2	1
 8	1	28	2	1
-PREHOOK: query: EXPLAIN EXTENDED
+PREHOOK: query: -- constants from sub-queries should work fine
+EXPLAIN EXTENDED
 INSERT OVERWRITE TABLE outputTbl4
 SELECT key, constant, val, count(1) from 
 (SELECT key, 1 as constant, val from T2)subq
 group by key, constant, val
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN EXTENDED
+POSTHOOK: query: -- constants from sub-queries should work fine
+EXPLAIN EXTENDED
 INSERT OVERWRITE TABLE outputTbl4
 SELECT key, constant, val, count(1) from 
 (SELECT key, 1 as constant, val from T2)subq
@@ -6380,7 +6432,8 @@ POSTHOOK: Lineage: t2.val SIMPLE [(t1)t1
 7	1	17	1
 8	1	18	1
 8	1	28	1
-PREHOOK: query: EXPLAIN EXTENDED
+PREHOOK: query: -- multiple levels of constants from sub-queries should work fine
+EXPLAIN EXTENDED
 INSERT OVERWRITE TABLE outputTbl4
 select key, constant3, val, count(1) from
 (
@@ -6389,7 +6442,8 @@ SELECT key, constant as constant2, val, 
 )subq2
 group by key, constant3, val
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN EXTENDED
+POSTHOOK: query: -- multiple levels of constants from sub-queries should work fine
+EXPLAIN EXTENDED
 INSERT OVERWRITE TABLE outputTbl4
 select key, constant3, val, count(1) from
 (
@@ -7430,12 +7484,14 @@ POSTHOOK: Lineage: t2.val SIMPLE [(t1)t1
 7	17	1
 8	18	1
 8	28	1
-PREHOOK: query: EXPLAIN
+PREHOOK: query: -- multi-table insert with a sub-query
+EXPLAIN
 FROM (select key, val from T2 where key = 8) x
 INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key
 INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN
+POSTHOOK: query: -- multi-table insert with a sub-query
+EXPLAIN
 FROM (select key, val from T2 where key = 8) x
 INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key
 INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val

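The long run of cases above all probe one rule: a GROUP BY can be evaluated entirely on the map side only when its keys line up with the table's bucketing/sort keys (constants may be interleaved; functions and extra columns may not). A minimal sketch of the setup these tests rely on (the DDL and set statement are assumptions inferred from the comments, since the .q file is not in this diff):

    -- assumed setup: T1 bucketed and sorted on key, map-side sorted group by enabled
    set hive.map.groupby.sorted=true;
    CREATE TABLE T1(key STRING, val STRING)
    CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;

    -- converted to a map-side group by: the group by key matches the sorted key
    EXPLAIN EXTENDED
    INSERT OVERWRITE TABLE outputTbl1
    SELECT key, count(1) FROM T1 GROUP BY key;

    -- not converted: (key, val) is a superset of the sort key
    EXPLAIN EXTENDED
    INSERT OVERWRITE TABLE outputTbl2
    SELECT key, val, count(1) FROM T1 GROUP BY key, val;
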
Modified: hive/trunk/ql/src/test/results/clientpositive/groupby_sort_10.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/groupby_sort_10.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/groupby_sort_10.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/groupby_sort_10.q.out Wed Apr 10 00:06:55 2013
@@ -5,21 +5,25 @@ POSTHOOK: query: CREATE TABLE T1(key STR
 CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: default@T1
-PREHOOK: query: INSERT OVERWRITE TABLE T1 PARTITION (ds='1')
+PREHOOK: query: -- perform an insert to make sure there are 2 files
+INSERT OVERWRITE TABLE T1 PARTITION (ds='1')
 SELECT * from src where key = 0 or key = 11
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@t1@ds=1
-POSTHOOK: query: INSERT OVERWRITE TABLE T1 PARTITION (ds='1')
+POSTHOOK: query: -- perform an insert to make sure there are 2 files
+INSERT OVERWRITE TABLE T1 PARTITION (ds='1')
 SELECT * from src where key = 0 or key = 11
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 POSTHOOK: Output: default@t1@ds=1
 POSTHOOK: Lineage: t1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: t1 PARTITION(ds=1).val SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: EXPLAIN select distinct key from T1
+PREHOOK: query: -- The plan is converted to a map-side plan
+EXPLAIN select distinct key from T1
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN select distinct key from T1
+POSTHOOK: query: -- The plan is converted to a map-side plan
+EXPLAIN select distinct key from T1
 POSTHOOK: type: QUERY
 POSTHOOK: Lineage: t1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: t1 PARTITION(ds=1).val SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
@@ -80,12 +84,14 @@ POSTHOOK: Lineage: t1 PARTITION(ds=1).ke
 POSTHOOK: Lineage: t1 PARTITION(ds=1).val SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 0
 11
-PREHOOK: query: INSERT OVERWRITE TABLE T1 PARTITION (ds='2')
+PREHOOK: query: -- perform an insert to make sure there are 2 files
+INSERT OVERWRITE TABLE T1 PARTITION (ds='2')
 SELECT * from src where key = 0 or key = 11
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@t1@ds=2
-POSTHOOK: query: INSERT OVERWRITE TABLE T1 PARTITION (ds='2')
+POSTHOOK: query: -- perform an insert to make sure there are 2 files
+INSERT OVERWRITE TABLE T1 PARTITION (ds='2')
 SELECT * from src where key = 0 or key = 11
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
@@ -94,9 +100,13 @@ POSTHOOK: Lineage: t1 PARTITION(ds=1).ke
 POSTHOOK: Lineage: t1 PARTITION(ds=1).val SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 POSTHOOK: Lineage: t1 PARTITION(ds=2).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: t1 PARTITION(ds=2).val SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: EXPLAIN select distinct key from T1
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN select distinct key from T1
+PREHOOK: query: -- The plan is not converted to a map-side plan, since although the sorting columns and grouping
+-- columns match, the user is querying multiple input partitions
+EXPLAIN select distinct key from T1
+PREHOOK: type: QUERY
+POSTHOOK: query: -- The plan is not converted to a map-side plan, since although the sorting columns and grouping
+-- columns match, the user is querying multiple input partitions
+EXPLAIN select distinct key from T1
 POSTHOOK: type: QUERY
 POSTHOOK: Lineage: t1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: t1 PARTITION(ds=1).val SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]

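The scenario described by the new groupby_sort_10 comments can be sketched as follows. The CREATE TABLE mirrors the statement shown at the top of that file; the hive.map.groupby.sorted flag named here is an assumption about how the optimization is enabled, since the .q file itself is not part of this diff:

    SET hive.map.groupby.sorted=true;   -- assumed flag gating the rewrite

    CREATE TABLE T1(key STRING, val STRING) PARTITIONED BY (ds STRING)
    CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;

    INSERT OVERWRITE TABLE T1 PARTITION (ds='1')
    SELECT * FROM src WHERE key = 0 OR key = 11;

    -- one bucketed/sorted partition: the distinct becomes a map-side plan
    EXPLAIN SELECT DISTINCT key FROM T1;

    INSERT OVERWRITE TABLE T1 PARTITION (ds='2')
    SELECT * FROM src WHERE key = 0 OR key = 11;

    -- two input partitions: the rewrite is skipped and a reduce phase remains
    EXPLAIN SELECT DISTINCT key FROM T1;
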
Modified: hive/trunk/ql/src/test/results/clientpositive/groupby_sort_2.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/groupby_sort_2.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/groupby_sort_2.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/groupby_sort_2.q.out Wed Apr 10 00:06:55 2013
@@ -11,11 +11,13 @@ PREHOOK: Output: default@t1
 POSTHOOK: query: LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1
 POSTHOOK: type: LOAD
 POSTHOOK: Output: default@t1
-PREHOOK: query: INSERT OVERWRITE TABLE T1 select key, val from T1
+PREHOOK: query: -- perform an insert to make sure there are 2 files
+INSERT OVERWRITE TABLE T1 select key, val from T1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t1
 PREHOOK: Output: default@t1
-POSTHOOK: query: INSERT OVERWRITE TABLE T1 select key, val from T1
+POSTHOOK: query: -- perform an insert to make sure there are 2 files
+INSERT OVERWRITE TABLE T1 select key, val from T1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t1
 POSTHOOK: Output: default@t1
@@ -28,11 +30,15 @@ POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: default@outputTbl1
 POSTHOOK: Lineage: t1.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: t1.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
-PREHOOK: query: EXPLAIN
+PREHOOK: query: -- The plan should not be converted to a map-side group by even though the group by key
+-- matches the sorted key. Adding an order by at the end to make the test results deterministic
+EXPLAIN
 INSERT OVERWRITE TABLE outputTbl1
 SELECT val, count(1) FROM T1 GROUP BY val
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN
+POSTHOOK: query: -- The plan should not be converted to a map-side group by even though the group by key
+-- matches the sorted key. Adding an order by at the end to make the test results deterministic
+EXPLAIN
 INSERT OVERWRITE TABLE outputTbl1
 SELECT val, count(1) FROM T1 GROUP BY val
 POSTHOOK: type: QUERY

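Per the groupby_sort_2 comments, the group-by column lines up with the sort column yet the map-side rewrite is still rejected; the usual reason is that the table is bucketed on a different column, so rows with equal val can sit in different files and a per-file group by could emit the same group twice. A sketch under that assumption (the test's real DDL is not part of this hunk):

    -- assumed layout: bucketed on key but sorted on val
    CREATE TABLE T1(key STRING, val STRING)
    CLUSTERED BY (key) SORTED BY (val) INTO 2 BUCKETS;

    CREATE TABLE outputTbl1(val STRING, cnt INT);  -- assumed schema

    EXPLAIN
    INSERT OVERWRITE TABLE outputTbl1
    SELECT val, count(1) FROM T1 GROUP BY val;     -- keeps its reduce phase
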
Modified: hive/trunk/ql/src/test/results/clientpositive/groupby_sort_3.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/groupby_sort_3.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/groupby_sort_3.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/groupby_sort_3.q.out Wed Apr 10 00:06:55 2013
@@ -11,11 +11,13 @@ PREHOOK: Output: default@t1
 POSTHOOK: query: LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1
 POSTHOOK: type: LOAD
 POSTHOOK: Output: default@t1
-PREHOOK: query: INSERT OVERWRITE TABLE T1 select key, val from T1
+PREHOOK: query: -- perform an insert to make sure there are 2 files
+INSERT OVERWRITE TABLE T1 select key, val from T1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t1
 PREHOOK: Output: default@t1
-POSTHOOK: query: INSERT OVERWRITE TABLE T1 select key, val from T1
+POSTHOOK: query: -- perform an insert to make sure there are 2 files
+INSERT OVERWRITE TABLE T1 select key, val from T1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t1
 POSTHOOK: Output: default@t1
@@ -28,11 +30,13 @@ POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: default@outputTbl1
 POSTHOOK: Lineage: t1.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: t1.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
-PREHOOK: query: EXPLAIN
+PREHOOK: query: -- The plan should be converted to a map-side group by
+EXPLAIN
 INSERT OVERWRITE TABLE outputTbl1
 SELECT key, val, count(1) FROM T1 GROUP BY key, val
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN
+POSTHOOK: query: -- The plan should be converted to a map-side group by
+EXPLAIN
 INSERT OVERWRITE TABLE outputTbl1
 SELECT key, val, count(1) FROM T1 GROUP BY key, val
 POSTHOOK: type: QUERY
@@ -193,11 +197,13 @@ POSTHOOK: Lineage: outputtbl1.key SIMPLE
 POSTHOOK: Lineage: outputtbl1.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
 POSTHOOK: Lineage: t1.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: t1.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
-PREHOOK: query: EXPLAIN
+PREHOOK: query: -- The plan should be converted to a map-side group by
+EXPLAIN
 INSERT OVERWRITE TABLE outputTbl2
 SELECT key, count(1) FROM T1 GROUP BY key
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN
+POSTHOOK: query: -- The plan should be converted to a map-side group by
+EXPLAIN
 INSERT OVERWRITE TABLE outputTbl2
 SELECT key, count(1) FROM T1 GROUP BY key
 POSTHOOK: type: QUERY

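groupby_sort_3 is the positive case: both EXPLAINs above are expected to collapse into map-side group bys. A sketch of the shape being tested, assuming T1 is sorted on (key, val) and bucketed on a subset of those columns (the real DDL is not in this hunk):

    -- assumed layout
    CREATE TABLE T1(key STRING, val STRING)
    CLUSTERED BY (key) SORTED BY (key, val) INTO 2 BUCKETS;

    CREATE TABLE outputTbl1(key STRING, val STRING, cnt INT);  -- assumed
    CREATE TABLE outputTbl2(key STRING, cnt INT);              -- assumed

    -- grouping on exactly the sort columns
    EXPLAIN INSERT OVERWRITE TABLE outputTbl1
    SELECT key, val, count(1) FROM T1 GROUP BY key, val;

    -- grouping on a leading prefix of the sort columns
    EXPLAIN INSERT OVERWRITE TABLE outputTbl2
    SELECT key, count(1) FROM T1 GROUP BY key;
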
Modified: hive/trunk/ql/src/test/results/clientpositive/groupby_sort_4.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/groupby_sort_4.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/groupby_sort_4.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/groupby_sort_4.q.out Wed Apr 10 00:06:55 2013
@@ -11,11 +11,13 @@ PREHOOK: Output: default@t1
 POSTHOOK: query: LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1
 POSTHOOK: type: LOAD
 POSTHOOK: Output: default@t1
-PREHOOK: query: INSERT OVERWRITE TABLE T1 select key, val from T1
+PREHOOK: query: -- perform an insert to make sure there are 2 files
+INSERT OVERWRITE TABLE T1 select key, val from T1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t1
 PREHOOK: Output: default@t1
-POSTHOOK: query: INSERT OVERWRITE TABLE T1 select key, val from T1
+POSTHOOK: query: -- perform an insert to make sure there are 2 files
+INSERT OVERWRITE TABLE T1 select key, val from T1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t1
 POSTHOOK: Output: default@t1
@@ -28,11 +30,15 @@ POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: default@outputTbl1
 POSTHOOK: Lineage: t1.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: t1.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
-PREHOOK: query: EXPLAIN
+PREHOOK: query: -- The plan should not be converted to a map-side group by.
+-- However, there should be no hash-based aggregation on the map-side
+EXPLAIN
 INSERT OVERWRITE TABLE outputTbl1
 SELECT key, count(1) FROM T1 GROUP BY key
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN
+POSTHOOK: query: -- The plan should not be converted to a map-side group by.
+-- However, there should be no hash-based aggregation on the map-side
+EXPLAIN
 INSERT OVERWRITE TABLE outputTbl1
 SELECT key, count(1) FROM T1 GROUP BY key
 POSTHOOK: type: QUERY
@@ -159,11 +165,15 @@ POSTHOOK: Lineage: outputtbl1.cnt EXPRES
 POSTHOOK: Lineage: outputtbl1.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: t1.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: t1.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
-PREHOOK: query: EXPLAIN
+PREHOOK: query: -- The plan should not be converted to a map-side group by.
+-- Hash-based aggregations should be performed on the map-side
+EXPLAIN
 INSERT OVERWRITE TABLE outputTbl2
 SELECT key, val, count(1) FROM T1 GROUP BY key, val
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN
+POSTHOOK: query: -- The plan should not be converted to a map-side group by.
+-- Hash-based aggregations should be performed on the map-side
+EXPLAIN
 INSERT OVERWRITE TABLE outputTbl2
 SELECT key, val, count(1) FROM T1 GROUP BY key, val
 POSTHOOK: type: QUERY

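groupby_sort_4 separates two weaker outcomes: the reduce phase stays, but whether the mapper needs a hash table depends on whether the file sort order already keeps equal group keys adjacent. A sketch assuming a layout along the lines of CLUSTERED BY (key, val) SORTED BY (key):

    -- assumed layout: sorted on key only, bucketed on (key, val)
    CREATE TABLE T1(key STRING, val STRING)
    CLUSTERED BY (key, val) SORTED BY (key) INTO 2 BUCKETS;

    CREATE TABLE outputTbl1(key STRING, cnt INT);              -- assumed
    CREATE TABLE outputTbl2(key STRING, val STRING, cnt INT);  -- assumed

    -- rows arrive ordered by key, so partial counts can be streamed
    -- without hash aggregation, but a reducer still merges the buckets
    EXPLAIN INSERT OVERWRITE TABLE outputTbl1
    SELECT key, count(1) FROM T1 GROUP BY key;

    -- equal (key, val) pairs need not be adjacent when only key is sorted,
    -- so the mapper falls back to hash-based partial aggregation
    EXPLAIN INSERT OVERWRITE TABLE outputTbl2
    SELECT key, val, count(1) FROM T1 GROUP BY key, val;
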
Modified: hive/trunk/ql/src/test/results/clientpositive/groupby_sort_5.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/groupby_sort_5.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/groupby_sort_5.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/groupby_sort_5.q.out Wed Apr 10 00:06:55 2013
@@ -11,11 +11,13 @@ PREHOOK: Output: default@t1
 POSTHOOK: query: LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1
 POSTHOOK: type: LOAD
 POSTHOOK: Output: default@t1
-PREHOOK: query: INSERT OVERWRITE TABLE T1 select key, val from T1
+PREHOOK: query: -- perform an insert to make sure there are 2 files
+INSERT OVERWRITE TABLE T1 select key, val from T1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t1
 PREHOOK: Output: default@t1
-POSTHOOK: query: INSERT OVERWRITE TABLE T1 select key, val from T1
+POSTHOOK: query: -- perform an insert to make sure there are 2 files
+INSERT OVERWRITE TABLE T1 select key, val from T1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t1
 POSTHOOK: Output: default@t1
@@ -28,11 +30,17 @@ POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: default@outputTbl1
 POSTHOOK: Lineage: t1.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: t1.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
-PREHOOK: query: EXPLAIN
+PREHOOK: query: -- The plan should be converted to a map-side group by, since the
+-- sorting columns and grouping columns match, and all the bucketing columns
+-- are part of sorting columns
+EXPLAIN
 INSERT OVERWRITE TABLE outputTbl1
 SELECT key, val, count(1) FROM T1 GROUP BY key, val
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN
+POSTHOOK: query: -- The plan should be converted to a map-side group by, since the
+-- sorting columns and grouping columns match, and all the bucketing columns
+-- are part of sorting columns
+EXPLAIN
 INSERT OVERWRITE TABLE outputTbl1
 SELECT key, val, count(1) FROM T1 GROUP BY key, val
 POSTHOOK: type: QUERY
@@ -219,11 +227,13 @@ POSTHOOK: Lineage: outputtbl1.key SIMPLE
 POSTHOOK: Lineage: outputtbl1.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
 POSTHOOK: Lineage: t1.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: t1.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
-PREHOOK: query: INSERT OVERWRITE TABLE T1 select key, val from T1
+PREHOOK: query: -- perform an insert to make sure there are 2 files
+INSERT OVERWRITE TABLE T1 select key, val from T1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t1
 PREHOOK: Output: default@t1
-POSTHOOK: query: INSERT OVERWRITE TABLE T1 select key, val from T1
+POSTHOOK: query: -- perform an insert to make sure there are 2 files
+INSERT OVERWRITE TABLE T1 select key, val from T1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t1
 POSTHOOK: Output: default@t1
@@ -234,11 +244,17 @@ POSTHOOK: Lineage: t1.key SIMPLE [(t1)t1
 POSTHOOK: Lineage: t1.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: t1.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
 POSTHOOK: Lineage: t1.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
-PREHOOK: query: EXPLAIN
+PREHOOK: query: -- The plan should be converted to a map-side group by, since the
+-- sorting columns and grouping columns match, and all the bucketing columns
+-- are part of sorting columns
+EXPLAIN
 INSERT OVERWRITE TABLE outputTbl1
 SELECT key, val, count(1) FROM T1 GROUP BY key, val
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN
+POSTHOOK: query: -- The plan should be converted to a map-side group by, since the
+-- sorting columns and grouping columns match, and all the bucketing columns
+-- are part of sorting columns
+EXPLAIN
 INSERT OVERWRITE TABLE outputTbl1
 SELECT key, val, count(1) FROM T1 GROUP BY key, val
 POSTHOOK: type: QUERY
@@ -455,11 +471,13 @@ POSTHOOK: Lineage: t1.key SIMPLE [(t1)t1
 POSTHOOK: Lineage: t1.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: t1.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
 POSTHOOK: Lineage: t1.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
-PREHOOK: query: INSERT OVERWRITE TABLE T1 select key, val from T1
+PREHOOK: query: -- perform an insert to make sure there are 2 files
+INSERT OVERWRITE TABLE T1 select key, val from T1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t1
 PREHOOK: Output: default@t1
-POSTHOOK: query: INSERT OVERWRITE TABLE T1 select key, val from T1
+POSTHOOK: query: -- perform an insert to make sure there are 2 files
+INSERT OVERWRITE TABLE T1 select key, val from T1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t1
 POSTHOOK: Output: default@t1
@@ -492,11 +510,19 @@ POSTHOOK: Lineage: t1.key SIMPLE [(t1)t1
 POSTHOOK: Lineage: t1.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
 POSTHOOK: Lineage: t1.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
 POSTHOOK: Lineage: t1.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
-PREHOOK: query: EXPLAIN
+PREHOOK: query: -- The plan should not be converted to a map-side group by, since although the
+-- sorting columns and grouping columns match, not all the bucketing columns
+-- are part of sorting columns. However, no hash map aggregation is required
+-- on the map-side.
+EXPLAIN
 INSERT OVERWRITE TABLE outputTbl2
 SELECT key, count(1) FROM T1 GROUP BY key
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN
+POSTHOOK: query: -- The plan should not be converted to a map-side group by, since although the
+-- sorting columns and grouping columns match, not all the bucketing columns
+-- are part of sorting columns. However, no hash map aggregation is required
+-- on the map-side.
+EXPLAIN
 INSERT OVERWRITE TABLE outputTbl2
 SELECT key, count(1) FROM T1 GROUP BY key
 POSTHOOK: type: QUERY

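Taken together, the groupby_sort_5 comments state the full eligibility rule: the grouping columns must match the sorting columns, and every bucketing column must appear among the sorting columns. A contrast sketch; both DDL variants are illustrative assumptions rather than the test's exact statements:

    -- variant A: bucketing column (val) is among the sort columns -> eligible
    CREATE TABLE T1_a(key STRING, val STRING)
    CLUSTERED BY (val) SORTED BY (key, val) INTO 2 BUCKETS;

    -- variant B: bucketing column (val) is not a sort column -> not eligible,
    -- though the sorted input still spares the mapper a hash table
    CREATE TABLE T1_b(key STRING, val STRING)
    CLUSTERED BY (val) SORTED BY (key) INTO 2 BUCKETS;

    EXPLAIN SELECT key, val, count(1) FROM T1_a GROUP BY key, val;  -- map-side group by
    EXPLAIN SELECT key, count(1) FROM T1_b GROUP BY key;            -- reduce phase kept
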
Modified: hive/trunk/ql/src/test/results/clientpositive/groupby_sort_6.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/groupby_sort_6.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/groupby_sort_6.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/groupby_sort_6.q.out Wed Apr 10 00:06:55 2013
@@ -8,11 +8,13 @@ PREHOOK: type: CREATETABLE
 POSTHOOK: query: CREATE TABLE outputTbl1(key int, cnt int)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: default@outputTbl1
-PREHOOK: query: EXPLAIN EXTENDED
+PREHOOK: query: -- The plan should not be converted to a map-side group since no partition is being accessed
+EXPLAIN EXTENDED
 INSERT OVERWRITE TABLE outputTbl1
 SELECT key, count(1) FROM T1 where ds = '1' GROUP BY key
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN EXTENDED
+POSTHOOK: query: -- The plan should not be converted to a map-side group since no partition is being accessed
+EXPLAIN EXTENDED
 INSERT OVERWRITE TABLE outputTbl1
 SELECT key, count(1) FROM T1 where ds = '1' GROUP BY key
 POSTHOOK: type: QUERY
@@ -164,11 +166,13 @@ POSTHOOK: Output: default@t1
 POSTHOOK: Output: default@t1@ds=2
 POSTHOOK: Lineage: outputtbl1.cnt EXPRESSION [(t1)t1.null, ]
 POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
-PREHOOK: query: EXPLAIN EXTENDED
+PREHOOK: query: -- The plan should not be converted to a map-side group since no partition is being accessed
+EXPLAIN EXTENDED
 INSERT OVERWRITE TABLE outputTbl1
 SELECT key, count(1) FROM T1 where ds = '1' GROUP BY key
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN EXTENDED
+POSTHOOK: query: -- The plan should not be converted to a map-side group since no partition is being accessed
+EXPLAIN EXTENDED
 INSERT OVERWRITE TABLE outputTbl1
 SELECT key, count(1) FROM T1 where ds = '1' GROUP BY key
 POSTHOOK: type: QUERY
@@ -327,11 +331,15 @@ POSTHOOK: Lineage: outputtbl1.cnt EXPRES
 POSTHOOK: Lineage: outputtbl1.cnt EXPRESSION [(t1)t1.null, ]
 POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
-PREHOOK: query: EXPLAIN EXTENDED
+PREHOOK: query: -- The plan should not be converted to a map-side group since the partition being accessed
+-- is neither bucketed nor sorted
+EXPLAIN EXTENDED
 INSERT OVERWRITE TABLE outputTbl1
 SELECT key, count(1) FROM T1 where ds = '2' GROUP BY key
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN EXTENDED
+POSTHOOK: query: -- The plan should not be converted to a map-side group since the partition being accessed
+-- is neither bucketed nor sorted
+EXPLAIN EXTENDED
 INSERT OVERWRITE TABLE outputTbl1
 SELECT key, count(1) FROM T1 where ds = '2' GROUP BY key
 POSTHOOK: type: QUERY

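groupby_sort_6 shows that the check is made against the partitions actually selected rather than the table-level definition: if partition pruning leaves nothing to scan there is no layout to rely on, and a partition whose own metadata says it is neither bucketed nor sorted rules the rewrite out by itself. The per-partition storage descriptor the optimizer consults can be inspected directly (a sketch; T1 and the partition value follow the test above):

    -- bucketing and sort information is recorded per partition
    DESCRIBE FORMATTED T1 PARTITION (ds='2');
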
Modified: hive/trunk/ql/src/test/results/clientpositive/groupby_sort_7.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/groupby_sort_7.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/groupby_sort_7.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/groupby_sort_7.q.out Wed Apr 10 00:06:55 2013
@@ -12,12 +12,14 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH 
 POSTHOOK: type: LOAD
 POSTHOOK: Output: default@t1
 POSTHOOK: Output: default@t1@ds=1
-PREHOOK: query: INSERT OVERWRITE TABLE T1 PARTITION (ds='1') select key, val from T1 where ds = '1'
+PREHOOK: query: -- perform an insert to make sure there are 2 files
+INSERT OVERWRITE TABLE T1 PARTITION (ds='1') select key, val from T1 where ds = '1'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t1
 PREHOOK: Input: default@t1@ds=1
 PREHOOK: Output: default@t1@ds=1
-POSTHOOK: query: INSERT OVERWRITE TABLE T1 PARTITION (ds='1') select key, val from T1 where ds = '1'
+POSTHOOK: query: -- perform an insert to make sure there are 2 files
+INSERT OVERWRITE TABLE T1 PARTITION (ds='1') select key, val from T1 where ds = '1'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t1
 POSTHOOK: Input: default@t1@ds=1
@@ -31,11 +33,17 @@ POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: default@outputTbl1
 POSTHOOK: Lineage: t1 PARTITION(ds=1).key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: t1 PARTITION(ds=1).val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
-PREHOOK: query: EXPLAIN
+PREHOOK: query: -- The plan should be converted to a map-side group by, since the
+-- sorting columns and grouping columns match, and all the bucketing columns
+-- are part of sorting columns
+EXPLAIN
 INSERT OVERWRITE TABLE outputTbl1
 SELECT key, val, count(1) FROM T1 where ds = '1' GROUP BY key, val
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN
+POSTHOOK: query: -- The plan should be converted to a map-side group by, since the
+-- sorting columns and grouping columns match, and all the bucketing columns
+-- are part of sorting columns
+EXPLAIN
 INSERT OVERWRITE TABLE outputTbl1
 SELECT key, val, count(1) FROM T1 where ds = '1' GROUP BY key, val
 POSTHOOK: type: QUERY

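groupby_sort_7 is the partitioned counterpart of the positive case: once WHERE ds = '1' prunes the scan to a single partition, that partition's own bucketed/sorted layout is enough to finish the group by on the map side. A sketch with the DDL assumed to mirror the unpartitioned tests:

    -- assumed layout; bucketing column is among the sort columns
    CREATE TABLE T1(key STRING, val STRING) PARTITIONED BY (ds STRING)
    CLUSTERED BY (val) SORTED BY (key, val) INTO 2 BUCKETS;

    CREATE TABLE outputTbl1(key STRING, val STRING, cnt INT);  -- assumed

    EXPLAIN
    INSERT OVERWRITE TABLE outputTbl1
    SELECT key, val, count(1) FROM T1 WHERE ds = '1' GROUP BY key, val;
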
Modified: hive/trunk/ql/src/test/results/clientpositive/groupby_sort_8.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/groupby_sort_8.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/groupby_sort_8.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/groupby_sort_8.q.out Wed Apr 10 00:06:55 2013
@@ -12,22 +12,28 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH 
 POSTHOOK: type: LOAD
 POSTHOOK: Output: default@t1
 POSTHOOK: Output: default@t1@ds=1
-PREHOOK: query: INSERT OVERWRITE TABLE T1 PARTITION (ds='1') select key, val from T1 where ds = '1'
+PREHOOK: query: -- perform an insert to make sure there are 2 files
+INSERT OVERWRITE TABLE T1 PARTITION (ds='1') select key, val from T1 where ds = '1'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t1
 PREHOOK: Input: default@t1@ds=1
 PREHOOK: Output: default@t1@ds=1
-POSTHOOK: query: INSERT OVERWRITE TABLE T1 PARTITION (ds='1') select key, val from T1 where ds = '1'
+POSTHOOK: query: -- perform an insert to make sure there are 2 files
+INSERT OVERWRITE TABLE T1 PARTITION (ds='1') select key, val from T1 where ds = '1'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t1
 POSTHOOK: Input: default@t1@ds=1
 POSTHOOK: Output: default@t1@ds=1
 POSTHOOK: Lineage: t1 PARTITION(ds=1).key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: t1 PARTITION(ds=1).val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
-PREHOOK: query: EXPLAIN
+PREHOOK: query: -- The plan is not converted to a map-side plan, since although the sorting columns and grouping
+-- columns match, the user is issuing a distinct
+EXPLAIN
 select count(distinct key) from T1
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN
+POSTHOOK: query: -- The plan is not converted to a map-side plan, since although the sorting columns and grouping
+-- columns match, the user is issuing a distinct
+EXPLAIN
 select count(distinct key) from T1
 POSTHOOK: type: QUERY
 POSTHOOK: Lineage: t1 PARTITION(ds=1).key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]

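groupby_sort_8 isolates the DISTINCT restriction: even when the sorting and grouping columns line up, a distinct aggregate keeps the plan from being collapsed onto the map side. A rough contrast (whether the second statement actually converts still depends on the bucketing checks discussed above):

    -- a distinct aggregate blocks the rewrite outright
    EXPLAIN SELECT count(DISTINCT key) FROM T1;

    -- a plain group by over the same column is the shape that can qualify
    EXPLAIN SELECT key, count(1) FROM T1 GROUP BY key;
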
Modified: hive/trunk/ql/src/test/results/clientpositive/groupby_sort_9.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/groupby_sort_9.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/groupby_sort_9.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/groupby_sort_9.q.out Wed Apr 10 00:06:55 2013
@@ -12,12 +12,14 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH 
 POSTHOOK: type: LOAD
 POSTHOOK: Output: default@t1
 POSTHOOK: Output: default@t1@ds=1
-PREHOOK: query: INSERT OVERWRITE TABLE T1 PARTITION (ds='1') select key, val from T1 where ds = '1'
+PREHOOK: query: -- perform an insert to make sure there are 2 files
+INSERT OVERWRITE TABLE T1 PARTITION (ds='1') select key, val from T1 where ds = '1'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t1
 PREHOOK: Input: default@t1@ds=1
 PREHOOK: Output: default@t1@ds=1
-POSTHOOK: query: INSERT OVERWRITE TABLE T1 PARTITION (ds='1') select key, val from T1 where ds = '1'
+POSTHOOK: query: -- perform an insert to make sure there are 2 files
+INSERT OVERWRITE TABLE T1 PARTITION (ds='1') select key, val from T1 where ds = '1'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t1
 POSTHOOK: Input: default@t1@ds=1
@@ -38,10 +40,14 @@ POSTHOOK: Lineage: t1 PARTITION(ds=1).ke
 POSTHOOK: Lineage: t1 PARTITION(ds=1).val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
 POSTHOOK: Lineage: t1 PARTITION(ds=2).key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: t1 PARTITION(ds=2).val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
-PREHOOK: query: EXPLAIN
+PREHOOK: query: -- The plan is not converted to a map-side plan, since although the sorting columns and grouping
+-- columns match, the user is querying multiple input partitions
+EXPLAIN
 select key, count(1) from T1 group by key
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN
+POSTHOOK: query: -- The plan is not converted to a map-side plan, since although the sorting columns and grouping
+-- columns match, the user is querying multiple input partitions
+EXPLAIN
 select key, count(1) from T1 group by key
 POSTHOOK: type: QUERY
 POSTHOOK: Lineage: t1 PARTITION(ds=1).key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]