Posted to commits@hive.apache.org by gu...@apache.org on 2017/02/03 21:50:33 UTC

[20/51] [partial] hive git commit: HIVE-15790: Remove unused beeline golden files (Gunther Hagleitner, reviewed by Sergey Shelukhin)

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby2.q.out b/ql/src/test/results/beelinepositive/groupby2.q.out
deleted file mode 100644
index d9cc3bd..0000000
--- a/ql/src/test/results/beelinepositive/groupby2.q.out
+++ /dev/null
@@ -1,161 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby2.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby2.q
->>>  set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-No rows affected 
->>>  set hive.map.aggr=false;
-No rows affected 
->>>  set hive.groupby.skewindata=true;
-No rows affected 
->>>  
->>>  CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM src 
-INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1);
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest_g2))) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1)) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION concat (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1) (TOK_FUNCTION sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))))) (TOK_GROUPBY (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 depends on stages: Stage-2'
-'  Stage-3 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: key, value'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: substr(key, 1, 1)'
-'                      type: string'
-'                      expr: substr(value, 5)'
-'                      type: string'
-'                sort order: ++'
-'                Map-reduce partition columns:'
-'                      expr: substr(key, 1, 1)'
-'                      type: string'
-'                      expr: substr(value, 5)'
-'                      type: string'
-'                tag: -1'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(DISTINCT KEY._col1:0._col0)'
-'                expr: sum(KEY._col1:0._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: partial1'
-'          outputColumnNames: _col0, _col1, _col2'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col1'
-'                    type: bigint'
-'                    expr: _col2'
-'                    type: double'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'                expr: sum(VALUE._col1)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: final'
-'          outputColumnNames: _col0, _col1, _col2'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: bigint'
-'                  expr: concat(_col0, _col2)'
-'                  type: string'
-'            outputColumnNames: _col0, _col1, _col2'
-'            Select Operator'
-'              expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: UDFToInteger(_col1)'
-'                    type: int'
-'                    expr: _col2'
-'                    type: string'
-'              outputColumnNames: _col0, _col1, _col2'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby2.dest_g2'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby2.dest_g2'
-''
-'  Stage: Stage-3'
-'    Stats-Aggr Operator'
-''
-''
-124 rows selected 
->>>  
->>>  FROM src 
-INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1);
-'_col0','_col1','_col2'
-No rows selected 
->>>  
->>>  SELECT dest_g2.* FROM dest_g2;
-'key','c1','c2'
-'0','1','00.0'
-'1','71','116414.0'
-'2','69','225571.0'
-'3','62','332004.0'
-'4','74','452763.0'
-'5','6','5397.0'
-'6','5','6398.0'
-'7','6','7735.0'
-'8','8','8762.0'
-'9','7','91047.0'
-10 rows selected 
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby2_limit.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby2_limit.q.out b/ql/src/test/results/beelinepositive/groupby2_limit.q.out
deleted file mode 100644
index 10d928a..0000000
--- a/ql/src/test/results/beelinepositive/groupby2_limit.q.out
+++ /dev/null
@@ -1,92 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby2_limit.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby2_limit.q
->>>  set mapred.reduce.tasks=31;
-No rows affected 
->>>  
->>>  EXPLAIN 
-SELECT src.key, sum(substr(src.value,5)) FROM src GROUP BY src.key LIMIT 5;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) key)) (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5)))) (TOK_GROUPBY (. (TOK_TABLE_OR_COL src) key)) (TOK_LIMIT 5)))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: key, value'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: sum(substr(value, 5))'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: key'
-'                      type: string'
-'                mode: hash'
-'                outputColumnNames: _col0, _col1'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col1'
-'                        type: double'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: mergepartial'
-'          outputColumnNames: _col0, _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: double'
-'            outputColumnNames: _col0, _col1'
-'            Limit'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: 5'
-''
-''
-72 rows selected 
->>>  
->>>  SELECT src.key, sum(substr(src.value,5)) FROM src GROUP BY src.key LIMIT 5;
-'key','_c1'
-'0','0.0'
-'10','10.0'
-'100','200.0'
-'103','206.0'
-'104','208.0'
-5 rows selected 
->>>  
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby2_map.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby2_map.q.out b/ql/src/test/results/beelinepositive/groupby2_map.q.out
deleted file mode 100644
index 0a61980..0000000
--- a/ql/src/test/results/beelinepositive/groupby2_map.q.out
+++ /dev/null
@@ -1,139 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby2_map.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby2_map.q
->>>  set hive.map.aggr=true;
-No rows affected 
->>>  set hive.groupby.skewindata=false;
-No rows affected 
->>>  set mapred.reduce.tasks=31;
-No rows affected 
->>>  
->>>  CREATE TABLE dest1(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1);
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1)) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION concat (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1) (TOK_FUNCTION sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))))) (TOK_GROUPBY (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: key, value'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count(DISTINCT substr(value, 5))'
-'                      expr: sum(substr(value, 5))'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: substr(key, 1, 1)'
-'                      type: string'
-'                      expr: substr(value, 5)'
-'                      type: string'
-'                mode: hash'
-'                outputColumnNames: _col0, _col1, _col2, _col3'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'                  sort order: ++'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col2'
-'                        type: bigint'
-'                        expr: _col3'
-'                        type: double'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(DISTINCT KEY._col1:0._col0)'
-'                expr: sum(VALUE._col1)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: mergepartial'
-'          outputColumnNames: _col0, _col1, _col2'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: bigint'
-'                  expr: concat(_col0, _col2)'
-'                  type: string'
-'            outputColumnNames: _col0, _col1, _col2'
-'            Select Operator'
-'              expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: UDFToInteger(_col1)'
-'                    type: int'
-'                    expr: _col2'
-'                    type: string'
-'              outputColumnNames: _col0, _col1, _col2'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby2_map.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby2_map.dest1'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-''
-102 rows selected 
->>>  
->>>  FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1);
-'_col0','_col1','_col2'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1;
-'key','c1','c2'
-'0','1','00.0'
-'1','71','116414.0'
-'2','69','225571.0'
-'3','62','332004.0'
-'4','74','452763.0'
-'5','6','5397.0'
-'6','5','6398.0'
-'7','6','7735.0'
-'8','8','8762.0'
-'9','7','91047.0'
-10 rows selected 
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby2_map_multi_distinct.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby2_map_multi_distinct.q.out b/ql/src/test/results/beelinepositive/groupby2_map_multi_distinct.q.out
deleted file mode 100644
index 6727a35..0000000
--- a/ql/src/test/results/beelinepositive/groupby2_map_multi_distinct.q.out
+++ /dev/null
@@ -1,155 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby2_map_multi_distinct.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby2_map_multi_distinct.q
->>>  set hive.map.aggr=true;
-No rows affected 
->>>  set hive.groupby.skewindata=false;
-No rows affected 
->>>  set mapred.reduce.tasks=31;
-No rows affected 
->>>  
->>>  CREATE TABLE dest1(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1);
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1)) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION concat (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1) (TOK_FUNCTION sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5)))) (TOK_SELEXPR (TOK_FUNCTIONDI sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION count (. (TOK_TABLE_OR_COL src) value)))) (TOK_GROUPBY (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: key, value'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count(DISTINCT substr(value, 5))'
-'                      expr: sum(substr(value, 5))'
-'                      expr: sum(DISTINCT substr(value, 5))'
-'                      expr: count(value)'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: substr(key, 1, 1)'
-'                      type: string'
-'                      expr: substr(value, 5)'
-'                      type: string'
-'                mode: hash'
-'                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'                  sort order: ++'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col2'
-'                        type: bigint'
-'                        expr: _col3'
-'                        type: double'
-'                        expr: _col4'
-'                        type: double'
-'                        expr: _col5'
-'                        type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(DISTINCT KEY._col1:0._col0)'
-'                expr: sum(VALUE._col1)'
-'                expr: sum(DISTINCT KEY._col1:1._col0)'
-'                expr: count(VALUE._col3)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: mergepartial'
-'          outputColumnNames: _col0, _col1, _col2, _col3, _col4'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: bigint'
-'                  expr: concat(_col0, _col2)'
-'                  type: string'
-'                  expr: _col3'
-'                  type: double'
-'                  expr: _col4'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1, _col2, _col3, _col4'
-'            Select Operator'
-'              expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: UDFToInteger(_col1)'
-'                    type: int'
-'                    expr: _col2'
-'                    type: string'
-'                    expr: UDFToInteger(_col3)'
-'                    type: int'
-'                    expr: UDFToInteger(_col4)'
-'                    type: int'
-'              outputColumnNames: _col0, _col1, _col2, _col3, _col4'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby2_map_multi_distinct.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby2_map_multi_distinct.dest1'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-''
-118 rows selected 
->>>  
->>>  FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1);
-'_col0','_col1','_col2','_col3','_col4'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1;
-'key','c1','c2','c3','c4'
-'0','1','00.0','0','3'
-'1','71','116414.0','10044','115'
-'2','69','225571.0','15780','111'
-'3','62','332004.0','20119','99'
-'4','74','452763.0','30965','124'
-'5','6','5397.0','278','10'
-'6','5','6398.0','331','6'
-'7','6','7735.0','447','10'
-'8','8','8762.0','595','10'
-'9','7','91047.0','577','12'
-10 rows selected 
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby2_map_skew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby2_map_skew.q.out b/ql/src/test/results/beelinepositive/groupby2_map_skew.q.out
deleted file mode 100644
index e338e47..0000000
--- a/ql/src/test/results/beelinepositive/groupby2_map_skew.q.out
+++ /dev/null
@@ -1,178 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby2_map_skew.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby2_map_skew.q
->>>  set hive.map.aggr=true;
-No rows affected 
->>>  set hive.groupby.skewindata=true;
-No rows affected 
->>>  set mapred.reduce.tasks=31;
-No rows affected 
->>>  
->>>  CREATE TABLE dest1(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1);
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1)) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION concat (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1) (TOK_FUNCTION sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))))) (TOK_GROUPBY (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 depends on stages: Stage-2'
-'  Stage-3 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: key, value'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count(DISTINCT substr(value, 5))'
-'                      expr: sum(substr(value, 5))'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: substr(key, 1, 1)'
-'                      type: string'
-'                      expr: substr(value, 5)'
-'                      type: string'
-'                mode: hash'
-'                outputColumnNames: _col0, _col1, _col2, _col3'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'                  sort order: ++'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col2'
-'                        type: bigint'
-'                        expr: _col3'
-'                        type: double'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(DISTINCT KEY._col1:0._col0)'
-'                expr: sum(VALUE._col1)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: partials'
-'          outputColumnNames: _col0, _col1, _col2'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col1'
-'                    type: bigint'
-'                    expr: _col2'
-'                    type: double'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'                expr: sum(VALUE._col1)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: final'
-'          outputColumnNames: _col0, _col1, _col2'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: bigint'
-'                  expr: concat(_col0, _col2)'
-'                  type: string'
-'            outputColumnNames: _col0, _col1, _col2'
-'            Select Operator'
-'              expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: UDFToInteger(_col1)'
-'                    type: int'
-'                    expr: _col2'
-'                    type: string'
-'              outputColumnNames: _col0, _col1, _col2'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby2_map_skew.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby2_map_skew.dest1'
-''
-'  Stage: Stage-3'
-'    Stats-Aggr Operator'
-''
-''
-141 rows selected 
->>>  
->>>  FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1);
-'_col0','_col1','_col2'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1;
-'key','c1','c2'
-'0','1','00.0'
-'1','71','116414.0'
-'2','69','225571.0'
-'3','62','332004.0'
-'4','74','452763.0'
-'5','6','5397.0'
-'6','5','6398.0'
-'7','6','7735.0'
-'8','8','8762.0'
-'9','7','91047.0'
-10 rows selected 
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby2_noskew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby2_noskew.q.out b/ql/src/test/results/beelinepositive/groupby2_noskew.q.out
deleted file mode 100644
index 89cb1a5..0000000
--- a/ql/src/test/results/beelinepositive/groupby2_noskew.q.out
+++ /dev/null
@@ -1,122 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby2_noskew.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby2_noskew.q
->>>  set hive.map.aggr=false;
-No rows affected 
->>>  set hive.groupby.skewindata=false;
-No rows affected 
->>>  set mapred.reduce.tasks=31;
-No rows affected 
->>>  
->>>  CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM src 
-INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1);
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest_g2))) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1)) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION concat (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1) (TOK_FUNCTION sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))))) (TOK_GROUPBY (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: key, value'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: substr(key, 1, 1)'
-'                      type: string'
-'                      expr: substr(value, 5)'
-'                      type: string'
-'                sort order: ++'
-'                Map-reduce partition columns:'
-'                      expr: substr(key, 1, 1)'
-'                      type: string'
-'                tag: -1'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(DISTINCT KEY._col1:0._col0)'
-'                expr: sum(KEY._col1:0._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: complete'
-'          outputColumnNames: _col0, _col1, _col2'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: bigint'
-'                  expr: concat(_col0, _col2)'
-'                  type: string'
-'            outputColumnNames: _col0, _col1, _col2'
-'            Select Operator'
-'              expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: UDFToInteger(_col1)'
-'                    type: int'
-'                    expr: _col2'
-'                    type: string'
-'              outputColumnNames: _col0, _col1, _col2'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby2_noskew.dest_g2'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby2_noskew.dest_g2'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-''
-85 rows selected 
->>>  
->>>  FROM src 
-INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1);
-'_col0','_col1','_col2'
-No rows selected 
->>>  
->>>  SELECT dest_g2.* FROM dest_g2;
-'key','c1','c2'
-'0','1','00.0'
-'1','71','116414.0'
-'2','69','225571.0'
-'3','62','332004.0'
-'4','74','452763.0'
-'5','6','5397.0'
-'6','5','6398.0'
-'7','6','7735.0'
-'8','8','8762.0'
-'9','7','91047.0'
-10 rows selected 
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby2_noskew_multi_distinct.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby2_noskew_multi_distinct.q.out b/ql/src/test/results/beelinepositive/groupby2_noskew_multi_distinct.q.out
deleted file mode 100644
index d604e1e..0000000
--- a/ql/src/test/results/beelinepositive/groupby2_noskew_multi_distinct.q.out
+++ /dev/null
@@ -1,135 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby2_noskew_multi_distinct.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby2_noskew_multi_distinct.q
->>>  set hive.map.aggr=false;
-No rows affected 
->>>  set hive.groupby.skewindata=false;
-No rows affected 
->>>  set mapred.reduce.tasks=31;
-No rows affected 
->>>  
->>>  CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM src 
-INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1);
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest_g2))) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1)) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION concat (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1) (TOK_FUNCTION sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5)))) (TOK_SELEXPR (TOK_FUNCTIONDI sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION count (. (TOK_TABLE_OR_COL src) value)))) (TOK_GROUPBY (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: key, value'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: substr(key, 1, 1)'
-'                      type: string'
-'                      expr: substr(value, 5)'
-'                      type: string'
-'                sort order: ++'
-'                Map-reduce partition columns:'
-'                      expr: substr(key, 1, 1)'
-'                      type: string'
-'                tag: -1'
-'                value expressions:'
-'                      expr: value'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(DISTINCT KEY._col1:0._col0)'
-'                expr: sum(KEY._col1:1._col0)'
-'                expr: sum(DISTINCT KEY._col1:1._col0)'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: complete'
-'          outputColumnNames: _col0, _col1, _col2, _col3, _col4'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: bigint'
-'                  expr: concat(_col0, _col2)'
-'                  type: string'
-'                  expr: _col3'
-'                  type: double'
-'                  expr: _col4'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1, _col2, _col3, _col4'
-'            Select Operator'
-'              expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: UDFToInteger(_col1)'
-'                    type: int'
-'                    expr: _col2'
-'                    type: string'
-'                    expr: UDFToInteger(_col3)'
-'                    type: int'
-'                    expr: UDFToInteger(_col4)'
-'                    type: int'
-'              outputColumnNames: _col0, _col1, _col2, _col3, _col4'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby2_noskew_multi_distinct.dest_g2'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby2_noskew_multi_distinct.dest_g2'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-''
-98 rows selected 
->>>  
->>>  FROM src 
-INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1);
-'_col0','_col1','_col2','_col3','_col4'
-No rows selected 
->>>  
->>>  SELECT dest_g2.* FROM dest_g2;
-'key','c1','c2','c3','c4'
-'0','1','00.0','0','3'
-'1','71','116414.0','10044','115'
-'2','69','225571.0','15780','111'
-'3','62','332004.0','20119','99'
-'4','74','452763.0','30965','124'
-'5','6','5397.0','278','10'
-'6','5','6398.0','331','6'
-'7','6','7735.0','447','10'
-'8','8','8762.0','595','10'
-'9','7','91047.0','577','12'
-10 rows selected 
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby3.q.out b/ql/src/test/results/beelinepositive/groupby3.q.out
deleted file mode 100644
index 96c0a1f..0000000
--- a/ql/src/test/results/beelinepositive/groupby3.q.out
+++ /dev/null
@@ -1,204 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby3.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby3.q
->>>  set hive.map.aggr=false;
-No rows affected 
->>>  set hive.groupby.skewindata=true;
-No rows affected 
->>>  
->>>  CREATE TABLE dest1(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT 
-sum(substr(src.value,5)), 
-avg(substr(src.value,5)), 
-avg(DISTINCT substr(src.value,5)), 
-max(substr(src.value,5)), 
-min(substr(src.value,5)), 
-std(substr(src.value,5)), 
-stddev_samp(substr(src.value,5)), 
-variance(substr(src.value,5)), 
-var_samp(substr(src.value,5));
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION avg (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTIONDI avg (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION max (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION min (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION std (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION stddev_samp (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION variance (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION var_samp (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 depends on stages: Stage-2'
-'  Stage-3 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: value'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: substr(value, 5)'
-'                      type: string'
-'                sort order: +'
-'                Map-reduce partition columns:'
-'                      expr: substr(value, 5)'
-'                      type: string'
-'                tag: -1'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(KEY._col0:0._col0)'
-'                expr: avg(KEY._col0:0._col0)'
-'                expr: avg(DISTINCT KEY._col0:0._col0)'
-'                expr: max(KEY._col0:0._col0)'
-'                expr: min(KEY._col0:0._col0)'
-'                expr: std(KEY._col0:0._col0)'
-'                expr: stddev_samp(KEY._col0:0._col0)'
-'                expr: variance(KEY._col0:0._col0)'
-'                expr: var_samp(KEY._col0:0._col0)'
-'          bucketGroup: false'
-'          mode: partial1'
-'          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              sort order: '
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: double'
-'                    expr: _col1'
-'                    type: struct<count:bigint,sum:double>'
-'                    expr: _col2'
-'                    type: struct<count:bigint,sum:double>'
-'                    expr: _col3'
-'                    type: string'
-'                    expr: _col4'
-'                    type: string'
-'                    expr: _col5'
-'                    type: struct<count:bigint,sum:double,variance:double>'
-'                    expr: _col6'
-'                    type: struct<count:bigint,sum:double,variance:double>'
-'                    expr: _col7'
-'                    type: struct<count:bigint,sum:double,variance:double>'
-'                    expr: _col8'
-'                    type: struct<count:bigint,sum:double,variance:double>'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'                expr: avg(VALUE._col1)'
-'                expr: avg(VALUE._col2)'
-'                expr: max(VALUE._col3)'
-'                expr: min(VALUE._col4)'
-'                expr: std(VALUE._col5)'
-'                expr: stddev_samp(VALUE._col6)'
-'                expr: variance(VALUE._col7)'
-'                expr: var_samp(VALUE._col8)'
-'          bucketGroup: false'
-'          mode: final'
-'          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: double'
-'                  expr: _col1'
-'                  type: double'
-'                  expr: _col2'
-'                  type: double'
-'                  expr: _col3'
-'                  type: string'
-'                  expr: _col4'
-'                  type: string'
-'                  expr: _col5'
-'                  type: double'
-'                  expr: _col6'
-'                  type: double'
-'                  expr: _col7'
-'                  type: double'
-'                  expr: _col8'
-'                  type: double'
-'            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8'
-'            Select Operator'
-'              expressions:'
-'                    expr: _col0'
-'                    type: double'
-'                    expr: _col1'
-'                    type: double'
-'                    expr: _col2'
-'                    type: double'
-'                    expr: UDFToDouble(_col3)'
-'                    type: double'
-'                    expr: UDFToDouble(_col4)'
-'                    type: double'
-'                    expr: _col5'
-'                    type: double'
-'                    expr: _col6'
-'                    type: double'
-'                    expr: _col7'
-'                    type: double'
-'                    expr: _col8'
-'                    type: double'
-'              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby3.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby3.dest1'
-''
-'  Stage: Stage-3'
-'    Stats-Aggr Operator'
-''
-''
-158 rows selected 
->>>  
->>>  FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT 
-sum(substr(src.value,5)), 
-avg(substr(src.value,5)), 
-avg(DISTINCT substr(src.value,5)), 
-max(substr(src.value,5)), 
-min(substr(src.value,5)), 
-std(substr(src.value,5)), 
-stddev_samp(substr(src.value,5)), 
-variance(substr(src.value,5)), 
-var_samp(substr(src.value,5));
-'_col0','_col1','_col2','_col3','_col4','_col5','_col6','_col7','_col8'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1;
-'c1','c2','c3','c4','c5','c6','c7','c8','c9'
-'130091.0','260.182','256.10355987055016','98.0','0.0','142.92680950752379','143.06995106518903','20428.07287599999','20469.010897795582'
-1 row selected 
->>>  
->>>  
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby3_map.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby3_map.q.out b/ql/src/test/results/beelinepositive/groupby3_map.q.out
deleted file mode 100644
index d8df16a..0000000
--- a/ql/src/test/results/beelinepositive/groupby3_map.q.out
+++ /dev/null
@@ -1,190 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby3_map.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby3_map.q
->>>  set hive.map.aggr=true;
-No rows affected 
->>>  set hive.groupby.skewindata=false;
-No rows affected 
->>>  set mapred.reduce.tasks=31;
-No rows affected 
->>>  
->>>  CREATE TABLE dest1(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT 
-sum(substr(src.value,5)), 
-avg(substr(src.value,5)), 
-avg(DISTINCT substr(src.value,5)), 
-max(substr(src.value,5)), 
-min(substr(src.value,5)), 
-std(substr(src.value,5)), 
-stddev_samp(substr(src.value,5)), 
-variance(substr(src.value,5)), 
-var_samp(substr(src.value,5));
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION avg (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTIONDI avg (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION max (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION min (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION std (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION stddev_samp (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION variance (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION var_samp (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: value'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: sum(substr(value, 5))'
-'                      expr: avg(substr(value, 5))'
-'                      expr: avg(DISTINCT substr(value, 5))'
-'                      expr: max(substr(value, 5))'
-'                      expr: min(substr(value, 5))'
-'                      expr: std(substr(value, 5))'
-'                      expr: stddev_samp(substr(value, 5))'
-'                      expr: variance(substr(value, 5))'
-'                      expr: var_samp(substr(value, 5))'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: substr(value, 5)'
-'                      type: string'
-'                mode: hash'
-'                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col1'
-'                        type: double'
-'                        expr: _col2'
-'                        type: struct<count:bigint,sum:double>'
-'                        expr: _col3'
-'                        type: struct<count:bigint,sum:double>'
-'                        expr: _col4'
-'                        type: string'
-'                        expr: _col5'
-'                        type: string'
-'                        expr: _col6'
-'                        type: struct<count:bigint,sum:double,variance:double>'
-'                        expr: _col7'
-'                        type: struct<count:bigint,sum:double,variance:double>'
-'                        expr: _col8'
-'                        type: struct<count:bigint,sum:double,variance:double>'
-'                        expr: _col9'
-'                        type: struct<count:bigint,sum:double,variance:double>'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'                expr: avg(VALUE._col1)'
-'                expr: avg(DISTINCT KEY._col0:0._col0)'
-'                expr: max(VALUE._col3)'
-'                expr: min(VALUE._col4)'
-'                expr: std(VALUE._col5)'
-'                expr: stddev_samp(VALUE._col6)'
-'                expr: variance(VALUE._col7)'
-'                expr: var_samp(VALUE._col8)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: double'
-'                  expr: _col1'
-'                  type: double'
-'                  expr: _col2'
-'                  type: double'
-'                  expr: _col3'
-'                  type: string'
-'                  expr: _col4'
-'                  type: string'
-'                  expr: _col5'
-'                  type: double'
-'                  expr: _col6'
-'                  type: double'
-'                  expr: _col7'
-'                  type: double'
-'                  expr: _col8'
-'                  type: double'
-'            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8'
-'            Select Operator'
-'              expressions:'
-'                    expr: _col0'
-'                    type: double'
-'                    expr: _col1'
-'                    type: double'
-'                    expr: _col2'
-'                    type: double'
-'                    expr: UDFToDouble(_col3)'
-'                    type: double'
-'                    expr: UDFToDouble(_col4)'
-'                    type: double'
-'                    expr: _col5'
-'                    type: double'
-'                    expr: _col6'
-'                    type: double'
-'                    expr: _col7'
-'                    type: double'
-'                    expr: _col8'
-'                    type: double'
-'              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby3_map.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby3_map.dest1'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-''
-142 rows selected 
->>>  
->>>  FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT 
-sum(substr(src.value,5)), 
-avg(substr(src.value,5)), 
-avg(DISTINCT substr(src.value,5)), 
-max(substr(src.value,5)), 
-min(substr(src.value,5)), 
-std(substr(src.value,5)), 
-stddev_samp(substr(src.value,5)), 
-variance(substr(src.value,5)), 
-var_samp(substr(src.value,5));
-'_col0','_col1','_col2','_col3','_col4','_col5','_col6','_col7','_col8'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1;
-'c1','c2','c3','c4','c5','c6','c7','c8','c9'
-'130091.0','260.182','256.10355987055016','98.0','0.0','142.9268095075238','143.06995106518906','20428.072876','20469.01089779559'
-1 row selected 
->>>  
->>>  
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby3_map_multi_distinct.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby3_map_multi_distinct.q.out b/ql/src/test/results/beelinepositive/groupby3_map_multi_distinct.q.out
deleted file mode 100644
index 8b9c85e..0000000
--- a/ql/src/test/results/beelinepositive/groupby3_map_multi_distinct.q.out
+++ /dev/null
@@ -1,208 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby3_map_multi_distinct.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby3_map_multi_distinct.q
->>>  set hive.map.aggr=true;
-No rows affected 
->>>  set hive.groupby.skewindata=false;
-No rows affected 
->>>  set mapred.reduce.tasks=31;
-No rows affected 
->>>  
->>>  CREATE TABLE dest1(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE, c10 DOUBLE, c11 DOUBLE) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT 
-sum(substr(src.value,5)), 
-avg(substr(src.value,5)), 
-avg(DISTINCT substr(src.value,5)), 
-max(substr(src.value,5)), 
-min(substr(src.value,5)), 
-std(substr(src.value,5)), 
-stddev_samp(substr(src.value,5)), 
-variance(substr(src.value,5)), 
-var_samp(substr(src.value,5)), 
-sum(DISTINCT substr(src.value, 5)), 
-count(DISTINCT substr(src.value, 5));
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION avg (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTIONDI avg (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION max (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION min (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION std (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION stddev_samp (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION variance (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION var_samp (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTIONDI sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: value'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: sum(substr(value, 5))'
-'                      expr: avg(substr(value, 5))'
-'                      expr: avg(DISTINCT substr(value, 5))'
-'                      expr: max(substr(value, 5))'
-'                      expr: min(substr(value, 5))'
-'                      expr: std(substr(value, 5))'
-'                      expr: stddev_samp(substr(value, 5))'
-'                      expr: variance(substr(value, 5))'
-'                      expr: var_samp(substr(value, 5))'
-'                      expr: sum(DISTINCT substr(value, 5))'
-'                      expr: count(DISTINCT substr(value, 5))'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: substr(value, 5)'
-'                      type: string'
-'                mode: hash'
-'                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col1'
-'                        type: double'
-'                        expr: _col2'
-'                        type: struct<count:bigint,sum:double>'
-'                        expr: _col3'
-'                        type: struct<count:bigint,sum:double>'
-'                        expr: _col4'
-'                        type: string'
-'                        expr: _col5'
-'                        type: string'
-'                        expr: _col6'
-'                        type: struct<count:bigint,sum:double,variance:double>'
-'                        expr: _col7'
-'                        type: struct<count:bigint,sum:double,variance:double>'
-'                        expr: _col8'
-'                        type: struct<count:bigint,sum:double,variance:double>'
-'                        expr: _col9'
-'                        type: struct<count:bigint,sum:double,variance:double>'
-'                        expr: _col10'
-'                        type: double'
-'                        expr: _col11'
-'                        type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'                expr: avg(VALUE._col1)'
-'                expr: avg(DISTINCT KEY._col0:0._col0)'
-'                expr: max(VALUE._col3)'
-'                expr: min(VALUE._col4)'
-'                expr: std(VALUE._col5)'
-'                expr: stddev_samp(VALUE._col6)'
-'                expr: variance(VALUE._col7)'
-'                expr: var_samp(VALUE._col8)'
-'                expr: sum(DISTINCT KEY._col0:1._col0)'
-'                expr: count(DISTINCT KEY._col0:2._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: double'
-'                  expr: _col1'
-'                  type: double'
-'                  expr: _col2'
-'                  type: double'
-'                  expr: _col3'
-'                  type: string'
-'                  expr: _col4'
-'                  type: string'
-'                  expr: _col5'
-'                  type: double'
-'                  expr: _col6'
-'                  type: double'
-'                  expr: _col7'
-'                  type: double'
-'                  expr: _col8'
-'                  type: double'
-'                  expr: _col9'
-'                  type: double'
-'                  expr: _col10'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10'
-'            Select Operator'
-'              expressions:'
-'                    expr: _col0'
-'                    type: double'
-'                    expr: _col1'
-'                    type: double'
-'                    expr: _col2'
-'                    type: double'
-'                    expr: UDFToDouble(_col3)'
-'                    type: double'
-'                    expr: UDFToDouble(_col4)'
-'                    type: double'
-'                    expr: _col5'
-'                    type: double'
-'                    expr: _col6'
-'                    type: double'
-'                    expr: _col7'
-'                    type: double'
-'                    expr: _col8'
-'                    type: double'
-'                    expr: _col9'
-'                    type: double'
-'                    expr: UDFToDouble(_col10)'
-'                    type: double'
-'              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby3_map_multi_distinct.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby3_map_multi_distinct.dest1'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-''
-158 rows selected 
->>>  
->>>  FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT 
-sum(substr(src.value,5)), 
-avg(substr(src.value,5)), 
-avg(DISTINCT substr(src.value,5)), 
-max(substr(src.value,5)), 
-min(substr(src.value,5)), 
-std(substr(src.value,5)), 
-stddev_samp(substr(src.value,5)), 
-variance(substr(src.value,5)), 
-var_samp(substr(src.value,5)), 
-sum(DISTINCT substr(src.value, 5)), 
-count(DISTINCT substr(src.value, 5));
-'_col0','_col1','_col2','_col3','_col4','_col5','_col6','_col7','_col8','_col9','_col10'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1;
-'c1','c2','c3','c4','c5','c6','c7','c8','c9','c10','c11'
-'130091.0','260.182','256.10355987055016','98.0','0.0','142.9268095075238','143.06995106518906','20428.072876','20469.01089779559','79136.0','309.0'
-1 row selected 
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby3_map_skew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby3_map_skew.q.out b/ql/src/test/results/beelinepositive/groupby3_map_skew.q.out
deleted file mode 100644
index 249ba88..0000000
--- a/ql/src/test/results/beelinepositive/groupby3_map_skew.q.out
+++ /dev/null
@@ -1,242 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby3_map_skew.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby3_map_skew.q
->>>  set hive.map.aggr=true;
-No rows affected 
->>>  set hive.groupby.skewindata=true;
-No rows affected 
->>>  set mapred.reduce.tasks=31;
-No rows affected 
->>>  
->>>  CREATE TABLE dest1(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT 
-sum(substr(src.value,5)), 
-avg(substr(src.value,5)), 
-avg(DISTINCT substr(src.value,5)), 
-max(substr(src.value,5)), 
-min(substr(src.value,5)), 
-std(substr(src.value,5)), 
-stddev_samp(substr(src.value,5)), 
-variance(substr(src.value,5)), 
-var_samp(substr(src.value,5));
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION avg (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTIONDI avg (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION max (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION min (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION std (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION stddev_samp (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION variance (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION var_samp (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 depends on stages: Stage-2'
-'  Stage-3 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: value'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: sum(substr(value, 5))'
-'                      expr: avg(substr(value, 5))'
-'                      expr: avg(DISTINCT substr(value, 5))'
-'                      expr: max(substr(value, 5))'
-'                      expr: min(substr(value, 5))'
-'                      expr: std(substr(value, 5))'
-'                      expr: stddev_samp(substr(value, 5))'
-'                      expr: variance(substr(value, 5))'
-'                      expr: var_samp(substr(value, 5))'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: substr(value, 5)'
-'                      type: string'
-'                mode: hash'
-'                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col1'
-'                        type: double'
-'                        expr: _col2'
-'                        type: struct<count:bigint,sum:double>'
-'                        expr: _col3'
-'                        type: struct<count:bigint,sum:double>'
-'                        expr: _col4'
-'                        type: string'
-'                        expr: _col5'
-'                        type: string'
-'                        expr: _col6'
-'                        type: struct<count:bigint,sum:double,variance:double>'
-'                        expr: _col7'
-'                        type: struct<count:bigint,sum:double,variance:double>'
-'                        expr: _col8'
-'                        type: struct<count:bigint,sum:double,variance:double>'
-'                        expr: _col9'
-'                        type: struct<count:bigint,sum:double,variance:double>'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'                expr: avg(VALUE._col1)'
-'                expr: avg(DISTINCT KEY._col0:0._col0)'
-'                expr: max(VALUE._col3)'
-'                expr: min(VALUE._col4)'
-'                expr: std(VALUE._col5)'
-'                expr: stddev_samp(VALUE._col6)'
-'                expr: variance(VALUE._col7)'
-'                expr: var_samp(VALUE._col8)'
-'          bucketGroup: false'
-'          mode: partials'
-'          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              sort order: '
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: double'
-'                    expr: _col1'
-'                    type: struct<count:bigint,sum:double>'
-'                    expr: _col2'
-'                    type: struct<count:bigint,sum:double>'
-'                    expr: _col3'
-'                    type: string'
-'                    expr: _col4'
-'                    type: string'
-'                    expr: _col5'
-'                    type: struct<count:bigint,sum:double,variance:double>'
-'                    expr: _col6'
-'                    type: struct<count:bigint,sum:double,variance:double>'
-'                    expr: _col7'
-'                    type: struct<count:bigint,sum:double,variance:double>'
-'                    expr: _col8'
-'                    type: struct<count:bigint,sum:double,variance:double>'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'                expr: avg(VALUE._col1)'
-'                expr: avg(VALUE._col2)'
-'                expr: max(VALUE._col3)'
-'                expr: min(VALUE._col4)'
-'                expr: std(VALUE._col5)'
-'                expr: stddev_samp(VALUE._col6)'
-'                expr: variance(VALUE._col7)'
-'                expr: var_samp(VALUE._col8)'
-'          bucketGroup: false'
-'          mode: final'
-'          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: double'
-'                  expr: _col1'
-'                  type: double'
-'                  expr: _col2'
-'                  type: double'
-'                  expr: _col3'
-'                  type: string'
-'                  expr: _col4'
-'                  type: string'
-'                  expr: _col5'
-'                  type: double'
-'                  expr: _col6'
-'                  type: double'
-'                  expr: _col7'
-'                  type: double'
-'                  expr: _col8'
-'                  type: double'
-'            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8'
-'            Select Operator'
-'              expressions:'
-'                    expr: _col0'
-'                    type: double'
-'                    expr: _col1'
-'                    type: double'
-'                    expr: _col2'
-'                    type: double'
-'                    expr: UDFToDouble(_col3)'
-'                    type: double'
-'                    expr: UDFToDouble(_col4)'
-'                    type: double'
-'                    expr: _col5'
-'                    type: double'
-'                    expr: _col6'
-'                    type: double'
-'                    expr: _col7'
-'                    type: double'
-'                    expr: _col8'
-'                    type: double'
-'              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby3_map_skew.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby3_map_skew.dest1'
-''
-'  Stage: Stage-3'
-'    Stats-Aggr Operator'
-''
-''
-194 rows selected 
->>>  
->>>  FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT 
-sum(substr(src.value,5)), 
-avg(substr(src.value,5)), 
-avg(DISTINCT substr(src.value,5)), 
-max(substr(src.value,5)), 
-min(substr(src.value,5)), 
-std(substr(src.value,5)), 
-stddev_samp(substr(src.value,5)), 
-variance(substr(src.value,5)), 
-var_samp(substr(src.value,5));
-'_col0','_col1','_col2','_col3','_col4','_col5','_col6','_col7','_col8'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1;
-'c1','c2','c3','c4','c5','c6','c7','c8','c9'
-'130091.0','260.182','256.10355987055016','98.0','0.0','142.9268095075238','143.06995106518906','20428.072876','20469.01089779559'
-1 row selected 
->>>  
->>>  
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby3_noskew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby3_noskew.q.out b/ql/src/test/results/beelinepositive/groupby3_noskew.q.out
deleted file mode 100644
index 24e60ec..0000000
--- a/ql/src/test/results/beelinepositive/groupby3_noskew.q.out
+++ /dev/null
@@ -1,156 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby3_noskew.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby3_noskew.q
->>>  set hive.map.aggr=false;
-No rows affected 
->>>  
->>>  set hive.groupby.skewindata=false;
-No rows affected 
->>>  set mapred.reduce.tasks=31;
-No rows affected 
->>>  
->>>  CREATE TABLE dest1(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT 
-sum(substr(src.value,5)), 
-avg(substr(src.value,5)), 
-avg(DISTINCT substr(src.value,5)), 
-max(substr(src.value,5)), 
-min(substr(src.value,5)), 
-std(substr(src.value,5)), 
-stddev_samp(substr(src.value,5)), 
-variance(substr(src.value,5)), 
-var_samp(substr(src.value,5));
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION avg (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTIONDI avg (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION max (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION min (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION std (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION stddev_samp (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION variance (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION var_samp (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: value'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: substr(value, 5)'
-'                      type: string'
-'                sort order: +'
-'                tag: -1'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(KEY._col0:0._col0)'
-'                expr: avg(KEY._col0:0._col0)'
-'                expr: avg(DISTINCT KEY._col0:0._col0)'
-'                expr: max(KEY._col0:0._col0)'
-'                expr: min(KEY._col0:0._col0)'
-'                expr: std(KEY._col0:0._col0)'
-'                expr: stddev_samp(KEY._col0:0._col0)'
-'                expr: variance(KEY._col0:0._col0)'
-'                expr: var_samp(KEY._col0:0._col0)'
-'          bucketGroup: false'
-'          mode: complete'
-'          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: double'
-'                  expr: _col1'
-'                  type: double'
-'                  expr: _col2'
-'                  type: double'
-'                  expr: _col3'
-'                  type: string'
-'                  expr: _col4'
-'                  type: string'
-'                  expr: _col5'
-'                  type: double'
-'                  expr: _col6'
-'                  type: double'
-'                  expr: _col7'
-'                  type: double'
-'                  expr: _col8'
-'                  type: double'
-'            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8'
-'            Select Operator'
-'              expressions:'
-'                    expr: _col0'
-'                    type: double'
-'                    expr: _col1'
-'                    type: double'
-'                    expr: _col2'
-'                    type: double'
-'                    expr: UDFToDouble(_col3)'
-'                    type: double'
-'                    expr: UDFToDouble(_col4)'
-'                    type: double'
-'                    expr: _col5'
-'                    type: double'
-'                    expr: _col6'
-'                    type: double'
-'                    expr: _col7'
-'                    type: double'
-'                    expr: _col8'
-'                    type: double'
-'              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby3_noskew.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby3_noskew.dest1'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-''
-106 rows selected 
->>>  
->>>  FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT 
-sum(substr(src.value,5)), 
-avg(substr(src.value,5)), 
-avg(DISTINCT substr(src.value,5)), 
-max(substr(src.value,5)), 
-min(substr(src.value,5)), 
-std(substr(src.value,5)), 
-stddev_samp(substr(src.value,5)), 
-variance(substr(src.value,5)), 
-var_samp(substr(src.value,5));
-'_col0','_col1','_col2','_col3','_col4','_col5','_col6','_col7','_col8'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1;
-'c1','c2','c3','c4','c5','c6','c7','c8','c9'
-'130091.0','260.182','256.10355987055016','98.0','0.0','142.92680950752379','143.06995106518903','20428.07287599999','20469.010897795582'
-1 row selected 
->>>  
->>>  
->>>  
->>>  !record