Posted to commits@hive.apache.org by xu...@apache.org on 2015/01/09 04:13:09 UTC

svn commit: r1650435 - in /hive/branches/spark: ./ hbase-handler/ ql/src/test/queries/clientpositive/ ql/src/test/results/clientpositive/ ql/src/test/results/clientpositive/tez/

Author: xuefu
Date: Fri Jan  9 03:13:09 2015
New Revision: 1650435

URL: http://svn.apache.org/r1650435
Log:
HIVE-9290: Make some test results deterministic (Rui via Xuefu)
merged from trunk, r1650434
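
Background on the change: a LIMIT without an ORDER BY lets Hive return
any qualifying subset of rows, so the rows captured in a .q.out golden
file can vary from run to run and across execution engines (MapReduce,
Tez, Spark). Pinning a total order before the LIMIT makes the golden
files stable. A minimal sketch of the pattern applied throughout this
commit, using the alltypesorc test table:

    -- non-deterministic: any 20 distinct values may come back
    select distinct(cdouble) from alltypesorc limit 20;

    -- deterministic: alias the expression, sort on the alias, then limit
    select distinct(cdouble) as dis from alltypesorc order by dis limit 20;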

Modified:
    hive/branches/spark/   (props changed)
    hive/branches/spark/hbase-handler/pom.xml   (props changed)
    hive/branches/spark/ql/src/test/queries/clientpositive/limit_pushdown.q
    hive/branches/spark/ql/src/test/queries/clientpositive/optimize_nullscan.q
    hive/branches/spark/ql/src/test/queries/clientpositive/ppd_gby_join.q
    hive/branches/spark/ql/src/test/queries/clientpositive/vector_string_concat.q
    hive/branches/spark/ql/src/test/results/clientpositive/limit_pushdown.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/optimize_nullscan.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/ppd_gby_join.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/tez/limit_pushdown.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/tez/optimize_nullscan.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/tez/vector_string_concat.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/vector_string_concat.q.out

Propchange: hive/branches/spark/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Jan  9 03:13:09 2015
@@ -2,4 +2,4 @@
 /hive/branches/cbo:1605012-1627125
 /hive/branches/tez:1494760-1622766
 /hive/branches/vectorization:1466908-1527856
-/hive/trunk:1608589-1649690
+/hive/trunk:1608589-1649690,1650434

Propchange: hive/branches/spark/hbase-handler/pom.xml
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Jan  9 03:13:09 2015
@@ -2,4 +2,4 @@
 /hive/branches/cbo/hbase-handler/pom.xml:1605012-1627125
 /hive/branches/tez/hbase-handler/pom.xml:1494760-1622766
 /hive/branches/vectorization/hbase-handler/pom.xml:1466908-1527856
-/hive/trunk/hbase-handler/pom.xml:1494760-1537575,1608589-1633422,1633911,1634262,1634442,1634636,1634946,1636885,1636888,1637521,1641875,1642127,1642148,1643125,1644171,1644717,1644764,1644780,1646994,1648397,1648457
+/hive/trunk/hbase-handler/pom.xml:1494760-1537575,1608589-1633422,1633911,1634262,1634442,1634636,1634946,1636885,1636888,1637521,1641875,1642127,1642148,1643125,1644171,1644717,1644764,1644780,1646994,1648397,1648457,1650434

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/limit_pushdown.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/limit_pushdown.q?rev=1650435&r1=1650434&r2=1650435&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/limit_pushdown.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/limit_pushdown.q Fri Jan  9 03:13:09 2015
@@ -24,8 +24,8 @@ select value,avg(key + 1) from src group
 
 -- distincts
 explain
-select distinct(cdouble) from alltypesorc limit 20;
-select distinct(cdouble) from alltypesorc limit 20;
+select distinct(cdouble) as dis from alltypesorc order by dis limit 20;
+select distinct(cdouble) as dis from alltypesorc order by dis limit 20;
 
 explain
 select ctinyint, count(distinct(cdouble)) from alltypesorc group by ctinyint order by ctinyint limit 20;
@@ -69,5 +69,5 @@ select key,value,value,value,value,value
 
 -- flush for group-by
 explain
-select sum(key) as sum from src group by concat(key,value,value,value,value,value,value,value,value,value) limit 100;
-select sum(key) as sum from src group by concat(key,value,value,value,value,value,value,value,value,value) limit 100;
+select sum(key) as sum from src group by concat(key,value,value,value,value,value,value,value,value,value) order by sum limit 100;
+select sum(key) as sum from src group by concat(key,value,value,value,value,value,value,value,value,value) order by sum limit 100;
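
The "TopN Hash Memory Usage: 2.0E-5" entries in the plan diffs below
come from limit_pushdown.q's hive.limit.pushdown.memory.usage setting
(0.00002 rendered as 2.0E-5); the test presumably lowers it to force the
flush-for-group-by path, roughly:

    -- assumed from the test setup; the .q header is not shown in this diff
    set hive.limit.pushdown.memory.usage=0.00002;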

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/optimize_nullscan.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/optimize_nullscan.q?rev=1650435&r1=1650434&r2=1650435&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/optimize_nullscan.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/optimize_nullscan.q Fri Jan  9 03:13:09 2015
@@ -1,5 +1,5 @@
 set hive.cbo.enable=false;
--- SORT_BEFORE_DIFF
+-- SORT_QUERY_RESULTS
 
 -- Disable CBO here, because it messes with the cases specifically crafted for the optimizer.
 -- Instead, we could improve the optimizer to recognize more cases, e.g. filter before join.
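
A note on the two test directives swapped here: SORT_BEFORE_DIFF sorts
the entire .q.out file before diffing, while SORT_QUERY_RESULTS (as I
understand the qtest driver) sorts only each query's result rows, so
EXPLAIN output stays intact and plan regressions remain visible. The
same directive is added to ppd_gby_join.q below: for plan-sensitive
tests, sorting at diff time avoids adding an ORDER BY that would perturb
the plan under test. Sketch of its use in a .q file:

    -- SORT_QUERY_RESULTS

    -- rows may arrive in any order; the driver sorts each result
    -- block before comparing it against the recorded .q.out
    select key, count(1) from src group by key;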

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/ppd_gby_join.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/ppd_gby_join.q?rev=1650435&r1=1650434&r2=1650435&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/ppd_gby_join.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/ppd_gby_join.q Fri Jan  9 03:13:09 2015
@@ -1,6 +1,8 @@
 set hive.optimize.ppd=true;
 set hive.ppd.remove.duplicatefilters=false;
 
+-- SORT_QUERY_RESULTS
+
 EXPLAIN
 SELECT src1.c1, count(1) 
 FROM

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/vector_string_concat.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/vector_string_concat.q?rev=1650435&r1=1650434&r2=1650435&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/vector_string_concat.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/vector_string_concat.q Fri Jan  9 03:13:09 2015
@@ -88,9 +88,11 @@ EXPLAIN
 SELECT CONCAT(CONCAT(CONCAT('Quarter ',CAST(CAST((MONTH(dt) - 1) / 3 + 1 AS INT) AS STRING)),'-'),CAST(YEAR(dt) AS STRING)) AS `field`
     FROM vectortab2korc 
     GROUP BY CONCAT(CONCAT(CONCAT('Quarter ',CAST(CAST((MONTH(dt) - 1) / 3 + 1 AS INT) AS STRING)),'-'),CAST(YEAR(dt) AS STRING))
+    ORDER BY `field`
     LIMIT 50;
 
 SELECT CONCAT(CONCAT(CONCAT('Quarter ',CAST(CAST((MONTH(dt) - 1) / 3 + 1 AS INT) AS STRING)),'-'),CAST(YEAR(dt) AS STRING)) AS `field`
     FROM vectortab2korc 
     GROUP BY CONCAT(CONCAT(CONCAT('Quarter ',CAST(CAST((MONTH(dt) - 1) / 3 + 1 AS INT) AS STRING)),'-'),CAST(YEAR(dt) AS STRING))
+    ORDER BY `field`
     LIMIT 50;

Modified: hive/branches/spark/ql/src/test/results/clientpositive/limit_pushdown.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/limit_pushdown.q.out?rev=1650435&r1=1650434&r2=1650435&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/limit_pushdown.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/limit_pushdown.q.out Fri Jan  9 03:13:09 2015
@@ -344,11 +344,11 @@ val_129	130.0
 val_131	132.0
 PREHOOK: query: -- distincts
 explain
-select distinct(cdouble) from alltypesorc limit 20
+select distinct(cdouble) as dis from alltypesorc order by dis limit 20
 PREHOOK: type: QUERY
 POSTHOOK: query: -- distincts
 explain
-select distinct(cdouble) from alltypesorc limit 20
+select distinct(cdouble) as dis from alltypesorc order by dis limit 20
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -403,11 +403,11 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-PREHOOK: query: select distinct(cdouble) from alltypesorc limit 20
+PREHOOK: query: select distinct(cdouble) as dis from alltypesorc order by dis limit 20
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
-POSTHOOK: query: select distinct(cdouble) from alltypesorc limit 20
+POSTHOOK: query: select distinct(cdouble) as dis from alltypesorc order by dis limit 20
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
@@ -1175,15 +1175,16 @@ POSTHOOK: Input: default@src
 187	val_187	val_187	val_187	val_187	val_187	val_187	val_187	val_187
 PREHOOK: query: -- flush for group-by
 explain
-select sum(key) as sum from src group by concat(key,value,value,value,value,value,value,value,value,value) limit 100
+select sum(key) as sum from src group by concat(key,value,value,value,value,value,value,value,value,value) order by sum limit 100
 PREHOOK: type: QUERY
 POSTHOOK: query: -- flush for group-by
 explain
-select sum(key) as sum from src group by concat(key,value,value,value,value,value,value,value,value,value) limit 100
+select sum(key) as sum from src group by concat(key,value,value,value,value,value,value,value,value,value) order by sum limit 100
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
 
 STAGE PLANS:
   Stage: Stage-1
@@ -1201,7 +1202,6 @@ STAGE PLANS:
                 sort order: +
                 Map-reduce partition columns: _col0 (type: string)
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                TopN Hash Memory Usage: 2.0E-5
                 value expressions: _col1 (type: string)
       Reduce Operator Tree:
         Group By Operator
@@ -1214,16 +1214,37 @@ STAGE PLANS:
             expressions: _col1 (type: double)
             outputColumnNames: _col0
             Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-            Limit
-              Number of rows: 100
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              key expressions: _col0 (type: double)
+              sort order: +
+              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 2.0E-5
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: double)
+          outputColumnNames: _col0
+          Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+          Limit
+            Number of rows: 100
+            Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
               Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
-                table:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator
@@ -1231,29 +1252,35 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-PREHOOK: query: select sum(key) as sum from src group by concat(key,value,value,value,value,value,value,value,value,value) limit 100
+PREHOOK: query: select sum(key) as sum from src group by concat(key,value,value,value,value,value,value,value,value,value) order by sum limit 100
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(key) as sum from src group by concat(key,value,value,value,value,value,value,value,value,value) limit 100
+POSTHOOK: query: select sum(key) as sum from src group by concat(key,value,value,value,value,value,value,value,value,value) order by sum limit 100
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 #### A masked pattern was here ####
 0.0
 10.0
+102.0
+105.0
 105.0
 11.0
 111.0
 114.0
-1150.0
+116.0
 116.0
 126.0
 131.0
 133.0
+134.0
 136.0
 143.0
+144.0
 145.0
+15.0
 150.0
+152.0
 153.0
 155.0
 156.0
@@ -1263,6 +1290,8 @@ POSTHOOK: Input: default@src
 162.0
 163.0
 166.0
+166.0
+168.0
 168.0
 17.0
 170.0
@@ -1275,67 +1304,59 @@ POSTHOOK: Input: default@src
 189.0
 19.0
 190.0
+190.0
 192.0
 194.0
+194.0
 196.0
+196.0
+2.0
 20.0
 200.0
 201.0
 202.0
 206.0
 208.0
+210.0
 214.0
 218.0
 222.0
 226.0
 226.0
 228.0
-235.0
-236.0
 24.0
-240.0
-250.0
-258.0
-268.0
-274.0
-292.0
-298.0
+27.0
+28.0
+30.0
 30.0
-304.0
-328.0
-330.0
-344.0
-348.0
-350.0
-352.0
-357.0
-358.0
+33.0
+34.0
 36.0
-382.0
-384.0
-390.0
-394.0
-400.0
-406.0
-410.0
-414.0
-418.0
-426.0
-432.0
-434.0
-438.0
-442.0
-446.0
-448.0
-458.0
-466.0
-474.0
-476.0
-478.0
-501.0
-552.0
-561.0
-579.0
-597.0
-624.0
-676.0
+4.0
+41.0
+43.0
+44.0
+47.0
+48.0
+52.0
+53.0
+54.0
+57.0
+64.0
+65.0
+66.0
+69.0
+74.0
+74.0
+77.0
+78.0
+8.0
+80.0
+82.0
+84.0
+85.0
+86.0
+87.0
+9.0
+92.0
+96.0
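
The plan diff above shows the cost of the fix on MapReduce: the added
ORDER BY splits the job into a second stage (Stage-2) that performs the
global sort, with intermediate data staged as SequenceFile /
LazyBinarySerDe, and the TopN hash pushdown moves from the group-by
reduce sink to the new order-by reduce sink. Under Tez (tez/ results
below) the same change appears as an extra Reducer 3 in the existing DAG
rather than a new stage. A simplified query to reproduce the stage
split, with the concat shortened for readability:

    explain
    select sum(key) as sum from src
    group by concat(key, value)
    order by sum
    limit 100;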

Modified: hive/branches/spark/ql/src/test/results/clientpositive/optimize_nullscan.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/optimize_nullscan.q.out?rev=1650435&r1=1650434&r2=1650435&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/optimize_nullscan.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/optimize_nullscan.q.out Fri Jan  9 03:13:09 2015
@@ -1,4 +1,4 @@
-PREHOOK: query: -- SORT_BEFORE_DIFF
+PREHOOK: query: -- SORT_QUERY_RESULTS
 
 -- Disable CBO here, because it messes with the cases specifically crafted for the optimizer.
 -- Instead, we could improve the optimizer to recognize more cases, e.g. filter before join.
@@ -6,7 +6,7 @@ PREHOOK: query: -- SORT_BEFORE_DIFF
 explain extended 
 select key from src where false
 PREHOOK: type: QUERY
-POSTHOOK: query: -- SORT_BEFORE_DIFF
+POSTHOOK: query: -- SORT_QUERY_RESULTS
 
 -- Disable CBO here, because it messes with the cases specifically crafted for the optimizer.
 -- Instead, we could improve the optimizer to recognize more cases, e.g. filter before join.
@@ -1218,8 +1218,8 @@ POSTHOOK: Input: default@srcpart@ds=2008
 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 #### A masked pattern was here ####
-2000
 0
+2000
 Warning: Shuffle Join JOIN[11][tables = [a, b]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: explain extended
 select * from (select key from src where false) a left outer join (select value from srcpart limit 0) b

Modified: hive/branches/spark/ql/src/test/results/clientpositive/ppd_gby_join.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/ppd_gby_join.q.out?rev=1650435&r1=1650434&r2=1650435&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/ppd_gby_join.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/ppd_gby_join.q.out Fri Jan  9 03:13:09 2015
@@ -1,4 +1,6 @@
-PREHOOK: query: EXPLAIN
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+EXPLAIN
 SELECT src1.c1, count(1) 
 FROM
 (SELECT src.key AS c1, src.value AS c2 from src where src.key > '1' ) src1
@@ -8,7 +10,9 @@ ON src1.c1 = src2.c3 AND src1.c1 < '400'
 WHERE src1.c1 > '20' AND (src1.c2 < 'val_50' OR src1.c1 > '2') AND (src2.c3 > '50' OR src1.c1 < '50') AND (src2.c3 <> '4')
 GROUP BY src1.c1
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+EXPLAIN
 SELECT src1.c1, count(1) 
 FROM
 (SELECT src.key AS c1, src.value AS c2 from src where src.key > '1' ) src1

Modified: hive/branches/spark/ql/src/test/results/clientpositive/tez/limit_pushdown.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/tez/limit_pushdown.q.out?rev=1650435&r1=1650434&r2=1650435&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/tez/limit_pushdown.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/tez/limit_pushdown.q.out Fri Jan  9 03:13:09 2015
@@ -368,11 +368,11 @@ val_129	130.0
 val_131	132.0
 PREHOOK: query: -- distincts
 explain
-select distinct(cdouble) from alltypesorc limit 20
+select distinct(cdouble) as dis from alltypesorc order by dis limit 20
 PREHOOK: type: QUERY
 POSTHOOK: query: -- distincts
 explain
-select distinct(cdouble) from alltypesorc limit 20
+select distinct(cdouble) as dis from alltypesorc order by dis limit 20
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -433,11 +433,11 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-PREHOOK: query: select distinct(cdouble) from alltypesorc limit 20
+PREHOOK: query: select distinct(cdouble) as dis from alltypesorc order by dis limit 20
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
-POSTHOOK: query: select distinct(cdouble) from alltypesorc limit 20
+POSTHOOK: query: select distinct(cdouble) as dis from alltypesorc order by dis limit 20
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
@@ -1203,11 +1203,11 @@ POSTHOOK: Input: default@src
 187	val_187	val_187	val_187	val_187	val_187	val_187	val_187	val_187
 PREHOOK: query: -- flush for group-by
 explain
-select sum(key) as sum from src group by concat(key,value,value,value,value,value,value,value,value,value) limit 100
+select sum(key) as sum from src group by concat(key,value,value,value,value,value,value,value,value,value) order by sum limit 100
 PREHOOK: type: QUERY
 POSTHOOK: query: -- flush for group-by
 explain
-select sum(key) as sum from src group by concat(key,value,value,value,value,value,value,value,value,value) limit 100
+select sum(key) as sum from src group by concat(key,value,value,value,value,value,value,value,value,value) order by sum limit 100
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -1218,6 +1218,7 @@ STAGE PLANS:
     Tez
       Edges:
         Reducer 2 <- Map 1 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
@@ -1234,7 +1235,6 @@ STAGE PLANS:
                       sort order: +
                       Map-reduce partition columns: _col0 (type: string)
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                      TopN Hash Memory Usage: 2.0E-5
                       value expressions: _col1 (type: string)
         Reducer 2 
             Reduce Operator Tree:
@@ -1248,16 +1248,27 @@ STAGE PLANS:
                   expressions: _col1 (type: double)
                   outputColumnNames: _col0
                   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                  Limit
-                    Number of rows: 100
+                  Reduce Output Operator
+                    key expressions: _col0 (type: double)
+                    sort order: +
+                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                    TopN Hash Memory Usage: 2.0E-5
+        Reducer 3 
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: double)
+                outputColumnNames: _col0
+                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                Limit
+                  Number of rows: 100
+                  Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
                     Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
-                    File Output Operator
-                      compressed: false
-                      Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
-                      table:
-                          input format: org.apache.hadoop.mapred.TextInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator
@@ -1265,29 +1276,35 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-PREHOOK: query: select sum(key) as sum from src group by concat(key,value,value,value,value,value,value,value,value,value) limit 100
+PREHOOK: query: select sum(key) as sum from src group by concat(key,value,value,value,value,value,value,value,value,value) order by sum limit 100
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(key) as sum from src group by concat(key,value,value,value,value,value,value,value,value,value) limit 100
+POSTHOOK: query: select sum(key) as sum from src group by concat(key,value,value,value,value,value,value,value,value,value) order by sum limit 100
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 #### A masked pattern was here ####
 0.0
 10.0
+102.0
+105.0
 105.0
 11.0
 111.0
 114.0
-1150.0
+116.0
 116.0
 126.0
 131.0
 133.0
+134.0
 136.0
 143.0
+144.0
 145.0
+15.0
 150.0
+152.0
 153.0
 155.0
 156.0
@@ -1297,6 +1314,8 @@ POSTHOOK: Input: default@src
 162.0
 163.0
 166.0
+166.0
+168.0
 168.0
 17.0
 170.0
@@ -1309,67 +1328,59 @@ POSTHOOK: Input: default@src
 189.0
 19.0
 190.0
+190.0
 192.0
 194.0
+194.0
 196.0
+196.0
+2.0
 20.0
 200.0
 201.0
 202.0
 206.0
 208.0
+210.0
 214.0
 218.0
 222.0
 226.0
 226.0
 228.0
-235.0
-236.0
 24.0
-240.0
-250.0
-258.0
-268.0
-274.0
-292.0
-298.0
+27.0
+28.0
+30.0
 30.0
-304.0
-328.0
-330.0
-344.0
-348.0
-350.0
-352.0
-357.0
-358.0
+33.0
+34.0
 36.0
-382.0
-384.0
-390.0
-394.0
-400.0
-406.0
-410.0
-414.0
-418.0
-426.0
-432.0
-434.0
-438.0
-442.0
-446.0
-448.0
-458.0
-466.0
-474.0
-476.0
-478.0
-501.0
-552.0
-561.0
-579.0
-597.0
-624.0
-676.0
+4.0
+41.0
+43.0
+44.0
+47.0
+48.0
+52.0
+53.0
+54.0
+57.0
+64.0
+65.0
+66.0
+69.0
+74.0
+74.0
+77.0
+78.0
+8.0
+80.0
+82.0
+84.0
+85.0
+86.0
+87.0
+9.0
+92.0
+96.0

Modified: hive/branches/spark/ql/src/test/results/clientpositive/tez/optimize_nullscan.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/tez/optimize_nullscan.q.out?rev=1650435&r1=1650434&r2=1650435&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/tez/optimize_nullscan.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/tez/optimize_nullscan.q.out Fri Jan  9 03:13:09 2015
@@ -1,4 +1,4 @@
-PREHOOK: query: -- SORT_BEFORE_DIFF
+PREHOOK: query: -- SORT_QUERY_RESULTS
 
 -- Disable CBO here, because it messes with the cases specifically crafted for the optimizer.
 -- Instead, we could improve the optimizer to recognize more cases, e.g. filter before join.
@@ -6,7 +6,7 @@ PREHOOK: query: -- SORT_BEFORE_DIFF
 explain extended 
 select key from src where false
 PREHOOK: type: QUERY
-POSTHOOK: query: -- SORT_BEFORE_DIFF
+POSTHOOK: query: -- SORT_QUERY_RESULTS
 
 -- Disable CBO here, because it messes with the cases specifically crafted for the optimizer.
 -- Instead, we could improve the optimizer to recognize more cases, e.g. filter before join.

Modified: hive/branches/spark/ql/src/test/results/clientpositive/tez/vector_string_concat.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/tez/vector_string_concat.q.out?rev=1650435&r1=1650434&r2=1650435&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/tez/vector_string_concat.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/tez/vector_string_concat.q.out Fri Jan  9 03:13:09 2015
@@ -269,12 +269,14 @@ PREHOOK: query: EXPLAIN
 SELECT CONCAT(CONCAT(CONCAT('Quarter ',CAST(CAST((MONTH(dt) - 1) / 3 + 1 AS INT) AS STRING)),'-'),CAST(YEAR(dt) AS STRING)) AS `field`
     FROM vectortab2korc 
     GROUP BY CONCAT(CONCAT(CONCAT('Quarter ',CAST(CAST((MONTH(dt) - 1) / 3 + 1 AS INT) AS STRING)),'-'),CAST(YEAR(dt) AS STRING))
+    ORDER BY `field`
     LIMIT 50
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 SELECT CONCAT(CONCAT(CONCAT('Quarter ',CAST(CAST((MONTH(dt) - 1) / 3 + 1 AS INT) AS STRING)),'-'),CAST(YEAR(dt) AS STRING)) AS `field`
     FROM vectortab2korc 
     GROUP BY CONCAT(CONCAT(CONCAT('Quarter ',CAST(CAST((MONTH(dt) - 1) / 3 + 1 AS INT) AS STRING)),'-'),CAST(YEAR(dt) AS STRING))
+    ORDER BY `field`
     LIMIT 50
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -286,6 +288,7 @@ STAGE PLANS:
     Tez
       Edges:
         Reducer 2 <- Map 1 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
@@ -315,20 +318,27 @@ STAGE PLANS:
                 mode: mergepartial
                 outputColumnNames: _col0
                 Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
-                Select Operator
-                  expressions: _col0 (type: string)
-                  outputColumnNames: _col0
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
                   Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
-                  Limit
-                    Number of rows: 50
+            Execution mode: vectorized
+        Reducer 3 
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
+                Limit
+                  Number of rows: 50
+                  Statistics: Num rows: 50 Data size: 22950 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
                     Statistics: Num rows: 50 Data size: 22950 Basic stats: COMPLETE Column stats: NONE
-                    File Output Operator
-                      compressed: false
-                      Statistics: Num rows: 50 Data size: 22950 Basic stats: COMPLETE Column stats: NONE
-                      table:
-                          input format: org.apache.hadoop.mapred.TextInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized
 
   Stage: Stage-0
@@ -340,6 +350,7 @@ STAGE PLANS:
 PREHOOK: query: SELECT CONCAT(CONCAT(CONCAT('Quarter ',CAST(CAST((MONTH(dt) - 1) / 3 + 1 AS INT) AS STRING)),'-'),CAST(YEAR(dt) AS STRING)) AS `field`
     FROM vectortab2korc 
     GROUP BY CONCAT(CONCAT(CONCAT('Quarter ',CAST(CAST((MONTH(dt) - 1) / 3 + 1 AS INT) AS STRING)),'-'),CAST(YEAR(dt) AS STRING))
+    ORDER BY `field`
     LIMIT 50
 PREHOOK: type: QUERY
 PREHOOK: Input: default@vectortab2korc
@@ -347,6 +358,7 @@ PREHOOK: Input: default@vectortab2korc
 POSTHOOK: query: SELECT CONCAT(CONCAT(CONCAT('Quarter ',CAST(CAST((MONTH(dt) - 1) / 3 + 1 AS INT) AS STRING)),'-'),CAST(YEAR(dt) AS STRING)) AS `field`
     FROM vectortab2korc 
     GROUP BY CONCAT(CONCAT(CONCAT('Quarter ',CAST(CAST((MONTH(dt) - 1) / 3 + 1 AS INT) AS STRING)),'-'),CAST(YEAR(dt) AS STRING))
+    ORDER BY `field`
     LIMIT 50
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@vectortab2korc

Modified: hive/branches/spark/ql/src/test/results/clientpositive/vector_string_concat.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/vector_string_concat.q.out?rev=1650435&r1=1650434&r2=1650435&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/vector_string_concat.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/vector_string_concat.q.out Fri Jan  9 03:13:09 2015
@@ -285,17 +285,20 @@ PREHOOK: query: EXPLAIN
 SELECT CONCAT(CONCAT(CONCAT('Quarter ',CAST(CAST((MONTH(dt) - 1) / 3 + 1 AS INT) AS STRING)),'-'),CAST(YEAR(dt) AS STRING)) AS `field`
     FROM vectortab2korc 
     GROUP BY CONCAT(CONCAT(CONCAT('Quarter ',CAST(CAST((MONTH(dt) - 1) / 3 + 1 AS INT) AS STRING)),'-'),CAST(YEAR(dt) AS STRING))
+    ORDER BY `field`
     LIMIT 50
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 SELECT CONCAT(CONCAT(CONCAT('Quarter ',CAST(CAST((MONTH(dt) - 1) / 3 + 1 AS INT) AS STRING)),'-'),CAST(YEAR(dt) AS STRING)) AS `field`
     FROM vectortab2korc 
     GROUP BY CONCAT(CONCAT(CONCAT('Quarter ',CAST(CAST((MONTH(dt) - 1) / 3 + 1 AS INT) AS STRING)),'-'),CAST(YEAR(dt) AS STRING))
+    ORDER BY `field`
     LIMIT 50
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
 
 STAGE PLANS:
   Stage: Stage-1
@@ -325,20 +328,36 @@ STAGE PLANS:
           mode: mergepartial
           outputColumnNames: _col0
           Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
-          Select Operator
-            expressions: _col0 (type: string)
-            outputColumnNames: _col0
-            Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
-            Limit
-              Number of rows: 50
+          File Output Operator
+            compressed: false
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              key expressions: _col0 (type: string)
+              sort order: +
+              Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: string)
+          outputColumnNames: _col0
+          Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
+          Limit
+            Number of rows: 50
+            Statistics: Num rows: 50 Data size: 22950 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
               Statistics: Num rows: 50 Data size: 22950 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                Statistics: Num rows: 50 Data size: 22950 Basic stats: COMPLETE Column stats: NONE
-                table:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator
@@ -349,6 +368,7 @@ STAGE PLANS:
 PREHOOK: query: SELECT CONCAT(CONCAT(CONCAT('Quarter ',CAST(CAST((MONTH(dt) - 1) / 3 + 1 AS INT) AS STRING)),'-'),CAST(YEAR(dt) AS STRING)) AS `field`
     FROM vectortab2korc 
     GROUP BY CONCAT(CONCAT(CONCAT('Quarter ',CAST(CAST((MONTH(dt) - 1) / 3 + 1 AS INT) AS STRING)),'-'),CAST(YEAR(dt) AS STRING))
+    ORDER BY `field`
     LIMIT 50
 PREHOOK: type: QUERY
 PREHOOK: Input: default@vectortab2korc
@@ -356,6 +376,7 @@ PREHOOK: Input: default@vectortab2korc
 POSTHOOK: query: SELECT CONCAT(CONCAT(CONCAT('Quarter ',CAST(CAST((MONTH(dt) - 1) / 3 + 1 AS INT) AS STRING)),'-'),CAST(YEAR(dt) AS STRING)) AS `field`
     FROM vectortab2korc 
     GROUP BY CONCAT(CONCAT(CONCAT('Quarter ',CAST(CAST((MONTH(dt) - 1) / 3 + 1 AS INT) AS STRING)),'-'),CAST(YEAR(dt) AS STRING))
+    ORDER BY `field`
     LIMIT 50
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@vectortab2korc