Posted to commits@hive.apache.org by ha...@apache.org on 2014/05/30 19:37:15 UTC

svn commit: r1598678 [9/31] - in /hive/trunk: cli/src/java/org/apache/hadoop/hive/cli/ common/src/java/org/apache/hadoop/hive/common/io/ itests/util/src/main/java/org/apache/hadoop/hive/ql/ ql/src/java/org/apache/hadoop/hive/ql/ ql/src/java/org/apache/...

Modified: hive/trunk/ql/src/test/results/clientpositive/correlationoptimizer7.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/correlationoptimizer7.q.out?rev=1598678&r1=1598677&r2=1598678&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/correlationoptimizer7.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/correlationoptimizer7.q.out Fri May 30 17:37:05 2014
@@ -1,28 +1,32 @@
-PREHOOK: query: EXPLAIN
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+EXPLAIN
 SELECT xx.key, xx.cnt, yy.key, yy.value
 FROM (SELECT x.key AS key, count(1) AS cnt
       FROM src x JOIN src1 y ON (x.key = y.key)
       GROUP BY x.key) xx
 JOIN src1 yy
-ON xx.key=yy.key ORDER BY xx.key, xx.cnt, yy.key, yy.value
+ON xx.key=yy.key
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+EXPLAIN
 SELECT xx.key, xx.cnt, yy.key, yy.value
 FROM (SELECT x.key AS key, count(1) AS cnt
       FROM src x JOIN src1 y ON (x.key = y.key)
       GROUP BY x.key) xx
 JOIN src1 yy
-ON xx.key=yy.key ORDER BY xx.key, xx.cnt, yy.key, yy.value
+ON xx.key=yy.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-9 is a root stage
-  Stage-4 depends on stages: Stage-9
-  Stage-8 depends on stages: Stage-4
-  Stage-2 depends on stages: Stage-8
-  Stage-0 depends on stages: Stage-2
+  Stage-8 is a root stage
+  Stage-3 depends on stages: Stage-8
+  Stage-7 depends on stages: Stage-3
+  Stage-5 depends on stages: Stage-7
+  Stage-0 depends on stages: Stage-5
 
 STAGE PLANS:
-  Stage: Stage-9
+  Stage: Stage-8
     Map Reduce Local Work
       Alias -> Map Local Tables:
         xx:y 
@@ -41,7 +45,7 @@ STAGE PLANS:
                 0 key (type: string)
                 1 key (type: string)
 
-  Stage: Stage-4
+  Stage: Stage-3
     Map Reduce
       Map Operator Tree:
           TableScan
@@ -94,7 +98,7 @@ STAGE PLANS:
                   output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                   serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
-  Stage: Stage-8
+  Stage: Stage-7
     Map Reduce Local Work
       Alias -> Map Local Tables:
         yy 
@@ -113,7 +117,7 @@ STAGE PLANS:
                 0 _col0 (type: string)
                 1 key (type: string)
 
-  Stage: Stage-2
+  Stage: Stage-5
     Map Reduce
       Map Operator Tree:
           TableScan
@@ -132,23 +136,15 @@ STAGE PLANS:
                 expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string), _col3 (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3
                 Statistics: Num rows: 34 Data size: 3459 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string), _col3 (type: string)
-                  sort order: ++++
+                File Output Operator
+                  compressed: false
                   Statistics: Num rows: 34 Data size: 3459 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string), _col3 (type: string)
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
       Local Work:
         Map Reduce Local Work
-      Reduce Operator Tree:
-        Extract
-          Statistics: Num rows: 34 Data size: 3459 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 34 Data size: 3459 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator
@@ -161,7 +157,7 @@ FROM (SELECT x.key AS key, count(1) AS c
       FROM src x JOIN src1 y ON (x.key = y.key)
       GROUP BY x.key) xx
 JOIN src1 yy
-ON xx.key=yy.key ORDER BY xx.key, xx.cnt, yy.key, yy.value
+ON xx.key=yy.key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Input: default@src1
@@ -171,7 +167,7 @@ FROM (SELECT x.key AS key, count(1) AS c
       FROM src x JOIN src1 y ON (x.key = y.key)
       GROUP BY x.key) xx
 JOIN src1 yy
-ON xx.key=yy.key ORDER BY xx.key, xx.cnt, yy.key, yy.value
+ON xx.key=yy.key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 POSTHOOK: Input: default@src1
@@ -197,7 +193,7 @@ FROM (SELECT x.key AS key, count(1) AS c
       FROM src x JOIN src1 y ON (x.key = y.key)
       GROUP BY x.key) xx
 JOIN src1 yy
-ON xx.key=yy.key ORDER BY xx.key, xx.cnt, yy.key, yy.value
+ON xx.key=yy.key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 SELECT xx.key, xx.cnt, yy.key, yy.value
@@ -205,16 +201,15 @@ FROM (SELECT x.key AS key, count(1) AS c
       FROM src x JOIN src1 y ON (x.key = y.key)
       GROUP BY x.key) xx
 JOIN src1 yy
-ON xx.key=yy.key ORDER BY xx.key, xx.cnt, yy.key, yy.value
+ON xx.key=yy.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-6 is a root stage
-  Stage-1 depends on stages: Stage-6
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
+  Stage-5 is a root stage
+  Stage-1 depends on stages: Stage-5
+  Stage-0 depends on stages: Stage-1
 
 STAGE PLANS:
-  Stage: Stage-6
+  Stage: Stage-5
     Map Reduce Local Work
       Alias -> Map Local Tables:
         xx:y 
@@ -306,10 +301,11 @@ STAGE PLANS:
                     Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                     File Output Operator
                       compressed: false
+                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                       table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
           Mux Operator
             Statistics: Num rows: 96 Data size: 9913 Basic stats: COMPLETE Column stats: NONE
             Join Operator
@@ -326,30 +322,11 @@ STAGE PLANS:
                 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 File Output Operator
                   compressed: false
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string), _col3 (type: string)
-              sort order: ++++
-              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-              value expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string), _col3 (type: string)
-      Reduce Operator Tree:
-        Extract
-          Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator
@@ -362,7 +339,7 @@ FROM (SELECT x.key AS key, count(1) AS c
       FROM src x JOIN src1 y ON (x.key = y.key)
       GROUP BY x.key) xx
 JOIN src1 yy
-ON xx.key=yy.key ORDER BY xx.key, xx.cnt, yy.key, yy.value
+ON xx.key=yy.key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Input: default@src1
@@ -372,7 +349,7 @@ FROM (SELECT x.key AS key, count(1) AS c
       FROM src x JOIN src1 y ON (x.key = y.key)
       GROUP BY x.key) xx
 JOIN src1 yy
-ON xx.key=yy.key ORDER BY xx.key, xx.cnt, yy.key, yy.value
+ON xx.key=yy.key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 POSTHOOK: Input: default@src1
@@ -396,39 +373,39 @@ PREHOOK: query: -- Without correlation o
 -- The first one is a MapJoin and Aggregation (in the Reduce Phase).
 -- The second one is another MapJoin. The third one is for ordering.
 -- With the correlation optimizer, right now, we have
--- 2 MR jobs. The first one will evaluate the sub-query xx and the join of
--- xx and yy. The second one will do the ORDER BY.
+-- 1 MR job, evaluating the sub-query xx and the join of
+-- xx and yy.
 EXPLAIN
 SELECT xx.key, xx.cnt, yy.key, yy.value
 FROM (SELECT x.key AS key, count(1) AS cnt
       FROM src x JOIN src1 y ON (x.key = y.key)
       GROUP BY x.key) xx
 JOIN src1 yy
-ON xx.key=yy.key ORDER BY xx.key, xx.cnt, yy.key, yy.value
+ON xx.key=yy.key
 PREHOOK: type: QUERY
 POSTHOOK: query: -- Without correlation optimizer, we will have 3 MR jobs.
 -- The first one is a MapJoin and Aggregation (in the Reduce Phase).
 -- The second one is another MapJoin. The third one is for ordering.
 -- With the correlation optimizer, right now, we have
--- 2 MR jobs. The first one will evaluate the sub-query xx and the join of
--- xx and yy. The second one will do the ORDER BY.
+-- 1 MR job, evaluating the sub-query xx and the join of
+-- xx and yy.
 EXPLAIN
 SELECT xx.key, xx.cnt, yy.key, yy.value
 FROM (SELECT x.key AS key, count(1) AS cnt
       FROM src x JOIN src1 y ON (x.key = y.key)
       GROUP BY x.key) xx
 JOIN src1 yy
-ON xx.key=yy.key ORDER BY xx.key, xx.cnt, yy.key, yy.value
+ON xx.key=yy.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-9 is a root stage
-  Stage-4 depends on stages: Stage-9
-  Stage-8 depends on stages: Stage-4
-  Stage-2 depends on stages: Stage-8
-  Stage-0 depends on stages: Stage-2
+  Stage-8 is a root stage
+  Stage-3 depends on stages: Stage-8
+  Stage-7 depends on stages: Stage-3
+  Stage-5 depends on stages: Stage-7
+  Stage-0 depends on stages: Stage-5
 
 STAGE PLANS:
-  Stage: Stage-9
+  Stage: Stage-8
     Map Reduce Local Work
       Alias -> Map Local Tables:
         xx:y 
@@ -447,7 +424,7 @@ STAGE PLANS:
                 0 key (type: string)
                 1 key (type: string)
 
-  Stage: Stage-4
+  Stage: Stage-3
     Map Reduce
       Map Operator Tree:
           TableScan
@@ -500,7 +477,7 @@ STAGE PLANS:
                   output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                   serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
-  Stage: Stage-8
+  Stage: Stage-7
     Map Reduce Local Work
       Alias -> Map Local Tables:
         yy 
@@ -519,7 +496,7 @@ STAGE PLANS:
                 0 _col0 (type: string)
                 1 key (type: string)
 
-  Stage: Stage-2
+  Stage: Stage-5
     Map Reduce
       Map Operator Tree:
           TableScan
@@ -538,23 +515,15 @@ STAGE PLANS:
                 expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string), _col3 (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3
                 Statistics: Num rows: 34 Data size: 3459 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string), _col3 (type: string)
-                  sort order: ++++
+                File Output Operator
+                  compressed: false
                   Statistics: Num rows: 34 Data size: 3459 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string), _col3 (type: string)
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
       Local Work:
         Map Reduce Local Work
-      Reduce Operator Tree:
-        Extract
-          Statistics: Num rows: 34 Data size: 3459 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 34 Data size: 3459 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator
@@ -567,7 +536,7 @@ FROM (SELECT x.key AS key, count(1) AS c
       FROM src x JOIN src1 y ON (x.key = y.key)
       GROUP BY x.key) xx
 JOIN src1 yy
-ON xx.key=yy.key ORDER BY xx.key, xx.cnt, yy.key, yy.value
+ON xx.key=yy.key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Input: default@src1
@@ -577,7 +546,7 @@ FROM (SELECT x.key AS key, count(1) AS c
       FROM src x JOIN src1 y ON (x.key = y.key)
       GROUP BY x.key) xx
 JOIN src1 yy
-ON xx.key=yy.key ORDER BY xx.key, xx.cnt, yy.key, yy.value
+ON xx.key=yy.key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 POSTHOOK: Input: default@src1
@@ -603,7 +572,7 @@ FROM (SELECT x.key AS key, count(1) AS c
       FROM src x JOIN src1 y ON (x.key = y.key)
       GROUP BY x.key) xx
 JOIN src1 yy
-ON xx.key=yy.key ORDER BY xx.key, xx.cnt, yy.key, yy.value
+ON xx.key=yy.key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 SELECT xx.key, xx.cnt, yy.key, yy.value
@@ -611,16 +580,15 @@ FROM (SELECT x.key AS key, count(1) AS c
       FROM src x JOIN src1 y ON (x.key = y.key)
       GROUP BY x.key) xx
 JOIN src1 yy
-ON xx.key=yy.key ORDER BY xx.key, xx.cnt, yy.key, yy.value
+ON xx.key=yy.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-6 is a root stage
-  Stage-1 depends on stages: Stage-6
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
+  Stage-5 is a root stage
+  Stage-1 depends on stages: Stage-5
+  Stage-0 depends on stages: Stage-1
 
 STAGE PLANS:
-  Stage: Stage-6
+  Stage: Stage-5
     Map Reduce Local Work
       Alias -> Map Local Tables:
         xx:y 
@@ -712,10 +680,11 @@ STAGE PLANS:
                     Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                     File Output Operator
                       compressed: false
+                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                       table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
           Mux Operator
             Statistics: Num rows: 96 Data size: 9913 Basic stats: COMPLETE Column stats: NONE
             Join Operator
@@ -732,30 +701,11 @@ STAGE PLANS:
                 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 File Output Operator
                   compressed: false
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string), _col3 (type: string)
-              sort order: ++++
-              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-              value expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string), _col3 (type: string)
-      Reduce Operator Tree:
-        Extract
-          Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator
@@ -768,7 +718,7 @@ FROM (SELECT x.key AS key, count(1) AS c
       FROM src x JOIN src1 y ON (x.key = y.key)
       GROUP BY x.key) xx
 JOIN src1 yy
-ON xx.key=yy.key ORDER BY xx.key, xx.cnt, yy.key, yy.value
+ON xx.key=yy.key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Input: default@src1
@@ -778,7 +728,7 @@ FROM (SELECT x.key AS key, count(1) AS c
       FROM src x JOIN src1 y ON (x.key = y.key)
       GROUP BY x.key) xx
 JOIN src1 yy
-ON xx.key=yy.key ORDER BY xx.key, xx.cnt, yy.key, yy.value
+ON xx.key=yy.key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 POSTHOOK: Input: default@src1
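
Note on the plan changes above: the embedded test comments describe the correlation optimizer collapsing the sub-query aggregation and the subsequent join into fewer MR jobs once the trailing ORDER BY is dropped. A minimal sketch of the pattern being exercised, assuming an interactive Hive session with the stock src/src1 tables (the set command shown is the standard switch these tests toggle, not something introduced by this commit):

    set hive.optimize.correlation=true;

    EXPLAIN
    SELECT xx.key, xx.cnt, yy.key, yy.value
    FROM (SELECT x.key AS key, count(1) AS cnt
          FROM src x JOIN src1 y ON (x.key = y.key)
          GROUP BY x.key) xx
    JOIN src1 yy
    ON xx.key = yy.key;

With the optimizer off, the sub-query aggregation and the outer join are planned as separate MR jobs; with it on, the correlated shuffle keys let them share a single reduce phase, which is what the Demux/Mux operators in the updated plan reflect.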

Modified: hive/trunk/ql/src/test/results/clientpositive/correlationoptimizer8.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/correlationoptimizer8.q.out?rev=1598678&r1=1598677&r2=1598678&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/correlationoptimizer8.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/correlationoptimizer8.q.out Fri May 30 17:37:05 2014
@@ -1,4 +1,6 @@
-PREHOOK: query: -- When the Correlation Optimizer is turned off, this query will be evaluated by
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+-- When the Correlation Optimizer is turned off, this query will be evaluated by
 -- 4 MR jobs. 
 -- When the Correlation Optimizer is turned on, because both inputs of the 
 -- UnionOperator are correlated, we can use 2 MR jobs to evaluate this query.
@@ -11,9 +13,11 @@ FROM 
      UNION ALL
   SELECT x1.key as key, count(1) as cnt from src x1 where x1.key > 100 group by x1.key
 ) subq1
-JOIN src1 x ON (x.key = subq1.key) ORDER BY x.key, x.value, subq1.cnt
+JOIN src1 x ON (x.key = subq1.key)
 PREHOOK: type: QUERY
-POSTHOOK: query: -- When the Correlation Optimizer is turned off, this query will be evaluated by
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+-- When the Correlation Optimizer is turned off, this query will be evaluated by
 -- 4 MR jobs. 
 -- When the Correlation Optimizer is turned on, because both inputs of the 
 -- UnionOperator are correlated, we can use 2 MR jobs to evaluate this query.
@@ -26,14 +30,13 @@ FROM 
      UNION ALL
   SELECT x1.key as key, count(1) as cnt from src x1 where x1.key > 100 group by x1.key
 ) subq1
-JOIN src1 x ON (x.key = subq1.key) ORDER BY x.key, x.value, subq1.cnt
+JOIN src1 x ON (x.key = subq1.key)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1, Stage-4
-  Stage-3 depends on stages: Stage-2
-  Stage-4 is a root stage
-  Stage-0 depends on stages: Stage-3
+  Stage-2 depends on stages: Stage-1, Stage-3
+  Stage-3 is a root stage
+  Stage-0 depends on stages: Stage-2
 
 STAGE PLANS:
   Stage: Stage-1
@@ -124,35 +127,16 @@ STAGE PLANS:
             Statistics: Num rows: 19 Data size: 1982 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
+              Statistics: Num rows: 19 Data size: 1982 Basic stats: COMPLETE Column stats: NONE
               table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-3
     Map Reduce
       Map Operator Tree:
           TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-              sort order: +++
-              Statistics: Num rows: 19 Data size: 1982 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-      Reduce Operator Tree:
-        Extract
-          Statistics: Num rows: 19 Data size: 1982 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 19 Data size: 1982 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-4
-    Map Reduce
-      Map Operator Tree:
-          TableScan
             alias: x1
             Statistics: Num rows: 58 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
@@ -204,7 +188,7 @@ FROM 
      UNION ALL
   SELECT x1.key as key, count(1) as cnt from src x1 where x1.key > 100 group by x1.key
 ) subq1
-JOIN src1 x ON (x.key = subq1.key) ORDER BY x.key, x.value, subq1.cnt
+JOIN src1 x ON (x.key = subq1.key)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Input: default@src1
@@ -215,7 +199,7 @@ FROM 
      UNION ALL
   SELECT x1.key as key, count(1) as cnt from src x1 where x1.key > 100 group by x1.key
 ) subq1
-JOIN src1 x ON (x.key = subq1.key) ORDER BY x.key, x.value, subq1.cnt
+JOIN src1 x ON (x.key = subq1.key)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 POSTHOOK: Input: default@src1
@@ -240,7 +224,7 @@ FROM 
      UNION ALL
   SELECT x1.key as key, count(1) as cnt from src x1 where x1.key > 100 group by x1.key
 ) subq1
-JOIN src1 x ON (x.key = subq1.key) ORDER BY x.key, x.value, subq1.cnt
+JOIN src1 x ON (x.key = subq1.key)
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 SELECT x.key, x.value, subq1.cnt
@@ -249,12 +233,11 @@ FROM 
      UNION ALL
   SELECT x1.key as key, count(1) as cnt from src x1 where x1.key > 100 group by x1.key
 ) subq1
-JOIN src1 x ON (x.key = subq1.key) ORDER BY x.key, x.value, subq1.cnt
+JOIN src1 x ON (x.key = subq1.key)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-1
 
 STAGE PLANS:
   Stage: Stage-1
@@ -344,10 +327,11 @@ STAGE PLANS:
                       Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                       File Output Operator
                         compressed: false
+                        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                         table:
-                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                            serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
           Group By Operator
             aggregations: count(VALUE._col0)
             keys: KEY._col0 (type: string)
@@ -376,10 +360,11 @@ STAGE PLANS:
                       Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                       File Output Operator
                         compressed: false
+                        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                         table:
-                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                            serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
           Mux Operator
             Statistics: Num rows: 77 Data size: 7940 Basic stats: COMPLETE Column stats: NONE
             Join Operator
@@ -396,30 +381,11 @@ STAGE PLANS:
                 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 File Output Operator
                   compressed: false
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-              sort order: +++
-              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-              value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-      Reduce Operator Tree:
-        Extract
-          Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator
@@ -433,7 +399,7 @@ FROM 
      UNION ALL
   SELECT x1.key as key, count(1) as cnt from src x1 where x1.key > 100 group by x1.key
 ) subq1
-JOIN src1 x ON (x.key = subq1.key) ORDER BY x.key, x.value, subq1.cnt
+JOIN src1 x ON (x.key = subq1.key)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Input: default@src1
@@ -444,7 +410,7 @@ FROM 
      UNION ALL
   SELECT x1.key as key, count(1) as cnt from src x1 where x1.key > 100 group by x1.key
 ) subq1
-JOIN src1 x ON (x.key = subq1.key) ORDER BY x.key, x.value, subq1.cnt
+JOIN src1 x ON (x.key = subq1.key)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 POSTHOOK: Input: default@src1
@@ -475,7 +441,7 @@ FROM 
      UNION ALL
   SELECT x1.value as key, count(1) as cnt from src1 x1 where x1.key > 100 group by x1.value
 ) subq1
-LEFT OUTER JOIN src1 x ON (x.key = subq1.key) ORDER BY subq1.key, subq1.cnt, x.key, x.value
+LEFT OUTER JOIN src1 x ON (x.key = subq1.key)
 PREHOOK: type: QUERY
 POSTHOOK: query: -- When the Correlation Optimizer is turned off, this query will be evaluated by
 -- 4 MR jobs. 
@@ -490,14 +456,13 @@ FROM 
      UNION ALL
   SELECT x1.value as key, count(1) as cnt from src1 x1 where x1.key > 100 group by x1.value
 ) subq1
-LEFT OUTER JOIN src1 x ON (x.key = subq1.key) ORDER BY subq1.key, subq1.cnt, x.key, x.value
+LEFT OUTER JOIN src1 x ON (x.key = subq1.key)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1, Stage-4
-  Stage-3 depends on stages: Stage-2
-  Stage-4 is a root stage
-  Stage-0 depends on stages: Stage-3
+  Stage-2 depends on stages: Stage-1, Stage-3
+  Stage-3 is a root stage
+  Stage-0 depends on stages: Stage-2
 
 STAGE PLANS:
   Stage: Stage-1
@@ -588,35 +553,16 @@ STAGE PLANS:
             Statistics: Num rows: 9 Data size: 991 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
+              Statistics: Num rows: 9 Data size: 991 Basic stats: COMPLETE Column stats: NONE
               table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-3
     Map Reduce
       Map Operator Tree:
           TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string), _col3 (type: string)
-              sort order: ++++
-              Statistics: Num rows: 9 Data size: 991 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string), _col3 (type: string)
-      Reduce Operator Tree:
-        Extract
-          Statistics: Num rows: 9 Data size: 991 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 9 Data size: 991 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-4
-    Map Reduce
-      Map Operator Tree:
-          TableScan
             alias: x1
             Statistics: Num rows: 1 Data size: 216 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
@@ -668,7 +614,7 @@ FROM 
      UNION ALL
   SELECT x1.value as key, count(1) as cnt from src1 x1 where x1.key > 100 group by x1.value
 ) subq1
-LEFT OUTER JOIN src1 x ON (x.key = subq1.key) ORDER BY subq1.key, subq1.cnt, x.key, x.value
+LEFT OUTER JOIN src1 x ON (x.key = subq1.key)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Input: default@src1
@@ -679,7 +625,7 @@ FROM 
      UNION ALL
   SELECT x1.value as key, count(1) as cnt from src1 x1 where x1.key > 100 group by x1.value
 ) subq1
-LEFT OUTER JOIN src1 x ON (x.key = subq1.key) ORDER BY subq1.key, subq1.cnt, x.key, x.value
+LEFT OUTER JOIN src1 x ON (x.key = subq1.key)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 POSTHOOK: Input: default@src1
@@ -724,7 +670,7 @@ FROM 
      UNION ALL
   SELECT x1.value as key, count(1) as cnt from src1 x1 where x1.key > 100 group by x1.value
 ) subq1
-LEFT OUTER JOIN src1 x ON (x.key = subq1.key) ORDER BY subq1.key, subq1.cnt, x.key, x.value
+LEFT OUTER JOIN src1 x ON (x.key = subq1.key)
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 SELECT subq1.key, subq1.cnt, x.key, x.value
@@ -733,12 +679,11 @@ FROM 
      UNION ALL
   SELECT x1.value as key, count(1) as cnt from src1 x1 where x1.key > 100 group by x1.value
 ) subq1
-LEFT OUTER JOIN src1 x ON (x.key = subq1.key) ORDER BY subq1.key, subq1.cnt, x.key, x.value
+LEFT OUTER JOIN src1 x ON (x.key = subq1.key)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-1
 
 STAGE PLANS:
   Stage: Stage-1
@@ -828,10 +773,11 @@ STAGE PLANS:
                       Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                       File Output Operator
                         compressed: false
+                        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                         table:
-                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                            serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
           Group By Operator
             aggregations: count(VALUE._col0)
             keys: KEY._col0 (type: string)
@@ -860,10 +806,11 @@ STAGE PLANS:
                       Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                       File Output Operator
                         compressed: false
+                        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                         table:
-                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                            serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
           Mux Operator
             Statistics: Num rows: 40 Data size: 4237 Basic stats: COMPLETE Column stats: NONE
             Join Operator
@@ -880,30 +827,11 @@ STAGE PLANS:
                 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 File Output Operator
                   compressed: false
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string), _col3 (type: string)
-              sort order: ++++
-              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-              value expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string), _col3 (type: string)
-      Reduce Operator Tree:
-        Extract
-          Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator
@@ -917,7 +845,7 @@ FROM 
      UNION ALL
   SELECT x1.value as key, count(1) as cnt from src1 x1 where x1.key > 100 group by x1.value
 ) subq1
-LEFT OUTER JOIN src1 x ON (x.key = subq1.key) ORDER BY subq1.key, subq1.cnt, x.key, x.value
+LEFT OUTER JOIN src1 x ON (x.key = subq1.key)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Input: default@src1
@@ -928,7 +856,7 @@ FROM 
      UNION ALL
   SELECT x1.value as key, count(1) as cnt from src1 x1 where x1.key > 100 group by x1.value
 ) subq1
-LEFT OUTER JOIN src1 x ON (x.key = subq1.key) ORDER BY subq1.key, subq1.cnt, x.key, x.value
+LEFT OUTER JOIN src1 x ON (x.key = subq1.key)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 POSTHOOK: Input: default@src1
@@ -976,7 +904,7 @@ FROM 
      UNION ALL
   SELECT x1.key as key, count(1) as cnt from src x1 where x1.key > 100 group by x1.key, x1.value
 ) subq1
-JOIN src1 x ON (x.key = subq1.key) ORDER BY x.key, x.value, subq1.cnt
+JOIN src1 x ON (x.key = subq1.key)
 PREHOOK: type: QUERY
 POSTHOOK: query: -- When the Correlation Optimizer is turned on, because a input of UnionOperator is
 -- not correlated, we cannot handle this case right now. So, this query will not be
@@ -988,14 +916,13 @@ FROM 
      UNION ALL
   SELECT x1.key as key, count(1) as cnt from src x1 where x1.key > 100 group by x1.key, x1.value
 ) subq1
-JOIN src1 x ON (x.key = subq1.key) ORDER BY x.key, x.value, subq1.cnt
+JOIN src1 x ON (x.key = subq1.key)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1, Stage-4
-  Stage-3 depends on stages: Stage-2
-  Stage-4 is a root stage
-  Stage-0 depends on stages: Stage-3
+  Stage-2 depends on stages: Stage-1, Stage-3
+  Stage-3 is a root stage
+  Stage-0 depends on stages: Stage-2
 
 STAGE PLANS:
   Stage: Stage-1
@@ -1086,35 +1013,16 @@ STAGE PLANS:
             Statistics: Num rows: 14 Data size: 1872 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
+              Statistics: Num rows: 14 Data size: 1872 Basic stats: COMPLETE Column stats: NONE
               table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-3
     Map Reduce
       Map Operator Tree:
           TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-              sort order: +++
-              Statistics: Num rows: 14 Data size: 1872 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-      Reduce Operator Tree:
-        Extract
-          Statistics: Num rows: 14 Data size: 1872 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 14 Data size: 1872 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-4
-    Map Reduce
-      Map Operator Tree:
-          TableScan
             alias: x1
             Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
@@ -1170,7 +1078,7 @@ FROM 
      UNION ALL
   SELECT count(1) as key, cast(x1.key as INT) as value from src x1 where x1.key > 100 group by x1.key
 ) subq1
-FULL OUTER JOIN src1 x ON (x.key = subq1.key) ORDER BY subq1.key, subq1.value, x.key, x.value
+FULL OUTER JOIN src1 x ON (x.key = subq1.key)
 PREHOOK: type: QUERY
 POSTHOOK: query: -- When the Correlation Optimizer is turned on, because a input of UnionOperator is
 -- not correlated, we cannot handle this case right now. So, this query will not be
@@ -1182,14 +1090,13 @@ FROM 
      UNION ALL
   SELECT count(1) as key, cast(x1.key as INT) as value from src x1 where x1.key > 100 group by x1.key
 ) subq1
-FULL OUTER JOIN src1 x ON (x.key = subq1.key) ORDER BY subq1.key, subq1.value, x.key, x.value
+FULL OUTER JOIN src1 x ON (x.key = subq1.key)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1, Stage-4
-  Stage-3 depends on stages: Stage-2
-  Stage-4 is a root stage
-  Stage-0 depends on stages: Stage-3
+  Stage-2 depends on stages: Stage-1, Stage-3
+  Stage-3 is a root stage
+  Stage-0 depends on stages: Stage-2
 
 STAGE PLANS:
   Stage: Stage-1
@@ -1280,35 +1187,16 @@ STAGE PLANS:
             Statistics: Num rows: 19 Data size: 1982 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
+              Statistics: Num rows: 19 Data size: 1982 Basic stats: COMPLETE Column stats: NONE
               table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-3
     Map Reduce
       Map Operator Tree:
           TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: bigint), _col1 (type: bigint), _col2 (type: string), _col3 (type: string)
-              sort order: ++++
-              Statistics: Num rows: 19 Data size: 1982 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col0 (type: bigint), _col1 (type: bigint), _col2 (type: string), _col3 (type: string)
-      Reduce Operator Tree:
-        Extract
-          Statistics: Num rows: 19 Data size: 1982 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 19 Data size: 1982 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-4
-    Map Reduce
-      Map Operator Tree:
-          TableScan
             alias: x1
             Statistics: Num rows: 58 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
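
Across these files the diffs follow one pattern: the trailing ORDER BY that previously forced an extra sorting MR stage is removed from the test queries, and result comparison is instead delegated to the test driver through the -- SORT_QUERY_RESULTS directive added at the top of each query. A minimal sketch of a clientpositive .q file using that directive (illustrative only; the settings are assumptions, and the query body is taken from the diff above):

    -- SORT_QUERY_RESULTS

    set hive.optimize.correlation=true;

    SELECT x.key, x.value, subq1.cnt
    FROM
    ( SELECT x.key as key, count(1) as cnt from src x where x.key < 20 group by x.key
         UNION ALL
      SELECT x1.key as key, count(1) as cnt from src x1 where x1.key > 100 group by x1.key
    ) subq1
    JOIN src1 x ON (x.key = subq1.key);

The directive tells the qtest framework to sort the actual query output before diffing it against the golden file, so deterministic results no longer need to be enforced by the query itself.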

Modified: hive/trunk/ql/src/test/results/clientpositive/correlationoptimizer9.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/correlationoptimizer9.q.out?rev=1598678&r1=1598677&r2=1598678&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/correlationoptimizer9.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/correlationoptimizer9.q.out Fri May 30 17:37:05 2014
@@ -1,7 +1,11 @@
-PREHOOK: query: CREATE TABLE tmp(c1 INT, c2 INT, c3 STRING, c4 STRING)
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+CREATE TABLE tmp(c1 INT, c2 INT, c3 STRING, c4 STRING)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-POSTHOOK: query: CREATE TABLE tmp(c1 INT, c2 INT, c3 STRING, c4 STRING)
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+CREATE TABLE tmp(c1 INT, c2 INT, c3 STRING, c4 STRING)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@tmp
@@ -25,7 +29,7 @@ FROM 
 (SELECT x.c1 AS key, count(1) AS cnt FROM tmp x WHERE x.c1 < 120 GROUP BY x.c1) xx
 JOIN
 (SELECT x1.c2 AS key, count(1) AS cnt FROM tmp x1 WHERE x1.c2 > 100 GROUP BY x1.c2) yy
-ON (xx.key = yy.key) ORDER BY xx.key, yy.key, xx.cnt, yy.cnt
+ON (xx.key = yy.key)
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 SELECT xx.key, yy.key, xx.cnt, yy.cnt
@@ -33,14 +37,13 @@ FROM 
 (SELECT x.c1 AS key, count(1) AS cnt FROM tmp x WHERE x.c1 < 120 GROUP BY x.c1) xx
 JOIN
 (SELECT x1.c2 AS key, count(1) AS cnt FROM tmp x1 WHERE x1.c2 > 100 GROUP BY x1.c2) yy
-ON (xx.key = yy.key) ORDER BY xx.key, yy.key, xx.cnt, yy.cnt
+ON (xx.key = yy.key)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1, Stage-4
-  Stage-3 depends on stages: Stage-2
-  Stage-4 is a root stage
-  Stage-0 depends on stages: Stage-3
+  Stage-2 depends on stages: Stage-1, Stage-3
+  Stage-3 is a root stage
+  Stage-0 depends on stages: Stage-2
 
 STAGE PLANS:
   Stage: Stage-1
@@ -118,35 +121,16 @@ STAGE PLANS:
             Statistics: Num rows: 188 Data size: 4200 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
+              Statistics: Num rows: 188 Data size: 4200 Basic stats: COMPLETE Column stats: NONE
               table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-3
     Map Reduce
       Map Operator Tree:
           TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint), _col3 (type: bigint)
-              sort order: ++++
-              Statistics: Num rows: 188 Data size: 4200 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint), _col3 (type: bigint)
-      Reduce Operator Tree:
-        Extract
-          Statistics: Num rows: 188 Data size: 4200 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 188 Data size: 4200 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-4
-    Map Reduce
-      Map Operator Tree:
-          TableScan
             alias: x
             Statistics: Num rows: 1028 Data size: 22964 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
@@ -197,7 +181,7 @@ FROM 
 (SELECT x.c1 AS key, count(1) AS cnt FROM tmp x WHERE x.c1 < 120 GROUP BY x.c1) xx
 JOIN
 (SELECT x1.c2 AS key, count(1) AS cnt FROM tmp x1 WHERE x1.c2 > 100 GROUP BY x1.c2) yy
-ON (xx.key = yy.key) ORDER BY xx.key, yy.key, xx.cnt, yy.cnt
+ON (xx.key = yy.key)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@tmp
 #### A masked pattern was here ####
@@ -206,7 +190,7 @@ FROM 
 (SELECT x.c1 AS key, count(1) AS cnt FROM tmp x WHERE x.c1 < 120 GROUP BY x.c1) xx
 JOIN
 (SELECT x1.c2 AS key, count(1) AS cnt FROM tmp x1 WHERE x1.c2 > 100 GROUP BY x1.c2) yy
-ON (xx.key = yy.key) ORDER BY xx.key, yy.key, xx.cnt, yy.cnt
+ON (xx.key = yy.key)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@tmp
 #### A masked pattern was here ####
@@ -226,7 +210,7 @@ FROM 
 (SELECT x.c1 AS key, count(1) AS cnt FROM tmp x WHERE x.c1 < 120 GROUP BY x.c1) xx
 JOIN
 (SELECT x1.c2 AS key, count(1) AS cnt FROM tmp x1 WHERE x1.c2 > 100 GROUP BY x1.c2) yy
-ON (xx.key = yy.key) ORDER BY xx.key, yy.key, xx.cnt, yy.cnt
+ON (xx.key = yy.key)
 PREHOOK: type: QUERY
 POSTHOOK: query: -- The merged table scan should be able to load both c1 and c2
 EXPLAIN
@@ -235,12 +219,11 @@ FROM 
 (SELECT x.c1 AS key, count(1) AS cnt FROM tmp x WHERE x.c1 < 120 GROUP BY x.c1) xx
 JOIN
 (SELECT x1.c2 AS key, count(1) AS cnt FROM tmp x1 WHERE x1.c2 > 100 GROUP BY x1.c2) yy
-ON (xx.key = yy.key) ORDER BY xx.key, yy.key, xx.cnt, yy.cnt
+ON (xx.key = yy.key)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-1
 
 STAGE PLANS:
   Stage: Stage-1
@@ -319,10 +302,11 @@ STAGE PLANS:
                     Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                     File Output Operator
                       compressed: false
+                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                       table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
           Group By Operator
             aggregations: count(VALUE._col0)
             keys: KEY._col0 (type: int)
@@ -349,30 +333,11 @@ STAGE PLANS:
                     Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                     File Output Operator
                       compressed: false
+                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                       table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint), _col3 (type: bigint)
-              sort order: ++++
-              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-              value expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint), _col3 (type: bigint)
-      Reduce Operator Tree:
-        Extract
-          Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator
@@ -385,7 +350,7 @@ FROM 
 (SELECT x.c1 AS key, count(1) AS cnt FROM tmp x WHERE x.c1 < 120 GROUP BY x.c1) xx
 JOIN
 (SELECT x1.c2 AS key, count(1) AS cnt FROM tmp x1 WHERE x1.c2 > 100 GROUP BY x1.c2) yy
-ON (xx.key = yy.key) ORDER BY xx.key, yy.key, xx.cnt, yy.cnt
+ON (xx.key = yy.key)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@tmp
 #### A masked pattern was here ####
@@ -394,7 +359,7 @@ FROM 
 (SELECT x.c1 AS key, count(1) AS cnt FROM tmp x WHERE x.c1 < 120 GROUP BY x.c1) xx
 JOIN
 (SELECT x1.c2 AS key, count(1) AS cnt FROM tmp x1 WHERE x1.c2 > 100 GROUP BY x1.c2) yy
-ON (xx.key = yy.key) ORDER BY xx.key, yy.key, xx.cnt, yy.cnt
+ON (xx.key = yy.key)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@tmp
 #### A masked pattern was here ####
@@ -413,7 +378,7 @@ FROM 
 (SELECT x.c1 AS key1, x.c3 AS key2, count(1) AS cnt FROM tmp x WHERE x.c1 < 120 GROUP BY x.c1, x.c3) xx
 JOIN
 (SELECT x1.c1 AS key1, x1.c3 AS key2, count(1) AS cnt FROM tmp x1 WHERE x1.c2 > 100 GROUP BY x1.c1, x1.c3) yy
-ON (xx.key1 = yy.key1 AND xx.key2 == yy.key2) ORDER BY xx.key1, xx.key2, yy.key1, yy.key2, xx.cnt, yy.cnt
+ON (xx.key1 = yy.key1 AND xx.key2 == yy.key2)
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 SELECT xx.key1, xx.key2, yy.key1, yy.key2, xx.cnt, yy.cnt
@@ -421,14 +386,13 @@ FROM 
 (SELECT x.c1 AS key1, x.c3 AS key2, count(1) AS cnt FROM tmp x WHERE x.c1 < 120 GROUP BY x.c1, x.c3) xx
 JOIN
 (SELECT x1.c1 AS key1, x1.c3 AS key2, count(1) AS cnt FROM tmp x1 WHERE x1.c2 > 100 GROUP BY x1.c1, x1.c3) yy
-ON (xx.key1 = yy.key1 AND xx.key2 == yy.key2) ORDER BY xx.key1, xx.key2, yy.key1, yy.key2, xx.cnt, yy.cnt
+ON (xx.key1 = yy.key1 AND xx.key2 == yy.key2)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1, Stage-4
-  Stage-3 depends on stages: Stage-2
-  Stage-4 is a root stage
-  Stage-0 depends on stages: Stage-3
+  Stage-2 depends on stages: Stage-1, Stage-3
+  Stage-3 is a root stage
+  Stage-0 depends on stages: Stage-2
 
 STAGE PLANS:
   Stage: Stage-1
@@ -506,35 +470,16 @@ STAGE PLANS:
             Statistics: Num rows: 188 Data size: 4200 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
+              Statistics: Num rows: 188 Data size: 4200 Basic stats: COMPLETE Column stats: NONE
               table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-3
     Map Reduce
       Map Operator Tree:
           TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: string), _col4 (type: bigint), _col5 (type: bigint)
-              sort order: ++++++
-              Statistics: Num rows: 188 Data size: 4200 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: string), _col4 (type: bigint), _col5 (type: bigint)
-      Reduce Operator Tree:
-        Extract
-          Statistics: Num rows: 188 Data size: 4200 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 188 Data size: 4200 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-4
-    Map Reduce
-      Map Operator Tree:
-          TableScan
             alias: x
             Statistics: Num rows: 1028 Data size: 22964 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
@@ -585,7 +530,7 @@ FROM 
 (SELECT x.c1 AS key1, x.c3 AS key2, count(1) AS cnt FROM tmp x WHERE x.c1 < 120 GROUP BY x.c1, x.c3) xx
 JOIN
 (SELECT x1.c1 AS key1, x1.c3 AS key2, count(1) AS cnt FROM tmp x1 WHERE x1.c2 > 100 GROUP BY x1.c1, x1.c3) yy
-ON (xx.key1 = yy.key1 AND xx.key2 == yy.key2) ORDER BY xx.key1, xx.key2, yy.key1, yy.key2, xx.cnt, yy.cnt
+ON (xx.key1 = yy.key1 AND xx.key2 == yy.key2)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@tmp
 #### A masked pattern was here ####
@@ -594,7 +539,7 @@ FROM 
 (SELECT x.c1 AS key1, x.c3 AS key2, count(1) AS cnt FROM tmp x WHERE x.c1 < 120 GROUP BY x.c1, x.c3) xx
 JOIN
 (SELECT x1.c1 AS key1, x1.c3 AS key2, count(1) AS cnt FROM tmp x1 WHERE x1.c2 > 100 GROUP BY x1.c1, x1.c3) yy
-ON (xx.key1 = yy.key1 AND xx.key2 == yy.key2) ORDER BY xx.key1, xx.key2, yy.key1, yy.key2, xx.cnt, yy.cnt
+ON (xx.key1 = yy.key1 AND xx.key2 == yy.key2)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@tmp
 #### A masked pattern was here ####
@@ -613,7 +558,7 @@ FROM 
 (SELECT x.c1 AS key1, x.c3 AS key2, count(1) AS cnt FROM tmp x WHERE x.c1 < 120 GROUP BY x.c1, x.c3) xx
 JOIN
 (SELECT x1.c1 AS key1, x1.c3 AS key2, count(1) AS cnt FROM tmp x1 WHERE x1.c2 > 100 GROUP BY x1.c1, x1.c3) yy
-ON (xx.key1 = yy.key1 AND xx.key2 == yy.key2) ORDER BY xx.key1, xx.key2, yy.key1, yy.key2, xx.cnt, yy.cnt
+ON (xx.key1 = yy.key1 AND xx.key2 == yy.key2)
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 SELECT xx.key1, xx.key2, yy.key1, yy.key2, xx.cnt, yy.cnt
@@ -621,12 +566,11 @@ FROM 
 (SELECT x.c1 AS key1, x.c3 AS key2, count(1) AS cnt FROM tmp x WHERE x.c1 < 120 GROUP BY x.c1, x.c3) xx
 JOIN
 (SELECT x1.c1 AS key1, x1.c3 AS key2, count(1) AS cnt FROM tmp x1 WHERE x1.c2 > 100 GROUP BY x1.c1, x1.c3) yy
-ON (xx.key1 = yy.key1 AND xx.key2 == yy.key2) ORDER BY xx.key1, xx.key2, yy.key1, yy.key2, xx.cnt, yy.cnt
+ON (xx.key1 = yy.key1 AND xx.key2 == yy.key2)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-1
 
 STAGE PLANS:
   Stage: Stage-1
@@ -705,10 +649,11 @@ STAGE PLANS:
                     Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                     File Output Operator
                       compressed: false
+                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                       table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
           Group By Operator
             aggregations: count(VALUE._col0)
             keys: KEY._col0 (type: int), KEY._col1 (type: string)
@@ -735,30 +680,11 @@ STAGE PLANS:
                     Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                     File Output Operator
                       compressed: false
+                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                       table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: string), _col4 (type: bigint), _col5 (type: bigint)
-              sort order: ++++++
-              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-              value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: string), _col4 (type: bigint), _col5 (type: bigint)
-      Reduce Operator Tree:
-        Extract
-          Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator
@@ -771,7 +697,7 @@ FROM 
 (SELECT x.c1 AS key1, x.c3 AS key2, count(1) AS cnt FROM tmp x WHERE x.c1 < 120 GROUP BY x.c1, x.c3) xx
 JOIN
 (SELECT x1.c1 AS key1, x1.c3 AS key2, count(1) AS cnt FROM tmp x1 WHERE x1.c2 > 100 GROUP BY x1.c1, x1.c3) yy
-ON (xx.key1 = yy.key1 AND xx.key2 == yy.key2) ORDER BY xx.key1, xx.key2, yy.key1, yy.key2, xx.cnt, yy.cnt
+ON (xx.key1 = yy.key1 AND xx.key2 == yy.key2)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@tmp
 #### A masked pattern was here ####
@@ -780,7 +706,7 @@ FROM 
 (SELECT x.c1 AS key1, x.c3 AS key2, count(1) AS cnt FROM tmp x WHERE x.c1 < 120 GROUP BY x.c1, x.c3) xx
 JOIN
 (SELECT x1.c1 AS key1, x1.c3 AS key2, count(1) AS cnt FROM tmp x1 WHERE x1.c2 > 100 GROUP BY x1.c1, x1.c3) yy
-ON (xx.key1 = yy.key1 AND xx.key2 == yy.key2) ORDER BY xx.key1, xx.key2, yy.key1, yy.key2, xx.cnt, yy.cnt
+ON (xx.key1 = yy.key1 AND xx.key2 == yy.key2)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@tmp
 #### A masked pattern was here ####
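
The hunks above all follow the same pattern: the explicit ORDER BY on the outer query is dropped, and a -- SORT_QUERY_RESULTS comment is added at the head of the test, so the test driver sorts the emitted rows before comparison instead of the plan carrying an extra sort stage. A minimal sketch of what the updated .q source presumably looks like for one of these queries follows; the .q file itself is not part of this diff, so the exact layout and the trailing semicolon are assumptions:

    -- SORT_QUERY_RESULTS

    SELECT xx.key1, xx.key2, yy.key1, yy.key2, xx.cnt, yy.cnt
    FROM
    (SELECT x.c1 AS key1, x.c3 AS key2, count(1) AS cnt FROM tmp x WHERE x.c1 < 120 GROUP BY x.c1, x.c3) xx
    JOIN
    (SELECT x1.c1 AS key1, x1.c3 AS key2, count(1) AS cnt FROM tmp x1 WHERE x1.c2 > 100 GROUP BY x1.c1, x1.c3) yy
    ON (xx.key1 = yy.key1 AND xx.key2 == yy.key2);

With the ORDER BY gone, the STAGE DEPENDENCIES sections in the hunks lose the final sort MapReduce stage, which is why the stage numbers shift and the extra Reduce Output Operator / Extract blocks disappear from the expected output.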

Modified: hive/trunk/ql/src/test/results/clientpositive/create_like_view.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/create_like_view.q.out?rev=1598678&r1=1598677&r2=1598678&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/create_like_view.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/create_like_view.q.out Fri May 30 17:37:05 2014
@@ -1,6 +1,10 @@
-PREHOOK: query: DROP TABLE IF EXISTS table1
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+DROP TABLE IF EXISTS table1
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE IF EXISTS table1
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+DROP TABLE IF EXISTS table1
 POSTHOOK: type: DROPTABLE
 PREHOOK: query: DROP TABLE IF EXISTS table2
 PREHOOK: type: DROPTABLE
@@ -188,20 +192,20 @@ POSTHOOK: Input: default@src
 POSTHOOK: Output: default@table2
 POSTHOOK: Lineage: table2.a SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: table2.b SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: SELECT * FROM table1 order by a, b
+PREHOOK: query: SELECT * FROM table1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@table1
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM table1 order by a, b
+POSTHOOK: query: SELECT * FROM table1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@table1
 #### A masked pattern was here ####
 86	val_86
-PREHOOK: query: SELECT * FROM table2 order by a, b
+PREHOOK: query: SELECT * FROM table2
 PREHOOK: type: QUERY
 PREHOOK: Input: default@table2
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM table2 order by a, b
+POSTHOOK: query: SELECT * FROM table2
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@table2
 #### A masked pattern was here ####

Modified: hive/trunk/ql/src/test/results/clientpositive/ctas_colname.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/ctas_colname.q.out?rev=1598678&r1=1598677&r2=1598678&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/ctas_colname.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/ctas_colname.q.out Fri May 30 17:37:05 2014
@@ -1,12 +1,14 @@
-PREHOOK: query: -- HIVE-4392, column aliases from expressionRR (GBY, etc.) are not valid name for table
--- use internal name as column name instead
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+-- HIVE-4392, column aliases from expressionRR (GBY, etc.) are not valid name for table
 
 -- group by
 explain
 create table summary as select *, sum(key), count(value) from src
 PREHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: query: -- HIVE-4392, column aliases from expressionRR (GBY, etc.) are not valid name for table
--- use internal name as column name instead
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+-- HIVE-4392, column aliases from expressionRR (GBY, etc.) are not valid name for table
 
 -- group by
 explain
@@ -119,11 +121,11 @@ Bucket Columns:     	[]                 
 Sort Columns:       	[]                  	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
-PREHOOK: query: select * from summary order by `_col0`, `_col1`, `_c1`, `_c2`
+PREHOOK: query: select * from summary
 PREHOOK: type: QUERY
 PREHOOK: Input: default@summary
 #### A masked pattern was here ####
-POSTHOOK: query: select * from summary order by `_col0`, `_col1`, `_c1`, `_c2`
+POSTHOOK: query: select * from summary
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@summary
 #### A masked pattern was here ####
@@ -234,11 +236,11 @@ Bucket Columns:     	[]                 
 Sort Columns:       	[]                  	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
-PREHOOK: query: select * from x4 order by key, value, rr
+PREHOOK: query: select * from x4
 PREHOOK: type: QUERY
 PREHOOK: Input: default@x4
 #### A masked pattern was here ####
-POSTHOOK: query: select * from x4 order by key, value, rr
+POSTHOOK: query: select * from x4
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@x4
 #### A masked pattern was here ####
@@ -396,33 +398,33 @@ Bucket Columns:     	[]                 
 Sort Columns:       	[]                  	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
-PREHOOK: query: select * from x5 order by key, value, lead1
+PREHOOK: query: select * from x5
 PREHOOK: type: QUERY
 PREHOOK: Input: default@x5
 #### A masked pattern was here ####
-POSTHOOK: query: select * from x5 order by key, value, lead1
+POSTHOOK: query: select * from x5
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@x5
 #### A masked pattern was here ####
-0	val_0	NULL
 0	val_0	0
 0	val_0	0
+0	val_0	NULL
 10	val_10	NULL
-100	val_100	NULL
 100	val_100	100
-103	val_103	NULL
+100	val_100	NULL
 103	val_103	103
-104	val_104	NULL
+103	val_103	NULL
 104	val_104	104
+104	val_104	NULL
 105	val_105	NULL
 11	val_11	NULL
 111	val_111	NULL
-113	val_113	NULL
 113	val_113	113
+113	val_113	NULL
 114	val_114	NULL
 116	val_116	NULL
-118	val_118	NULL
 118	val_118	118
+118	val_118	NULL
 119	val_119	119
 PREHOOK: query: -- sub queries
 explain
@@ -537,11 +539,11 @@ Bucket Columns:     	[]                 
 Sort Columns:       	[]                  	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
-PREHOOK: query: select * from x6 order by `_col0`, `_c1`
+PREHOOK: query: select * from x6
 PREHOOK: type: QUERY
 PREHOOK: Input: default@x6
 #### A masked pattern was here ####
-POSTHOOK: query: select * from x6 order by `_col0`, `_c1`
+POSTHOOK: query: select * from x6
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@x6
 #### A masked pattern was here ####
@@ -657,11 +659,11 @@ Bucket Columns:     	[]                 
 Sort Columns:       	[]                  	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
-PREHOOK: query: select * from x7 order by `_col0`
+PREHOOK: query: select * from x7
 PREHOOK: type: QUERY
 PREHOOK: Input: default@x7
 #### A masked pattern was here ####
-POSTHOOK: query: select * from x7 order by `_col0`
+POSTHOOK: query: select * from x7
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@x7
 #### A masked pattern was here ####
@@ -1088,11 +1090,11 @@ Bucket Columns:     	[]                 
 Sort Columns:       	[]                  	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
-PREHOOK: query: select * from x8 order by `_col0`
+PREHOOK: query: select * from x8
 PREHOOK: type: QUERY
 PREHOOK: Input: default@x8
 #### A masked pattern was here ####
-POSTHOOK: query: select * from x8 order by `_col0`
+POSTHOOK: query: select * from x8
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@x8
 #### A masked pattern was here ####
@@ -1222,11 +1224,11 @@ Bucket Columns:     	[]                 
 Sort Columns:       	[]                  	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
-PREHOOK: query: select * from x9 order by key, `_c0`
+PREHOOK: query: select * from x9
 PREHOOK: type: QUERY
 PREHOOK: Input: default@x9
 #### A masked pattern was here ####
-POSTHOOK: query: select * from x9 order by key, `_c0`
+POSTHOOK: query: select * from x9
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@x9
 #### A masked pattern was here ####

Modified: hive/trunk/ql/src/test/results/clientpositive/custom_input_output_format.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/custom_input_output_format.q.out?rev=1598678&r1=1598677&r2=1598678&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/custom_input_output_format.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/custom_input_output_format.q.out Fri May 30 17:37:05 2014
@@ -1,9 +1,13 @@
-PREHOOK: query: CREATE TABLE src1_rot13_iof(key STRING, value STRING) 
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+CREATE TABLE src1_rot13_iof(key STRING, value STRING)
   STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.udf.Rot13InputFormat'
             OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.udf.Rot13OutputFormat'
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-POSTHOOK: query: CREATE TABLE src1_rot13_iof(key STRING, value STRING) 
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+CREATE TABLE src1_rot13_iof(key STRING, value STRING)
   STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.udf.Rot13InputFormat'
             OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.udf.Rot13OutputFormat'
 POSTHOOK: type: CREATETABLE
@@ -19,11 +23,11 @@ key                 	string             
 value               	string              	                    
 	 	 
 #### A masked pattern was here ####
-PREHOOK: query: SELECT * FROM src1 ORDER BY key, value
+PREHOOK: query: SELECT * FROM src1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM src1 ORDER BY key, value
+POSTHOOK: query: SELECT * FROM src1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src1
 #### A masked pattern was here ####
@@ -62,11 +66,11 @@ POSTHOOK: Input: default@src1
 POSTHOOK: Output: default@src1_rot13_iof
 POSTHOOK: Lineage: src1_rot13_iof.key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: src1_rot13_iof.value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: SELECT * FROM src1_rot13_iof ORDER BY key, value
+PREHOOK: query: SELECT * FROM src1_rot13_iof
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1_rot13_iof
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM src1_rot13_iof ORDER BY key, value
+POSTHOOK: query: SELECT * FROM src1_rot13_iof
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src1_rot13_iof
 #### A masked pattern was here ####