Posted to commits@hive.apache.org by ha...@apache.org on 2014/05/30 19:37:15 UTC

svn commit: r1598678 [5/31] - in /hive/trunk: cli/src/java/org/apache/hadoop/hive/cli/ common/src/java/org/apache/hadoop/hive/common/io/ itests/util/src/main/java/org/apache/hadoop/hive/ql/ ql/src/java/org/apache/hadoop/hive/ql/ ql/src/java/org/apache/...

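The edits below all follow one pattern: each qfile gains a -- SORT_QUERY_RESULTS directive near its header and loses the ORDER BY clauses that existed only to make the golden output deterministic, since the directive has the qfile test driver sort the captured result rows before comparing them against the expected file. As a minimal sketch of that before/after shape (the table name example_part is illustrative, not taken from any of the files below):

    -- SORT_QUERY_RESULTS

    -- previously the test forced ordering in the query itself:
    --   select * from example_part where ds is not null order by ds, hr, key;
    -- with the directive above, the driver sorts the emitted rows, so the
    -- query no longer needs its own ORDER BY:
    select * from example_part where ds is not null;
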
Modified: hive/trunk/ql/src/test/queries/clientpositive/list_bucket_dml_7.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/list_bucket_dml_7.q?rev=1598678&r1=1598677&r2=1598678&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/list_bucket_dml_7.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/list_bucket_dml_7.q Fri May 30 17:37:05 2014
@@ -9,6 +9,7 @@ set hive.merge.mapredfiles=false;
 set hive.merge.rcfile.block.level=true;
 
 -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
+-- SORT_QUERY_RESULTS
 
 -- list bucketing DML : dynamic partition (one level) , merge , one skewed column
 -- DML without merge files mixed with small and big files:
@@ -64,7 +65,7 @@ set hive.input.format=org.apache.hadoop.
 explain extended
 select * from list_bucketing_dynamic_part where key = '484' and value = 'val_484';
 select * from list_bucketing_dynamic_part where key = '484' and value = 'val_484';
-select * from srcpart where ds = '2008-04-08' and key = '484' and value = 'val_484' order by hr;
+select * from srcpart where ds = '2008-04-08' and key = '484' and value = 'val_484';
 
 -- clean up
 drop table list_bucketing_dynamic_part;

Modified: hive/trunk/ql/src/test/queries/clientpositive/list_bucket_dml_9.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/list_bucket_dml_9.q?rev=1598678&r1=1598677&r2=1598678&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/list_bucket_dml_9.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/list_bucket_dml_9.q Fri May 30 17:37:05 2014
@@ -8,6 +8,7 @@ set hive.merge.mapfiles=false;	
 set hive.merge.mapredfiles=false;
 
 -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
+-- SORT_QUERY_RESULTS
 
 -- list bucketing DML: static partition. multiple skewed columns. merge.
 -- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
@@ -63,9 +64,9 @@ select count(*) from list_bucketing_stat
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.optimize.listbucketing=true;
 explain extended
-select * from list_bucketing_static_part where ds = '2008-04-08' and  hr = '11' and key = '484' and value = 'val_484' ORDER BY key, value, ds, hr;
-select * from list_bucketing_static_part where ds = '2008-04-08' and  hr = '11' and key = '484' and value = 'val_484' ORDER BY key, value, ds, hr;
-select * from srcpart where ds = '2008-04-08' and key = '484' and value = 'val_484' ORDER BY key, value, ds, hr;
+select * from list_bucketing_static_part where ds = '2008-04-08' and  hr = '11' and key = '484' and value = 'val_484';
+select * from list_bucketing_static_part where ds = '2008-04-08' and  hr = '11' and key = '484' and value = 'val_484';
+select * from srcpart where ds = '2008-04-08' and key = '484' and value = 'val_484';
 
 -- clean up
 drop table list_bucketing_static_part;

Modified: hive/trunk/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_2.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_2.q?rev=1598678&r1=1598677&r2=1598678&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_2.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_2.q Fri May 30 17:37:05 2014
@@ -4,6 +4,7 @@ set mapred.input.dir.recursive=true;	
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 
 -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)	
+-- SORT_QUERY_RESULTS
 
 -- List bucketing query logic test case. We simulate the directory structure by DML here.
 -- Test condition: 
@@ -30,21 +31,21 @@ SELECT count(1) FROM fact_daily WHERE ds
 
 -- pruner only pick up default directory
 -- explain plan shows which directory selected: Truncated Path -> Alias
-explain extended SELECT key, value FROM fact_daily WHERE ds='1' and hr='4' and value= 'val_484' ORDER BY key, value;
+explain extended SELECT key, value FROM fact_daily WHERE ds='1' and hr='4' and value= 'val_484';
 -- List Bucketing Query
-SELECT key, value FROM fact_daily WHERE ds='1' and hr='4' and value= 'val_484' ORDER BY key, value;
+SELECT key, value FROM fact_daily WHERE ds='1' and hr='4' and value= 'val_484';
 
 -- pruner only pick up default directory
 -- explain plan shows which directory selected: Truncated Path -> Alias
-explain extended SELECT key FROM fact_daily WHERE ds='1' and hr='4' and key= '406' ORDER BY key;
+explain extended SELECT key FROM fact_daily WHERE ds='1' and hr='4' and key= '406';
 -- List Bucketing Query
-SELECT key, value FROM fact_daily WHERE ds='1' and hr='4' and key= '406' ORDER BY key;
+SELECT key, value FROM fact_daily WHERE ds='1' and hr='4' and key= '406';
 
 -- pruner only pick up skewed-value directory
 -- explain plan shows which directory selected: Truncated Path -> Alias
-explain extended SELECT key, value FROM fact_daily WHERE ds='1' and hr='4' and ( (key='484' and value ='val_484')  or (key='238' and value= 'val_238')) ORDER BY key, value;
+explain extended SELECT key, value FROM fact_daily WHERE ds='1' and hr='4' and ( (key='484' and value ='val_484')  or (key='238' and value= 'val_238'));
 -- List Bucketing Query
-SELECT key, value FROM fact_daily WHERE ds='1' and hr='4' and ( (key='484' and value ='val_484')  or (key='238' and value= 'val_238')) ORDER BY key, value;
+SELECT key, value FROM fact_daily WHERE ds='1' and hr='4' and ( (key='484' and value ='val_484')  or (key='238' and value= 'val_238'));
 
 -- clean up
 drop table fact_daily;

Modified: hive/trunk/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_1.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_1.q?rev=1598678&r1=1598677&r2=1598678&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_1.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_1.q Fri May 30 17:37:05 2014
@@ -4,6 +4,7 @@ set mapred.input.dir.recursive=true;	
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 
 -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)	
+-- SORT_QUERY_RESULTS
 
 -- List bucketing query logic test case. 
 -- Test condition: 
@@ -44,7 +45,7 @@ LOCATION '${hiveconf:hive.metastore.ware
 alter table fact_daily PARTITION (ds = '1') set skewed location (484='${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1/x=484','HIVE_DEFAULT_LIST_BUCKETING_KEY'='${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME');
 describe formatted fact_daily PARTITION (ds = '1');
 	
-SELECT * FROM fact_daily WHERE ds='1' ORDER BY x;	
+SELECT * FROM fact_daily WHERE ds='1';
 
 -- pruner only pick up skewed-value directory
 -- explain plan shows which directory selected: Truncated Path -> Alias

Modified: hive/trunk/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_2.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_2.q?rev=1598678&r1=1598677&r2=1598678&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_2.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_2.q Fri May 30 17:37:05 2014
@@ -4,6 +4,7 @@ set mapred.input.dir.recursive=true;	
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;	
 
 -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)	
+-- SORT_QUERY_RESULTS
 
 -- List bucketing query logic test case. 
 -- Test condition: 
@@ -45,7 +46,7 @@ LOCATION '${hiveconf:hive.metastore.ware
 alter table fact_daily PARTITION (ds = '1') set skewed location (484='${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1/x=484','HIVE_DEFAULT_LIST_BUCKETING_KEY'='${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME');
 describe formatted fact_daily PARTITION (ds = '1');
 	
-SELECT * FROM fact_daily WHERE ds='1' ORDER BY x, y;
+SELECT * FROM fact_daily WHERE ds='1';
 
 -- The first subquery
 -- explain plan shows which directory selected: Truncated Path -> Alias
@@ -55,9 +56,9 @@ select x from (select * from fact_daily 
 
 -- The second subquery
 -- explain plan shows which directory selected: Truncated Path -> Alias
-explain extended select x1, y1 from(select x as x1, y as y1 from fact_daily where ds ='1') subq where x1 = 484 ORDER BY x1, y1;
+explain extended select x1, y1 from(select x as x1, y as y1 from fact_daily where ds ='1') subq where x1 = 484;
 -- List Bucketing Query
-select x1, y1 from(select x as x1, y as y1 from fact_daily where ds ='1') subq where x1 = 484 ORDER BY x1, y1;
+select x1, y1 from(select x as x1, y as y1 from fact_daily where ds ='1') subq where x1 = 484;
 
 
 -- The third subquery

Modified: hive/trunk/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_3.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_3.q?rev=1598678&r1=1598677&r2=1598678&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_3.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_3.q Fri May 30 17:37:05 2014
@@ -4,6 +4,7 @@ set hive.input.format=org.apache.hadoop.
 set hive.mapred.supports.subdirectories=true;
 
 -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)	
+-- SORT_QUERY_RESULTS
 
 -- List bucketing query logic test case. 
 -- Test condition: 
@@ -52,10 +53,10 @@ alter table fact_daily PARTITION (ds = '
 'HIVE_DEFAULT_LIST_BUCKETING_KEY'='${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME');
 describe formatted fact_daily PARTITION (ds = '1');
 	
-SELECT * FROM fact_daily WHERE ds='1' ORDER BY x, y, z;	
+SELECT * FROM fact_daily WHERE ds='1';
 
 -- pruner  pick up right directory
 -- explain plan shows which directory selected: Truncated Path -> Alias
-explain extended SELECT x FROM fact_daily WHERE ds='1' and not (x = 86) ORDER BY x;
+explain extended SELECT x FROM fact_daily WHERE ds='1' and not (x = 86);
 -- List Bucketing Query
-SELECT x FROM fact_daily WHERE ds='1' and not (x = 86) ORDER BY x;
+SELECT x FROM fact_daily WHERE ds='1' and not (x = 86);

Modified: hive/trunk/ql/src/test/queries/clientpositive/load_dyn_part1.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/load_dyn_part1.q?rev=1598678&r1=1598677&r2=1598678&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/load_dyn_part1.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/load_dyn_part1.q Fri May 30 17:37:05 2014
@@ -1,3 +1,5 @@
+-- SORT_QUERY_RESULTS
+
 show partitions srcpart;
 
 
@@ -23,8 +25,8 @@ insert overwrite table nzhang_part2 part
 show partitions nzhang_part1;
 show partitions nzhang_part2;
 
-select * from nzhang_part1 where ds is not null and hr is not null order by ds, hr, key;
-select * from nzhang_part2 where ds is not null and hr is not null order by ds, hr, key;
+select * from nzhang_part1 where ds is not null and hr is not null;
+select * from nzhang_part2 where ds is not null and hr is not null;
 
 
 

Modified: hive/trunk/ql/src/test/queries/clientpositive/load_dyn_part10.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/load_dyn_part10.q?rev=1598678&r1=1598677&r2=1598678&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/load_dyn_part10.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/load_dyn_part10.q Fri May 30 17:37:05 2014
@@ -1,3 +1,5 @@
+-- SORT_QUERY_RESULTS
+
 show partitions srcpart;
 
 
@@ -19,6 +21,6 @@ insert overwrite table nzhang_part10 par
 
 show partitions nzhang_part10;
 
-select * from nzhang_part10 where ds is not null and hr is not null order by ds, hr, key;
+select * from nzhang_part10 where ds is not null and hr is not null;
 
 

Modified: hive/trunk/ql/src/test/queries/clientpositive/load_dyn_part14.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/load_dyn_part14.q?rev=1598678&r1=1598677&r2=1598678&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/load_dyn_part14.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/load_dyn_part14.q Fri May 30 17:37:05 2014
@@ -1,7 +1,9 @@
 -- EXCLUDE_OS_WINDOWS
 -- excluded on windows because of difference in file name encoding logic
 
-create table if not exists nzhang_part14 (key string) 
+-- SORT_QUERY_RESULTS
+
+create table if not exists nzhang_part14 (key string)
   partitioned by (value string);
 
 describe extended nzhang_part14;
@@ -31,7 +33,6 @@ select key, value from (
 
 show partitions nzhang_part14;
 
-select * from nzhang_part14 where value <> 'a'
-order by key, value;
+select * from nzhang_part14 where value <> 'a';
 
 

Modified: hive/trunk/ql/src/test/queries/clientpositive/load_dyn_part2.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/load_dyn_part2.q?rev=1598678&r1=1598677&r2=1598678&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/load_dyn_part2.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/load_dyn_part2.q Fri May 30 17:37:05 2014
@@ -1,3 +1,4 @@
+-- SORT_QUERY_RESULTS
 
 create table if not exists nzhang_part_bucket (key string, value string) 
   partitioned by (ds string, hr string) 
@@ -16,8 +17,8 @@ insert overwrite table nzhang_part_bucke
 
 show partitions nzhang_part_bucket;
 
-select * from nzhang_part_bucket where ds='2010-03-23' and hr='11' order by key;
-select * from nzhang_part_bucket where ds='2010-03-23' and hr='12' order by key;
+select * from nzhang_part_bucket where ds='2010-03-23' and hr='11';
+select * from nzhang_part_bucket where ds='2010-03-23' and hr='12';
 
 
 

Modified: hive/trunk/ql/src/test/queries/clientpositive/load_dyn_part3.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/load_dyn_part3.q?rev=1598678&r1=1598677&r2=1598678&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/load_dyn_part3.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/load_dyn_part3.q Fri May 30 17:37:05 2014
@@ -1,3 +1,5 @@
+-- SORT_QUERY_RESULTS
+
 show partitions srcpart;
 
 
@@ -14,6 +16,6 @@ insert overwrite table nzhang_part3 part
 
 insert overwrite table nzhang_part3 partition (ds, hr) select key, value, ds, hr from srcpart where ds is not null and hr is not null;
 
-select * from nzhang_part3 where ds is not null and hr is not null order by ds, hr, key;
+select * from nzhang_part3 where ds is not null and hr is not null;
 
 

Modified: hive/trunk/ql/src/test/queries/clientpositive/load_dyn_part4.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/load_dyn_part4.q?rev=1598678&r1=1598677&r2=1598678&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/load_dyn_part4.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/load_dyn_part4.q Fri May 30 17:37:05 2014
@@ -1,3 +1,5 @@
+-- SORT_QUERY_RESULTS
+
 show partitions srcpart;
 
 
@@ -17,8 +19,8 @@ insert overwrite table nzhang_part4 part
 insert overwrite table nzhang_part4 partition (ds, hr) select key, value, ds, hr from srcpart where ds is not null and hr is not null;
 
 show partitions nzhang_part4;
-select * from nzhang_part4 where ds='2008-04-08' and hr is not null order by ds, hr, key;
+select * from nzhang_part4 where ds='2008-04-08' and hr is not null;
 
-select * from nzhang_part4 where ds is not null and hr is not null order by ds, hr, key;
+select * from nzhang_part4 where ds is not null and hr is not null;
 
 

Modified: hive/trunk/ql/src/test/queries/clientpositive/load_dyn_part8.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/load_dyn_part8.q?rev=1598678&r1=1598677&r2=1598678&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/load_dyn_part8.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/load_dyn_part8.q Fri May 30 17:37:05 2014
@@ -1,3 +1,5 @@
+-- SORT_QUERY_RESULTS
+
 show partitions srcpart;
 
 
@@ -20,5 +22,5 @@ insert overwrite table nzhang_part8 part
 
 show partitions nzhang_part8;
 
-select * from nzhang_part8 where ds is not null and hr is not null order by ds, hr, key;
+select * from nzhang_part8 where ds is not null and hr is not null;
 

Modified: hive/trunk/ql/src/test/queries/clientpositive/load_dyn_part9.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/load_dyn_part9.q?rev=1598678&r1=1598677&r2=1598678&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/load_dyn_part9.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/load_dyn_part9.q Fri May 30 17:37:05 2014
@@ -1,3 +1,5 @@
+-- SORT_QUERY_RESULTS
+
 show partitions srcpart;
 
 
@@ -19,5 +21,5 @@ insert overwrite table nzhang_part9 part
 
 show partitions nzhang_part9;
 
-select * from nzhang_part9 where ds is not null and hr is not null order by ds, hr, key;
+select * from nzhang_part9 where ds is not null and hr is not null;
 

Modified: hive/trunk/ql/src/test/queries/clientpositive/mapjoin_memcheck.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/mapjoin_memcheck.q?rev=1598678&r1=1598677&r2=1598678&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/mapjoin_memcheck.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/mapjoin_memcheck.q Fri May 30 17:37:05 2014
@@ -1,6 +1,8 @@
 
 set hive.auto.convert.join = true;
 
+-- SORT_QUERY_RESULTS
+
 create table src0 like src;
 insert into table src0 select * from src where src.key < 10;
 
@@ -8,9 +10,9 @@ set hive.mapjoin.check.memory.rows=1;
 
 explain 
 select src1.key as k1, src1.value as v1, src2.key, src2.value
-from src0 src1 inner join src0 src2 on src1.key = src2.key order by k1, v1;
+from src0 src1 inner join src0 src2 on src1.key = src2.key;
 
 select src1.key as k1, src1.value as v1, src2.key, src2.value
-from src0 src1 inner join src0 src2 on src1.key = src2.key order by k1, v1;
+from src0 src1 inner join src0 src2 on src1.key = src2.key;
 
 drop table src0;
\ No newline at end of file

Modified: hive/trunk/ql/src/test/queries/clientpositive/mapjoin_subquery.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/mapjoin_subquery.q?rev=1598678&r1=1598677&r2=1598678&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/mapjoin_subquery.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/mapjoin_subquery.q Fri May 30 17:37:05 2014
@@ -2,6 +2,7 @@ set hive.auto.convert.join=true;
 set hive.auto.convert.join.noconditionaltask=true;
 set hive.auto.convert.join.noconditionaltask.size=10000;
 
+-- SORT_QUERY_RESULTS
 -- Since the inputs are small, it should be automatically converted to mapjoin
 
 EXPLAIN
@@ -15,20 +16,17 @@ SELECT subq.key1, z.value
 FROM
 (SELECT x.key as key1, x.value as value1, y.key as key2, y.value as value2 
  FROM src1 x JOIN src y ON (x.key = y.key)) subq
- JOIN srcpart z ON (subq.key1 = z.key and z.ds='2008-04-08' and z.hr=11)
-ORDER BY subq.key1, z.value;
+ JOIN srcpart z ON (subq.key1 = z.key and z.ds='2008-04-08' and z.hr=11);
  
 EXPLAIN 
 SELECT subq.key1, z.value
 FROM
 (SELECT x.key as key1, x.value as value1, y.key as key2, y.value as value2 
  FROM src1 x JOIN src y ON (x.key = y.key)) subq
- JOIN srcpart z ON (subq.key1 = z.key and z.ds='2008-04-08' and z.hr=11) 
- order by subq.key1, z.value;
+ JOIN srcpart z ON (subq.key1 = z.key and z.ds='2008-04-08' and z.hr=11);
 
 SELECT subq.key1, z.value
 FROM
 (SELECT x.key as key1, x.value as value1, y.key as key2, y.value as value2  
  FROM src1 x JOIN src y ON (x.key = y.key)) subq 
- JOIN srcpart z ON (subq.key1 = z.key and z.ds='2008-04-08' and z.hr=11)  
- order by subq.key1, z.value;
+ JOIN srcpart z ON (subq.key1 = z.key and z.ds='2008-04-08' and z.hr=11);

Modified: hive/trunk/ql/src/test/queries/clientpositive/merge3.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/merge3.q?rev=1598678&r1=1598677&r2=1598678&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/merge3.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/merge3.q Fri May 30 17:37:05 2014
@@ -4,7 +4,9 @@ set hive.input.format=org.apache.hadoop.
 set hive.exec.dynamic.partition=true;
 set hive.exec.dynamic.partition.mode=nonstrict;
 
-create table merge_src as 
+-- SORT_QUERY_RESULTS
+
+create table merge_src as
 select key, value from srcpart where ds is not null;
 
 create table merge_src_part (key string, value string) partitioned by (ds string);
@@ -20,7 +22,7 @@ select key, value from merge_src;
 create table merge_src2 as 
 select key, value from merge_src;
 
-select * from merge_src2 ORDER BY key ASC, value ASC;
+select * from merge_src2;
 describe formatted merge_src2;
 
 create table merge_src_part2 like merge_src_part;
@@ -37,7 +39,7 @@ where ds is not null;
 
 show partitions merge_src_part2;
 
-select * from merge_src_part2 where ds is not null ORDER BY key ASC, value ASC, ds ASC;
+select * from merge_src_part2 where ds is not null;
 
 drop table merge_src_part2;
 
@@ -54,4 +56,4 @@ select key, value, ds;
 
 show partitions merge_src_part2;
 
-select * from merge_src_part2 where ds is not null ORDER BY key ASC, value ASC, ds ASC;
+select * from merge_src_part2 where ds is not null;

Modified: hive/trunk/ql/src/test/queries/clientpositive/merge4.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/merge4.q?rev=1598678&r1=1598677&r2=1598678&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/merge4.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/merge4.q Fri May 30 17:37:05 2014
@@ -4,6 +4,8 @@ set hive.input.format=org.apache.hadoop.
 set hive.exec.dynamic.partition=true;
 set hive.exec.dynamic.partition.mode=nonstrict;
 
+-- SORT_QUERY_RESULTS
+
 create table nzhang_part like srcpart;
 
 explain
@@ -11,14 +13,14 @@ insert overwrite table nzhang_part parti
 
 insert overwrite table nzhang_part partition (ds='2010-08-15', hr) select key, value, hr from srcpart where ds='2008-04-08';
 
-select * from nzhang_part ORDER BY key, value, ds, hr;
+select * from nzhang_part;
 
 explain
 insert overwrite table nzhang_part partition (ds='2010-08-15', hr=11) select key, value from srcpart where ds='2008-04-08';
 
 insert overwrite table nzhang_part partition (ds='2010-08-15', hr=11) select key, value from srcpart where ds='2008-04-08';
 
-select * from nzhang_part ORDER BY key, value, ds, hr;
+select * from nzhang_part;
 
 explain
 insert overwrite table nzhang_part partition (ds='2010-08-15', hr) 

Modified: hive/trunk/ql/src/test/queries/clientpositive/merge_dynamic_partition.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/merge_dynamic_partition.q?rev=1598678&r1=1598677&r2=1598678&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/merge_dynamic_partition.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/merge_dynamic_partition.q Fri May 30 17:37:05 2014
@@ -1,6 +1,8 @@
 set hive.exec.dynamic.partition=true;
 set hive.exec.dynamic.partition.mode=nonstrict;
 
+-- SORT_QUERY_RESULTS
+
 create table srcpart_merge_dp like srcpart;
 
 create table merge_dynamic_part like srcpart;
@@ -19,7 +21,7 @@ explain
 insert overwrite table merge_dynamic_part partition (ds='2008-04-08', hr) select key, value, hr from srcpart_merge_dp where ds='2008-04-08';
 insert overwrite table merge_dynamic_part partition (ds='2008-04-08', hr) select key, value, hr from srcpart_merge_dp where ds='2008-04-08';
 
-select * from merge_dynamic_part order by key, value;
+select * from merge_dynamic_part;
 show table extended like `merge_dynamic_part`;
 
 
@@ -31,7 +33,7 @@ explain
 insert overwrite table merge_dynamic_part partition (ds='2008-04-08', hr=11) select key, value from srcpart_merge_dp where ds='2008-04-08';
 insert overwrite table merge_dynamic_part partition (ds='2008-04-08', hr=11) select key, value from srcpart_merge_dp where ds='2008-04-08';
 
-select * from merge_dynamic_part order by key, value;
+select * from merge_dynamic_part;
 show table extended like `merge_dynamic_part`;
 
 set hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
@@ -42,6 +44,6 @@ explain
 insert overwrite table merge_dynamic_part partition (ds, hr) select key, value, ds, hr from srcpart_merge_dp where ds='2008-04-08' and hr=11;
 insert overwrite table merge_dynamic_part partition (ds, hr) select key, value, ds, hr from srcpart_merge_dp where ds='2008-04-08' and hr=11;;
 
-select * from merge_dynamic_part order by key, value;
+select * from merge_dynamic_part;
 show table extended like `merge_dynamic_part`;
 

Modified: hive/trunk/ql/src/test/queries/clientpositive/merge_dynamic_partition3.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/merge_dynamic_partition3.q?rev=1598678&r1=1598677&r2=1598678&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/merge_dynamic_partition3.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/merge_dynamic_partition3.q Fri May 30 17:37:05 2014
@@ -1,6 +1,8 @@
 set hive.exec.dynamic.partition=true;
 set hive.exec.dynamic.partition.mode=nonstrict;
 
+-- SORT_QUERY_RESULTS
+
 create table srcpart_merge_dp like srcpart;
 
 create table merge_dynamic_part like srcpart;
@@ -33,6 +35,6 @@ insert overwrite table merge_dynamic_par
 
 insert overwrite table merge_dynamic_part partition (ds, hr) select key, value, ds, hr from srcpart_merge_dp where ds>='2008-04-08';
 
-select ds, hr, count(1) from merge_dynamic_part where ds>='2008-04-08' group by ds, hr order by ds, hr;
+select ds, hr, count(1) from merge_dynamic_part where ds>='2008-04-08' group by ds, hr;
 
 show table extended like `merge_dynamic_part`;

Modified: hive/trunk/ql/src/test/queries/clientpositive/mi.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/mi.q?rev=1598678&r1=1598677&r2=1598678&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/mi.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/mi.q Fri May 30 17:37:05 2014
@@ -1,5 +1,8 @@
 set hive.exec.dynamic.partition=true;
 set hive.exec.dynamic.partition.mode=nonstrict;
+
+-- SORT_QUERY_RESULTS
+
 create table nzhang_t1 like srcpart;
 create table nzhang_t2 like srcpart;
 
@@ -15,7 +18,7 @@ GROUP BY key, value, ds, hr;
 show partitions nzhang_t1;
 show partitions nzhang_t2;
 
-select * from nzhang_t1 order by key, value;
-select * from nzhang_t2 order by key, value;
+select * from nzhang_t1;
+select * from nzhang_t2;
 
 

Modified: hive/trunk/ql/src/test/queries/clientpositive/multi_insert.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/multi_insert.q?rev=1598678&r1=1598677&r2=1598678&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/multi_insert.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/multi_insert.q Fri May 30 17:37:05 2014
@@ -1,4 +1,4 @@
-
+-- SORT_QUERY_RESULTS
 
 create table src_multi1 like src;
 create table src_multi2 like src;
@@ -15,8 +15,8 @@ from src
 insert overwrite table src_multi1 select * where key < 10
 insert overwrite table src_multi2 select * where key > 10 and key < 20;
 
-select * from src_multi1 order by key, value;
-select * from src_multi2 order by key, value;
+select * from src_multi1;
+select * from src_multi2;
 
 
 set hive.merge.mapfiles=true;
@@ -31,8 +31,8 @@ from src
 insert overwrite table src_multi1 select * where key < 10
 insert overwrite table src_multi2 select * where key > 10 and key < 20;
 
-select * from src_multi1 order by key, value;
-select * from src_multi2 order by key, value;
+select * from src_multi1;
+select * from src_multi2;
 
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=true;
@@ -46,8 +46,8 @@ from src
 insert overwrite table src_multi1 select * where key < 10
 insert overwrite table src_multi2 select * where key > 10 and key < 20;
 
-select * from src_multi1 order by key, value;
-select * from src_multi2 order by key, value;
+select * from src_multi1;
+select * from src_multi2;
 
 set hive.merge.mapfiles=true;
 set hive.merge.mapredfiles=true;
@@ -61,8 +61,8 @@ from src
 insert overwrite table src_multi1 select * where key < 10
 insert overwrite table src_multi2 select * where key > 10 and key < 20;
 
-select * from src_multi1 order by key, value;
-select * from src_multi2 order by key, value;
+select * from src_multi1;
+select * from src_multi2;
 
 
 
@@ -78,8 +78,8 @@ from src
 insert overwrite table src_multi1 select * where key < 10 group by key, value
 insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value;
 
-select * from src_multi1 order by key, value;
-select * from src_multi2 order by key, value;
+select * from src_multi1;
+select * from src_multi2;
 
 
 set hive.merge.mapfiles=false;
@@ -94,8 +94,8 @@ from src
 insert overwrite table src_multi1 select * where key < 10 group by key, value
 insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value;
 
-select * from src_multi1 order by key, value;
-select * from src_multi2 order by key, value;
+select * from src_multi1;
+select * from src_multi2;
 
 set hive.merge.mapfiles=true;
 set hive.merge.mapredfiles=false;
@@ -109,8 +109,8 @@ from src
 insert overwrite table src_multi1 select * where key < 10 group by key, value
 insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value;
 
-select * from src_multi1 order by key, value;
-select * from src_multi2 order by key, value;
+select * from src_multi1;
+select * from src_multi2;
 
 
 set hive.merge.mapfiles=true;
@@ -125,8 +125,8 @@ from src
 insert overwrite table src_multi1 select * where key < 10 group by key, value
 insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value;
 
-select * from src_multi1 order by key, value;
-select * from src_multi2 order by key, value;
+select * from src_multi1;
+select * from src_multi2;
 
 
 
@@ -143,8 +143,8 @@ from (select * from src  union all selec
 insert overwrite table src_multi1 select * where key < 10
 insert overwrite table src_multi2 select * where key > 10 and key < 20;
 
-select * from src_multi1 order by key, value;
-select * from src_multi2 order by key, value;
+select * from src_multi1;
+select * from src_multi2;
 
 set hive.merge.mapfiles=true;
 set hive.merge.mapredfiles=false;
@@ -158,8 +158,8 @@ from (select * from src  union all selec
 insert overwrite table src_multi1 select * where key < 10
 insert overwrite table src_multi2 select * where key > 10 and key < 20;
 
-select * from src_multi1 order by key, value;
-select * from src_multi2 order by key, value;
+select * from src_multi1;
+select * from src_multi2;
 
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=true;
@@ -173,8 +173,8 @@ from (select * from src  union all selec
 insert overwrite table src_multi1 select * where key < 10
 insert overwrite table src_multi2 select * where key > 10 and key < 20;
 
-select * from src_multi1 order by key, value;
-select * from src_multi2 order by key, value;
+select * from src_multi1;
+select * from src_multi2;
 
 set hive.merge.mapfiles=true;
 set hive.merge.mapredfiles=true;
@@ -188,8 +188,8 @@ from (select * from src  union all selec
 insert overwrite table src_multi1 select * where key < 10
 insert overwrite table src_multi2 select * where key > 10 and key < 20;
 
-select * from src_multi1 order by key, value;
-select * from src_multi2 order by key, value;
+select * from src_multi1;
+select * from src_multi2;
 
 
 

Modified: hive/trunk/ql/src/test/queries/clientpositive/multi_insert_gby.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/multi_insert_gby.q?rev=1598678&r1=1598677&r2=1598678&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/multi_insert_gby.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/multi_insert_gby.q Fri May 30 17:37:05 2014
@@ -1,33 +1,35 @@
+-- SORT_QUERY_RESULTS
+
 --HIVE-3699 Multiple insert overwrite into multiple tables query stores same results in all tables
 create table e1 (key string, count int);
 create table e2 (key string, count int);
 
 explain FROM src
 INSERT OVERWRITE TABLE e1
-    SELECT key, COUNT(*) WHERE key>450 GROUP BY key ORDER BY key
+    SELECT key, COUNT(*) WHERE key>450 GROUP BY key
 INSERT OVERWRITE TABLE e2
-    SELECT key, COUNT(*) WHERE key>500 GROUP BY key ORDER BY key;
+    SELECT key, COUNT(*) WHERE key>500 GROUP BY key;
 
 FROM src
 INSERT OVERWRITE TABLE e1
-    SELECT key, COUNT(*) WHERE key>450 GROUP BY key ORDER BY key
+    SELECT key, COUNT(*) WHERE key>450 GROUP BY key
 INSERT OVERWRITE TABLE e2
-    SELECT key, COUNT(*) WHERE key>500 GROUP BY key ORDER BY key;
+    SELECT key, COUNT(*) WHERE key>500 GROUP BY key;
 
 select * from e1;
 select * from e2;
 
 explain FROM src
 INSERT OVERWRITE TABLE e1
-    SELECT key, COUNT(*) WHERE key>450 GROUP BY key ORDER BY key
+    SELECT key, COUNT(*) WHERE key>450 GROUP BY key
 INSERT OVERWRITE TABLE e2
-    SELECT key, COUNT(*) GROUP BY key ORDER BY key;
+    SELECT key, COUNT(*) GROUP BY key;
 
 FROM src
 INSERT OVERWRITE TABLE e1
-    SELECT key, COUNT(*) WHERE key>450 GROUP BY key ORDER BY key
+    SELECT key, COUNT(*) WHERE key>450 GROUP BY key
 INSERT OVERWRITE TABLE e2
-    SELECT key, COUNT(*) GROUP BY key ORDER BY key;
+    SELECT key, COUNT(*) GROUP BY key;
 
 select * from e1;
 select * from e2;

Modified: hive/trunk/ql/src/test/queries/clientpositive/multi_insert_lateral_view.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/multi_insert_lateral_view.q?rev=1598678&r1=1598677&r2=1598678&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/multi_insert_lateral_view.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/multi_insert_lateral_view.q Fri May 30 17:37:05 2014
@@ -1,3 +1,5 @@
+-- SORT_QUERY_RESULTS
+
 create table src_10 as select * from src limit 10;
 
 create table src_lv1 (key string, value string);
@@ -18,8 +20,8 @@ from src_10
 insert overwrite table src_lv1 select key, C lateral view explode(array(key+1, key+2)) A as C
 insert overwrite table src_lv2 select key, C lateral view explode(array(key+3, key+4)) A as C;
 
-select * from src_lv1 order by key, value;
-select * from src_lv2 order by key, value;
+select * from src_lv1;
+select * from src_lv2;
 
 -- 2(LV+GBY)
 -- TS[0]-LVF[1]-SEL[2]-LVJ[5]-SEL[11]-GBY[12]-RS[13]-GBY[14]-SEL[15]-FS[16]
@@ -35,8 +37,8 @@ from src_10
 insert overwrite table src_lv1 select key, sum(C) lateral view explode(array(key+1, key+2)) A as C group by key
 insert overwrite table src_lv2 select key, sum(C) lateral view explode(array(key+3, key+4)) A as C group by key;
 
-select * from src_lv1 order by key, value;
-select * from src_lv2 order by key, value;
+select * from src_lv1;
+select * from src_lv2;
 
 -- (LV+GBY) + RS:2GBY
 -- TS[0]-LVF[1]-SEL[2]-LVJ[5]-SEL[6]-GBY[7]-RS[8]-GBY[9]-SEL[10]-FS[11]
@@ -54,9 +56,9 @@ insert overwrite table src_lv1 select ke
 insert overwrite table src_lv2 select key, count(value) where key > 200 group by key
 insert overwrite table src_lv3 select key, count(value) where key < 200 group by key;
 
-select * from src_lv1 order by key, value;
-select * from src_lv2 order by key, value;
-select * from src_lv3 order by key, value;
+select * from src_lv1;
+select * from src_lv2;
+select * from src_lv3;
 
 -- todo: shared distinct columns (should work with hive.optimize.multigroupby.common.distincts)
 -- 2(LV+GBY) + RS:2GBY
@@ -76,9 +78,9 @@ insert overwrite table src_lv1 select C,
 insert overwrite table src_lv2 select C, sum(distinct key) lateral view explode(array(key+3, key+4)) A as C group by C
 insert overwrite table src_lv3 select value, sum(distinct key) group by value;
 
-select * from src_lv1 order by key, value;
-select * from src_lv2 order by key, value;
-select * from src_lv3 order by key, value;
+select * from src_lv1;
+select * from src_lv2;
+select * from src_lv3;
 
 create table src_lv4 (key string, value string);
 
@@ -96,7 +98,7 @@ insert overwrite table src_lv2 select ke
 insert overwrite table src_lv3 select value, sum(distinct key) where key > 200 group by value
 insert overwrite table src_lv4 select value, sum(distinct key) where key < 200 group by value;
 
-select * from src_lv1 order by key, value;
-select * from src_lv2 order by key, value;
-select * from src_lv3 order by key, value;
-select * from src_lv4 order by key, value;
+select * from src_lv1;
+select * from src_lv2;
+select * from src_lv3;
+select * from src_lv4;

Modified: hive/trunk/ql/src/test/queries/clientpositive/multi_insert_move_tasks_share_dependencies.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/multi_insert_move_tasks_share_dependencies.q?rev=1598678&r1=1598677&r2=1598678&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/multi_insert_move_tasks_share_dependencies.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/multi_insert_move_tasks_share_dependencies.q Fri May 30 17:37:05 2014
@@ -1,5 +1,7 @@
 set hive.multi.insert.move.tasks.share.dependencies=true;
 
+-- SORT_QUERY_RESULTS
+
 create table src_multi1 like src;
 create table src_multi2 like src;
 
@@ -15,8 +17,8 @@ from src
 insert overwrite table src_multi1 select * where key < 10
 insert overwrite table src_multi2 select * where key > 10 and key < 20;
 
-select * from src_multi1 order by key, value;
-select * from src_multi2 order by key, value;
+select * from src_multi1;
+select * from src_multi2;
 
 
 set hive.merge.mapfiles=true;
@@ -31,8 +33,8 @@ from src
 insert overwrite table src_multi1 select * where key < 10
 insert overwrite table src_multi2 select * where key > 10 and key < 20;
 
-select * from src_multi1 order by key, value;
-select * from src_multi2 order by key, value;
+select * from src_multi1;
+select * from src_multi2;
 
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=true;
@@ -46,8 +48,8 @@ from src
 insert overwrite table src_multi1 select * where key < 10
 insert overwrite table src_multi2 select * where key > 10 and key < 20;
 
-select * from src_multi1 order by key, value;
-select * from src_multi2 order by key, value;
+select * from src_multi1;
+select * from src_multi2;
 
 set hive.merge.mapfiles=true;
 set hive.merge.mapredfiles=true;
@@ -61,8 +63,8 @@ from src
 insert overwrite table src_multi1 select * where key < 10
 insert overwrite table src_multi2 select * where key > 10 and key < 20;
 
-select * from src_multi1 order by key, value;
-select * from src_multi2 order by key, value;
+select * from src_multi1;
+select * from src_multi2;
 
 
 
@@ -78,8 +80,8 @@ from src
 insert overwrite table src_multi1 select * where key < 10 group by key, value
 insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value;
 
-select * from src_multi1 order by key, value;
-select * from src_multi2 order by key, value;
+select * from src_multi1;
+select * from src_multi2;
 
 
 set hive.merge.mapfiles=false;
@@ -94,8 +96,8 @@ from src
 insert overwrite table src_multi1 select * where key < 10 group by key, value
 insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value;
 
-select * from src_multi1 order by key, value;
-select * from src_multi2 order by key, value;
+select * from src_multi1;
+select * from src_multi2;
 
 set hive.merge.mapfiles=true;
 set hive.merge.mapredfiles=false;
@@ -109,8 +111,8 @@ from src
 insert overwrite table src_multi1 select * where key < 10 group by key, value
 insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value;
 
-select * from src_multi1 order by key, value;
-select * from src_multi2 order by key, value;
+select * from src_multi1;
+select * from src_multi2;
 
 
 set hive.merge.mapfiles=true;
@@ -125,8 +127,8 @@ from src
 insert overwrite table src_multi1 select * where key < 10 group by key, value
 insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value;
 
-select * from src_multi1 order by key, value;
-select * from src_multi2 order by key, value;
+select * from src_multi1;
+select * from src_multi2;
 
 
 
@@ -143,8 +145,8 @@ from (select * from src  union all selec
 insert overwrite table src_multi1 select * where key < 10
 insert overwrite table src_multi2 select * where key > 10 and key < 20;
 
-select * from src_multi1 order by key, value;
-select * from src_multi2 order by key, value;
+select * from src_multi1;
+select * from src_multi2;
 
 set hive.merge.mapfiles=true;
 set hive.merge.mapredfiles=false;
@@ -158,8 +160,8 @@ from (select * from src  union all selec
 insert overwrite table src_multi1 select * where key < 10
 insert overwrite table src_multi2 select * where key > 10 and key < 20;
 
-select * from src_multi1 order by key, value;
-select * from src_multi2 order by key, value;
+select * from src_multi1;
+select * from src_multi2;
 
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=true;
@@ -173,8 +175,8 @@ from (select * from src  union all selec
 insert overwrite table src_multi1 select * where key < 10
 insert overwrite table src_multi2 select * where key > 10 and key < 20;
 
-select * from src_multi1 order by key, value;
-select * from src_multi2 order by key, value;
+select * from src_multi1;
+select * from src_multi2;
 
 set hive.merge.mapfiles=true;
 set hive.merge.mapredfiles=true;
@@ -188,8 +190,8 @@ from (select * from src  union all selec
 insert overwrite table src_multi1 select * where key < 10
 insert overwrite table src_multi2 select * where key > 10 and key < 20;
 
-select * from src_multi1 order by key, value;
-select * from src_multi2 order by key, value;
+select * from src_multi1;
+select * from src_multi2;
 
 
 
@@ -341,8 +343,8 @@ insert overwrite table src_multi2 select
 insert overwrite local directory '${system:test.tmp.dir}/hive_test/multiins_local/1' select * where key < 10 group by key, value cluster by key
 insert overwrite local directory '${system:test.tmp.dir}/hive_test/multiins_local/2' select * where key > 10 and key < 20 group by key, value cluster by value;
 
-select * from src_multi1 order by key, value;
-select * from src_multi2 order by key, value;
+select * from src_multi1;
+select * from src_multi2;
 
 dfs -ls ${system:test.tmp.dir}/hive_test/multiins_local;
 dfs -rmr ${system:test.tmp.dir}/hive_test/multiins_local;
@@ -363,8 +365,8 @@ insert overwrite table src_multi2 select
 insert overwrite local directory '${system:test.tmp.dir}/hive_test/multiins_local/1' select * where key < 10 group by key, value cluster by key
 insert overwrite local directory '${system:test.tmp.dir}/hive_test/multiins_local/2' select * where key > 10 and key < 20 group by key, value cluster by value;
 
-select * from src_multi1 order by key, value;
-select * from src_multi2 order by key, value;
+select * from src_multi1;
+select * from src_multi2;
 
 dfs -ls ${system:test.tmp.dir}/hive_test/multiins_local;
 dfs -rmr ${system:test.tmp.dir}/hive_test/multiins_local;
@@ -385,8 +387,8 @@ insert overwrite table src_multi2 select
 insert overwrite local directory '${system:test.tmp.dir}/hive_test/multiins_local/1' select * where key < 10 group by key, value cluster by key
 insert overwrite local directory '${system:test.tmp.dir}/hive_test/multiins_local/2' select * where key > 10 and key < 20 group by key, value cluster by value;
 
-select * from src_multi1 order by key, value;
-select * from src_multi2 order by key, value;
+select * from src_multi1;
+select * from src_multi2;
 
 dfs -ls ${system:test.tmp.dir}/hive_test/multiins_local;
 dfs -rmr ${system:test.tmp.dir}/hive_test/multiins_local;
@@ -407,8 +409,8 @@ insert overwrite table src_multi2 select
 insert overwrite local directory '${system:test.tmp.dir}/hive_test/multiins_local/1' select * where key < 10 group by key, value cluster by key
 insert overwrite local directory '${system:test.tmp.dir}/hive_test/multiins_local/2' select * where key > 10 and key < 20 group by key, value cluster by value;
 
-select * from src_multi1 order by key, value;
-select * from src_multi2 order by key, value;
+select * from src_multi1;
+select * from src_multi2;
 
 dfs -ls ${system:test.tmp.dir}/hive_test/multiins_local;
 dfs -rmr ${system:test.tmp.dir}/hive_test/multiins_local;

Modified: hive/trunk/ql/src/test/queries/clientpositive/newline.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/newline.q?rev=1598678&r1=1598677&r2=1598678&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/newline.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/newline.q Fri May 30 17:37:05 2014
@@ -1,12 +1,14 @@
 add file ../../data/scripts/newline.py;
 set hive.transform.escape.input=true;
 
+-- SORT_QUERY_RESULTS
+
 create table tmp_tmp(key string, value string) stored as rcfile;
 insert overwrite table tmp_tmp
 SELECT TRANSFORM(key, value) USING
 'python newline.py' AS key, value FROM src limit 6;
 
-select * from tmp_tmp ORDER BY key ASC, value ASC;
+select * from tmp_tmp;
 
 drop table tmp_tmp;
 
@@ -20,7 +22,7 @@ insert overwrite table tmp_tmp
 SELECT TRANSFORM(key, value) USING
 'python escapednewline.py' AS key, value FROM src limit 5;
 
-select * from tmp_tmp ORDER BY key ASC, value ASC;
+select * from tmp_tmp;
 
 SELECT TRANSFORM(key, value) USING
 'cat' AS (key, value) FROM tmp_tmp;
@@ -29,7 +31,7 @@ insert overwrite table tmp_tmp
 SELECT TRANSFORM(key, value) USING
 'python escapedcarriagereturn.py' AS key, value FROM src limit 5;
 
-select * from tmp_tmp ORDER BY key ASC, value ASC;
+select * from tmp_tmp;
 
 SELECT TRANSFORM(key, value) USING
 'cat' AS (key, value) FROM tmp_tmp;
@@ -38,7 +40,7 @@ insert overwrite table tmp_tmp
 SELECT TRANSFORM(key, value) USING
 'python escapedtab.py' AS key, value FROM src limit 5;
 
-select * from tmp_tmp ORDER BY key ASC, value ASC;
+select * from tmp_tmp;
 
 SELECT TRANSFORM(key, value) USING
 'cat' AS (key, value) FROM tmp_tmp;
@@ -47,7 +49,7 @@ insert overwrite table tmp_tmp
 SELECT TRANSFORM(key, value) USING
 'python doubleescapedtab.py' AS key, value FROM src limit 5;
 
-select * from tmp_tmp ORDER BY key ASC, value ASC;
+select * from tmp_tmp;
 
 SELECT TRANSFORM(key, value) USING
 'cat' AS (key, value) FROM tmp_tmp;

Modified: hive/trunk/ql/src/test/queries/clientpositive/nonblock_op_deduplicate.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/nonblock_op_deduplicate.q?rev=1598678&r1=1598677&r2=1598678&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/nonblock_op_deduplicate.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/nonblock_op_deduplicate.q Fri May 30 17:37:05 2014
@@ -1,3 +1,5 @@
+-- SORT_QUERY_RESULTS
+
 -- negative, references twice for result of funcion
 explain select nkey, nkey + 1 from (select key + 1 as nkey, value from src) a;
 
@@ -12,7 +14,7 @@ FROM (SELECT tmp2.key as key, tmp2.value
                   FROM src1) tmp1 ) tmp2
       JOIN (SELECT count(*) as count
             FROM src1) tmp3
-      ) tmp4 order by key, value, count;
+      ) tmp4;
 
 SELECT tmp4.key as key, tmp4.value as value, tmp4.count as count
 FROM (SELECT tmp2.key as key, tmp2.value as value, tmp3.count as count
@@ -21,7 +23,7 @@ FROM (SELECT tmp2.key as key, tmp2.value
                   FROM src1) tmp1 ) tmp2
       JOIN (SELECT count(*) as count
             FROM src1) tmp3
-      ) tmp4 order by key, value, count;
+      ) tmp4;
 
 set hive.auto.convert.join=true;
 -- Then, we convert the join to MapJoin.
@@ -33,7 +35,7 @@ FROM (SELECT tmp2.key as key, tmp2.value
                   FROM src1) tmp1 ) tmp2
       JOIN (SELECT count(*) as count
             FROM src1) tmp3
-      ) tmp4 order by key, value, count;
+      ) tmp4;
 
 SELECT tmp4.key as key, tmp4.value as value, tmp4.count as count
 FROM (SELECT tmp2.key as key, tmp2.value as value, tmp3.count as count
@@ -42,4 +44,4 @@ FROM (SELECT tmp2.key as key, tmp2.value
                   FROM src1) tmp1 ) tmp2
       JOIN (SELECT count(*) as count
             FROM src1) tmp3
-      ) tmp4 order by key, value, count;
+      ) tmp4;

Modified: hive/trunk/ql/src/test/queries/clientpositive/orc_create.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/orc_create.q?rev=1598678&r1=1598677&r2=1598678&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/orc_create.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/orc_create.q Fri May 30 17:37:05 2014
@@ -1,3 +1,5 @@
+-- SORT_QUERY_RESULTS
+
 DROP TABLE orc_create;
 DROP TABLE orc_create_complex;
 DROP TABLE orc_create_staging;
@@ -114,8 +116,7 @@ SELECT COUNT(*) FROM orc_create_people w
 
 -- test predicate push down with no column projection
 SELECT id, first_name, last_name, address
-  FROM orc_create_people WHERE id > 90
-  ORDER BY id, first_name, last_name;
+  FROM orc_create_people WHERE id > 90;
 
 DROP TABLE orc_create;
 DROP TABLE orc_create_complex;

Modified: hive/trunk/ql/src/test/queries/clientpositive/orc_diff_part_cols.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/orc_diff_part_cols.q?rev=1598678&r1=1598677&r2=1598678&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/orc_diff_part_cols.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/orc_diff_part_cols.q Fri May 30 17:37:05 2014
@@ -1,3 +1,5 @@
+-- SORT_QUERY_RESULTS
+
 CREATE TABLE test_orc (key STRING)
 PARTITIONED BY (part STRING)
 ROW FORMAT SERDE 'org.apache.hadoop.hive.ql.io.orc.OrcSerde' 
@@ -16,4 +18,4 @@ ALTER TABLE test_orc ADD COLUMNS (cnt IN
 
 INSERT OVERWRITE TABLE test_orc PARTITION (part = '2') SELECT key, count(*) FROM src GROUP BY key LIMIT 5;
 
-SELECT * FROM test_orc ORDER BY key; 
+SELECT * FROM test_orc;

Modified: hive/trunk/ql/src/test/queries/clientpositive/orc_diff_part_cols2.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/orc_diff_part_cols2.q?rev=1598678&r1=1598677&r2=1598678&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/orc_diff_part_cols2.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/orc_diff_part_cols2.q Fri May 30 17:37:05 2014
@@ -1,6 +1,8 @@
 -- Create a table with one column, write to it, then add an additional column
 -- This can break reads
 
+-- SORT_QUERY_RESULTS
+
 CREATE TABLE test_orc (key STRING)
 STORED AS ORC;
 
@@ -8,4 +10,4 @@ INSERT OVERWRITE TABLE test_orc SELECT k
 
 ALTER TABLE test_orc ADD COLUMNS (value STRING);
 
-SELECT * FROM test_orc order by key;
+SELECT * FROM test_orc;

Modified: hive/trunk/ql/src/test/queries/clientpositive/orc_empty_strings.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/orc_empty_strings.q?rev=1598678&r1=1598677&r2=1598678&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/orc_empty_strings.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/orc_empty_strings.q Fri May 30 17:37:05 2014
@@ -1,3 +1,5 @@
+-- SORT_QUERY_RESULTS
+
 CREATE TABLE test_orc (key STRING)
 ROW FORMAT SERDE 'org.apache.hadoop.hive.ql.io.orc.OrcSerde' 
 STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.orc.OrcInputFormat' 
@@ -13,4 +15,4 @@ INSERT OVERWRITE TABLE test_orc SELECT I
 
 -- Test reading a column which has some empty strings
 
-SELECT * FROM test_orc ORDER BY key;
+SELECT * FROM test_orc;

Modified: hive/trunk/ql/src/test/queries/clientpositive/orc_predicate_pushdown.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/orc_predicate_pushdown.q?rev=1598678&r1=1598677&r2=1598678&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/orc_predicate_pushdown.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/orc_predicate_pushdown.q Fri May 30 17:37:05 2014
@@ -1,3 +1,5 @@
+-- SORT_QUERY_RESULTS
+
 CREATE TABLE orc_pred(t tinyint,
            si smallint,
            i int,
@@ -87,28 +89,28 @@ SELECT t, s FROM orc_pred
   WHERE t <=> -1
   AND s IS NOT NULL
   AND s LIKE 'bob%'
-  ORDER BY s;
+  ;
 
 SET hive.optimize.index.filter=true;
 SELECT t, s FROM orc_pred
   WHERE t <=> -1
   AND s IS NOT NULL
   AND s LIKE 'bob%'
-  ORDER BY s;
+  ;
 SET hive.optimize.index.filter=false;
 
 EXPLAIN SELECT t, s FROM orc_pred
   WHERE t <=> -1
   AND s IS NOT NULL
   AND s LIKE 'bob%'
-  ORDER BY s;
+  ;
 
 SET hive.optimize.index.filter=true;
 EXPLAIN SELECT t, s FROM orc_pred
   WHERE t <=> -1
   AND s IS NOT NULL
   AND s LIKE 'bob%'
-  ORDER BY s;
+  ;
 SET hive.optimize.index.filter=false;
 
 SELECT t, s FROM orc_pred

Modified: hive/trunk/ql/src/test/queries/clientpositive/orc_split_elimination.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/orc_split_elimination.q?rev=1598678&r1=1598677&r2=1598678&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/orc_split_elimination.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/orc_split_elimination.q Fri May 30 17:37:05 2014
@@ -1,3 +1,5 @@
+-- SORT_QUERY_RESULTS
+
 create table orc_split_elim (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc;
 
 load data local inpath '../../data/files/orc_split_elim.orc' into table orc_split_elim;
@@ -61,43 +63,43 @@ select count(*) from orc_split_elim wher
 SET hive.optimize.index.filter=false;
 
 -- 5 mappers
-select userid,string1,subtype,decimal1,ts from orc_split_elim where userid<=2 order by userid;
+select userid,string1,subtype,decimal1,ts from orc_split_elim where userid<=2;
 
 SET hive.optimize.index.filter=true;
 -- 1 mapper
-select userid,string1,subtype,decimal1,ts from orc_split_elim where userid<=2 order by userid;
+select userid,string1,subtype,decimal1,ts from orc_split_elim where userid<=2;
 SET hive.optimize.index.filter=false;
 
 -- 5 mappers
-select userid,string1,subtype,decimal1,ts from orc_split_elim where userid<=5 order by userid;
+select userid,string1,subtype,decimal1,ts from orc_split_elim where userid<=5;
 
 SET hive.optimize.index.filter=true;
 -- 2 mappers
-select userid,string1,subtype,decimal1,ts from orc_split_elim where userid<=5 order by userid;
+select userid,string1,subtype,decimal1,ts from orc_split_elim where userid<=5;
 SET hive.optimize.index.filter=false;
 
 -- 5 mappers
-select userid,string1,subtype,decimal1,ts from orc_split_elim where userid<=13 order by userid;
+select userid,string1,subtype,decimal1,ts from orc_split_elim where userid<=13;
 
 SET hive.optimize.index.filter=true;
 -- 3 mappers
-select userid,string1,subtype,decimal1,ts from orc_split_elim where userid<=13 order by userid;
+select userid,string1,subtype,decimal1,ts from orc_split_elim where userid<=13;
 SET hive.optimize.index.filter=false;
 
 -- 5 mappers
-select userid,string1,subtype,decimal1,ts from orc_split_elim where userid<=29 order by userid;
+select userid,string1,subtype,decimal1,ts from orc_split_elim where userid<=29;
 
 SET hive.optimize.index.filter=true;
 -- 4 mappers
-select userid,string1,subtype,decimal1,ts from orc_split_elim where userid<=29 order by userid;
+select userid,string1,subtype,decimal1,ts from orc_split_elim where userid<=29;
 SET hive.optimize.index.filter=false;
 
 -- 5 mappers
-select userid,string1,subtype,decimal1,ts from orc_split_elim where userid<=70 order by userid;
+select userid,string1,subtype,decimal1,ts from orc_split_elim where userid<=70;
 
 SET hive.optimize.index.filter=true;
 -- 5 mappers
-select userid,string1,subtype,decimal1,ts from orc_split_elim where userid<=70 order by userid;
+select userid,string1,subtype,decimal1,ts from orc_split_elim where userid<=70;
 SET hive.optimize.index.filter=false;
 
 -- partitioned table
@@ -110,59 +112,59 @@ load data local inpath '../../data/files
 load data local inpath '../../data/files/orc_split_elim.orc' into table orc_split_elim_part partition(country='us', year=2001);
 
 -- 10 mapper - no split elimination
-select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=2 and country='us'order by userid;
+select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=2 and country='us';
 
 SET hive.optimize.index.filter=true;
 -- 2 mapper - split elimination
-select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=2 and country='us' order by userid;
+select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=2 and country='us';
 SET hive.optimize.index.filter=false;
 
 -- 10 mapper - no split elimination
-select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=2 and country='us' and (year=2000 or year=2001) order by userid;
+select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=2 and country='us' and (year=2000 or year=2001);
 
 SET hive.optimize.index.filter=true;
 -- 2 mapper - split elimination
-select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=2 and country='us' and (year=2000 or year=2001) order by userid;
+select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=2 and country='us' and (year=2000 or year=2001);
 SET hive.optimize.index.filter=false;
 
 -- 10 mapper - no split elimination
-select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=2 and country='us' and year=2000 order by userid;
+select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=2 and country='us' and year=2000;
 
 SET hive.optimize.index.filter=true;
 -- 1 mapper - split elimination
-select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=2 and country='us' and year=2000 order by userid;
+select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=2 and country='us' and year=2000;
 SET hive.optimize.index.filter=false;
 
 -- 10 mapper - no split elimination
-select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=5 and country='us' order by userid;
+select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=5 and country='us';
 
 SET hive.optimize.index.filter=true;
 -- 4 mapper - split elimination
-select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=5 and country='us' order by userid;
+select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=5 and country='us';
 SET hive.optimize.index.filter=false;
 
 -- 10 mapper - no split elimination
-select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=5 and country='us' and (year=2000 or year=2001) order by userid;
+select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=5 and country='us' and (year=2000 or year=2001);
 
 SET hive.optimize.index.filter=true;
 -- 4 mapper - split elimination
-select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=5 and country='us' and (year=2000 or year=2001) order by userid;
+select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=5 and country='us' and (year=2000 or year=2001);
 SET hive.optimize.index.filter=false;
 
 -- 10 mapper - no split elimination
-select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=5 and country='us' and year=2000 order by userid;
+select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=5 and country='us' and year=2000;
 
 SET hive.optimize.index.filter=true;
 -- 2 mapper - split elimination
-select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=5 and country='us' and year=2000 order by userid;
+select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=5 and country='us' and year=2000;
 SET hive.optimize.index.filter=false;
 
 -- 0 mapper - no split elimination
-select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=70 and country='in' order by userid;
-select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=70 and country='us' and year=2002 order by userid;
+select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=70 and country='in';
+select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=70 and country='us' and year=2002;
 
 SET hive.optimize.index.filter=true;
 -- 0 mapper - split elimination
-select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=70 and country='in' order by userid;
-select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=70 and country='us' and year=2002 order by userid;
+select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=70 and country='in';
+select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=70 and country='us' and year=2002;
 SET hive.optimize.index.filter=false;
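
The mapper-count comments in this file (and the hive.optimize.index.filter toggling in orc_predicate_pushdown.q above) come from ORC split elimination: each stripe records min/max column statistics, so with the filter optimization enabled a predicate such as userid<=13 lets split generation drop any split whose recorded range cannot match, leaving fewer mappers. A conceptual sketch of that pruning decision follows; the statistics class and sample ranges are invented for illustration, while the real logic lives in ORC's stripe statistics and Hive's split generation.

// Keep a split only if its recorded userid range could satisfy "userid <= bound".
public class SplitEliminationSketch {
    static final class ColumnRange {
        final long min;
        final long max;
        ColumnRange(long min, long max) { this.min = min; this.max = max; }
    }

    static boolean splitMightMatch(ColumnRange useridRange, long bound) {
        // If even the smallest value in the split exceeds the bound, no row can qualify.
        return useridRange.min <= bound;
    }

    public static void main(String[] args) {
        ColumnRange[] splits = {
            new ColumnRange(1, 2), new ColumnRange(5, 13),
            new ColumnRange(29, 70), new ColumnRange(100, 200)
        };
        long bound = 13;
        int kept = 0;
        for (ColumnRange r : splits) {
            if (splitMightMatch(r, bound)) {
                kept++;   // this split still needs a mapper
            }
        }
        System.out.println("splits kept: " + kept);   // 2 of the 4 sample ranges
    }
}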

Modified: hive/trunk/ql/src/test/queries/clientpositive/parallel.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/parallel.q?rev=1598678&r1=1598677&r2=1598678&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/parallel.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/parallel.q Fri May 30 17:37:05 2014
@@ -2,6 +2,8 @@ set mapred.job.name='test_parallel';
 set hive.exec.parallel=true;
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 
+-- SORT_QUERY_RESULTS
+
 create table if not exists src_a like src;
 create table if not exists src_b like src;
 
@@ -14,8 +16,8 @@ from (select key, value from src group b
 insert overwrite table src_a select s.key, s.value group by s.key, s.value
 insert overwrite table src_b select s.key, s.value group by s.key, s.value;
 
-select * from src_a order by key, value;
-select * from src_b order by key, value;
+select * from src_a;
+select * from src_b;
 
 
 set hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
@@ -24,5 +26,5 @@ from (select key, value from src group b
 insert overwrite table src_a select s.key, s.value group by s.key, s.value
 insert overwrite table src_b select s.key, s.value group by s.key, s.value;
 
-select * from src_a order by key, value;
-select * from src_b order by key, value;
+select * from src_a;
+select * from src_b;

Modified: hive/trunk/ql/src/test/queries/clientpositive/parquet_partitioned.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/parquet_partitioned.q?rev=1598678&r1=1598677&r2=1598678&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/parquet_partitioned.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/parquet_partitioned.q Fri May 30 17:37:05 2014
@@ -1,6 +1,8 @@
 set hive.exec.dynamic.partition.mode=nonstrict;
 set hive.exec.dynamic.partition=true;
 
+-- SORT_QUERY_RESULTS
+
 DROP TABLE parquet_partitioned_staging;
 DROP TABLE parquet_partitioned;
 
@@ -26,9 +28,9 @@ SELECT * FROM parquet_partitioned_stagin
 INSERT OVERWRITE TABLE parquet_partitioned PARTITION (part) SELECT * FROM parquet_partitioned_staging;
 
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-SELECT * FROM parquet_partitioned ORDER BY id, str;
+SELECT * FROM parquet_partitioned;
 SELECT part, COUNT(0) FROM parquet_partitioned GROUP BY part;
 
 set hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
-SELECT * FROM parquet_partitioned ORDER BY id, str;
+SELECT * FROM parquet_partitioned;
 SELECT part, COUNT(0) FROM parquet_partitioned GROUP BY part;

Modified: hive/trunk/ql/src/test/queries/clientpositive/stats4.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/stats4.q?rev=1598678&r1=1598677&r2=1598678&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/stats4.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/stats4.q Fri May 30 17:37:05 2014
@@ -1,6 +1,8 @@
 set datanucleus.cache.collections=false;
 set hive.stats.autogather=true;
 
+-- SORT_AND_HASH_QUERY_RESULTS
+
 show partitions srcpart;
 
 drop table nzhang_part1;
@@ -25,8 +27,8 @@ insert overwrite table nzhang_part2 part
 show partitions nzhang_part1;
 show partitions nzhang_part2;
 
-select * from nzhang_part1 where ds is not null and hr is not null order by ds, hr, key;
-select * from nzhang_part2 where ds is not null and hr is not null order by ds, hr, key;
+select * from nzhang_part1 where ds is not null and hr is not null;
+select * from nzhang_part2 where ds is not null and hr is not null;
 
 describe formatted nzhang_part1 partition(ds='2008-04-08',hr=11);
 describe formatted nzhang_part1 partition(ds='2008-04-08',hr=12);
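
stats4.q gets -- SORT_AND_HASH_QUERY_RESULTS rather than the plain sort directive; presumably the driver also reduces the sorted rows to a digest so that golden files for queries dumping whole partitions stay small. A hedged sketch of that idea, assuming sort-then-digest semantics (not taken from Hive's test driver; the class and method names are invented):

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Arrays;
import java.util.List;

// Sort the result rows, then record only an MD5 digest of them.
public class SortAndHashSketch {
    static String sortedDigest(List<String> rows) throws NoSuchAlgorithmException {
        String[] sorted = rows.toArray(new String[0]);
        Arrays.sort(sorted);
        MessageDigest md5 = MessageDigest.getInstance("MD5");
        for (String row : sorted) {
            md5.update(row.getBytes(StandardCharsets.UTF_8));
            md5.update((byte) '\n');
        }
        StringBuilder hex = new StringBuilder();
        for (byte b : md5.digest()) {
            hex.append(String.format("%02x", b));
        }
        return hex.toString();
    }

    public static void main(String[] args) throws NoSuchAlgorithmException {
        System.out.println(sortedDigest(Arrays.asList("238\tval_238", "86\tval_86")));
    }
}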

Modified: hive/trunk/ql/src/test/queries/clientpositive/subquery_in.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/subquery_in.q?rev=1598678&r1=1598677&r2=1598678&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/subquery_in.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/subquery_in.q Fri May 30 17:37:05 2014
@@ -1,3 +1,5 @@
+-- SORT_QUERY_RESULTS
+
 DROP TABLE part;
 
 -- data setup
@@ -47,7 +49,6 @@ where src.key in (select key from src s1
 select * 
 from src 
 where src.key in (select key from src s1 where s1.key > '9')
-order by key
 ;
 
 -- non agg, corr
@@ -68,7 +69,6 @@ where b.key in
          from src a 
          where b.value = a.value and a.key > '9'
         )
-order by b.key
 ;
 
 -- agg, non corr
@@ -88,7 +88,6 @@ part where part.p_size in 
 	 from (select p_size, rank() over(partition by p_mfgr order by p_size) as r from part) a 
 	 where r <= 2
 	)
-order by p_name
 ;
 
 -- agg, corr
@@ -107,7 +106,6 @@ from part b where b.p_size in 
 	 from (select p_mfgr, p_size, rank() over(partition by p_mfgr order by p_size) as r from part) a 
 	 where r <= 2 and b.p_mfgr = a.p_mfgr
 	)
-order by p_mfgr, p_name, p_size 
 ;
 
 -- distinct, corr
@@ -128,7 +126,6 @@ where b.key in
          from src a 
          where b.value = a.value and a.key > '9'
         )
-order by b.key
 ;
 
 -- non agg, non corr, windowing
@@ -136,7 +133,6 @@ select p_mfgr, p_name, p_size 
 from part 
 where part.p_size in 
   (select first_value(p_size) over(partition by p_mfgr order by p_size) from part)
-order by p_mfgr, p_name, p_size 
 ;
 
 -- non agg, non corr, with join in Parent Query
@@ -151,7 +147,6 @@ select p.p_partkey, li.l_suppkey 
 from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey 
 where li.l_linenumber = 1 and
  li.l_orderkey in (select l_orderkey from lineitem where l_shipmode = 'AIR')
-order by p.p_partkey, li.l_suppkey 
 ;
 
 -- non agg, corr, with join in Parent Query
@@ -159,5 +154,4 @@ select p.p_partkey, li.l_suppkey 
 from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey 
 where li.l_linenumber = 1 and
  li.l_orderkey in (select l_orderkey from lineitem where l_shipmode = 'AIR' and l_linenumber = li.l_linenumber)
-order by p.p_partkey, li.l_suppkey 
 ;

Modified: hive/trunk/ql/src/test/queries/clientpositive/subquery_in_having.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/subquery_in_having.q?rev=1598678&r1=1598677&r2=1598678&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/subquery_in_having.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/subquery_in_having.q Fri May 30 17:37:05 2014
@@ -1,3 +1,4 @@
+-- SORT_QUERY_RESULTS
 
 -- data setup
 CREATE TABLE part( 
@@ -23,13 +24,12 @@ having count(*) in (select count(*) from
 ;
 
 
-select s1.key, count(*) from src s1 where s1.key > '9' group by s1.key order by s1.key;
+select s1.key, count(*) from src s1 where s1.key > '9' group by s1.key;
 
 select key, count(*) 
 from src 
 group by key
 having count(*) in (select count(*) from src s1 where s1.key = '90' group by s1.key )
-order by key
 ;
 
 -- non agg, corr
@@ -61,7 +61,6 @@ having b.key in ( select a.key
                 from src a
                 where a.value > 'val_9' and a.value = min(b.value)
                 )
-order by b.key
 ;
 
 -- where and having

Modified: hive/trunk/ql/src/test/queries/clientpositive/subquery_views.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/subquery_views.q?rev=1598678&r1=1598677&r2=1598678&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/subquery_views.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/subquery_views.q Fri May 30 17:37:05 2014
@@ -1,4 +1,4 @@
-
+-- SORT_QUERY_RESULTS
 
 -- exists test
 create view cv1 as 
@@ -11,7 +11,7 @@ where exists
 ;
 
 select * 
-from cv1 where cv1.key in (select key from cv1 c where c.key > '95') order by key;
+from cv1 where cv1.key in (select key from cv1 c where c.key > '95');
 ;
 
 
@@ -27,7 +27,7 @@ where b.key not in
 ;
 
 select * 
-from cv2 where cv2.key in (select key from cv2 c where c.key < '11') order by key;
+from cv2 where cv2.key in (select key from cv2 c where c.key < '11');
 ;
 
 -- in where + having
@@ -39,10 +39,10 @@ group by key, value
 having count(*) in (select count(*) from src s1 where s1.key > '9' group by s1.key )
 ;
 
-select * from cv3 order by key;
+select * from cv3;
 
 
 -- join of subquery views
 select *
 from cv3
-where cv3.key in (select key from cv1) order by key;
+where cv3.key in (select key from cv1);
\ No newline at end of file

Modified: hive/trunk/ql/src/test/queries/clientpositive/union22.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/union22.q?rev=1598678&r1=1598677&r2=1598678&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/union22.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/union22.q Fri May 30 17:37:05 2014
@@ -1,3 +1,5 @@
+-- SORT_QUERY_RESULTS
+
 create table dst_union22(k1 string, k2 string, k3 string, k4 string) partitioned by (ds string);
 create table dst_union22_delta(k0 string, k1 string, k2 string, k3 string, k4 string, k5 string) partitioned by (ds string);
 
@@ -40,4 +42,4 @@ where a.k1 > 20
 )
 subq;
 
-select * from dst_union22 where ds = '2' order by k1, k2, k3, k4;
+select * from dst_union22 where ds = '2';

Modified: hive/trunk/ql/src/test/queries/clientpositive/union24.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/union24.q?rev=1598678&r1=1598677&r2=1598678&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/union24.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/union24.q Fri May 30 17:37:05 2014
@@ -1,3 +1,5 @@
+-- SORT_QUERY_RESULTS
+
 create table src2 as select key, count(1) as count from src group by key;
 create table src3 as select * from src2;
 create table src4 as select * from src2;
@@ -18,7 +20,7 @@ select s.key, s.count from (
   union all
   select key, count(1) as count from src5 where key < 10 group by key
 )s
-order by s.key ASC, s.count ASC;
+;
 
 select s.key, s.count from (
   select key, count from src2  where key < 10
@@ -29,7 +31,7 @@ select s.key, s.count from (
   union all
   select key, count(1) as count from src5 where key < 10 group by key
 )s
-order by s.key ASC, s.count ASC;
+;
 
 explain extended
 select s.key, s.count from (
@@ -39,7 +41,7 @@ select s.key, s.count from (
   union all
   select a.key as key, b.count as count from src4 a join src5 b on a.key=b.key where a.key < 10
 )s
-order by s.key ASC, s.count ASC;
+;
 
 select s.key, s.count from (
   select key, count from src2  where key < 10
@@ -48,7 +50,7 @@ select s.key, s.count from (
   union all
   select a.key as key, b.count as count from src4 a join src5 b on a.key=b.key where a.key < 10
 )s
-order by s.key ASC, s.count ASC;
+;
 
 explain extended
 select s.key, s.count from (
@@ -58,7 +60,7 @@ select s.key, s.count from (
   union all
   select a.key as key, count(1) as count from src4 a join src5 b on a.key=b.key where a.key < 10 group by a.key
 )s
-order by s.key ASC, s.count ASC;
+;
 
 select s.key, s.count from (
   select key, count from src2  where key < 10
@@ -67,4 +69,4 @@ select s.key, s.count from (
   union all
   select a.key as key, count(1) as count from src4 a join src5 b on a.key=b.key where a.key < 10 group by a.key
 )s
-order by s.key ASC, s.count ASC;
+;

Modified: hive/trunk/ql/src/test/queries/clientpositive/union26.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/union26.q?rev=1598678&r1=1598677&r2=1598678&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/union26.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/union26.q Fri May 30 17:37:05 2014
@@ -1,4 +1,6 @@
-EXPLAIN 
+-- SORT_QUERY_RESULTS
+
+EXPLAIN
 SELECT 
 count(1) as counts,
 key,
@@ -40,7 +42,7 @@ select key, value 
 FROM srcpart LATERAL VIEW explode(array(1,2,3)) myTable AS myCol
 WHERE ds='2008-04-08' and hr='11'
 ) a
-group by key, value order by key, value
+group by key, value
 ;
 
 
@@ -85,5 +87,5 @@ select key, value 
 FROM srcpart LATERAL VIEW explode(array(1,2,3)) myTable AS myCol
 WHERE ds='2008-04-08' and hr='11'
 ) a
-group by key, value order by key, value
+group by key, value
 ;

Modified: hive/trunk/ql/src/test/queries/clientpositive/union31.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/union31.q?rev=1598678&r1=1598677&r2=1598678&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/union31.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/union31.q Fri May 30 17:37:05 2014
@@ -1,3 +1,5 @@
+-- SORT_QUERY_RESULTS
+
 drop table t1;
 drop table t2;
 
@@ -29,8 +31,8 @@ insert overwrite table t3
 insert overwrite table t4
   select value, count(1) group by value;
 
-select * from t3 order by key;
-select * from t4 order by value;
+select * from t3;
+select * from t4;
 
 create table t5(c1 string, cnt int);
 create table t6(c1 string, cnt int);
@@ -58,8 +60,8 @@ insert overwrite table t5
 insert overwrite table t6
   select c1, sum(cnt) group by c1;
 
-select * from t5 order by c1;
-select * from t6 order by c1;
+select * from t5;
+select * from t6;
 
 drop table t1;
 drop table t2;
@@ -93,5 +95,5 @@ insert overwrite table t7
 insert overwrite table t8
   select c1, count(1) group by c1;
 
-select * from t7 order by c1;
-select * from t8 order by c1;
+select * from t7;
+select * from t8;

Modified: hive/trunk/ql/src/test/queries/clientpositive/union32.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/union32.q?rev=1598678&r1=1598677&r2=1598678&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/union32.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/union32.q Fri May 30 17:37:05 2014
@@ -1,3 +1,5 @@
+-- SORT_QUERY_RESULTS
+
 -- This tests various union queries which have columns on one side of the query
 -- being of double type and those on the other side another
 
@@ -16,7 +18,7 @@ SELECT * FROM 
 (SELECT CAST(key AS DOUBLE) AS key FROM t1
 UNION ALL
 SELECT CAST(key AS BIGINT) AS key FROM t2) a
-ORDER BY key;
+;
 
 -- Test union with join on the left
 EXPLAIN
@@ -24,13 +26,13 @@ SELECT * FROM 
 (SELECT CAST(a.key AS BIGINT) AS key FROM t1 a JOIN t2 b ON a.key = b.key
 UNION ALL
 SELECT CAST(key AS DOUBLE) AS key FROM t2) a
-ORDER BY key;
+;
 
 SELECT * FROM 
 (SELECT CAST(a.key AS BIGINT) AS key FROM t1 a JOIN t2 b ON a.key = b.key
 UNION ALL
 SELECT CAST(key AS DOUBLE) AS key FROM t2) a
-ORDER BY key;
+;
 
 -- Test union with join on the right
 EXPLAIN
@@ -38,13 +40,13 @@ SELECT * FROM 
 (SELECT CAST(key AS DOUBLE) AS key FROM t2
 UNION ALL
 SELECT CAST(a.key AS BIGINT) AS key FROM t1 a JOIN t2 b ON a.key = b.key) a
-ORDER BY key;
+;
 
 SELECT * FROM 
 (SELECT CAST(key AS DOUBLE) AS key FROM t2
 UNION ALL
 SELECT CAST(a.key AS BIGINT) AS key FROM t1 a JOIN t2 b ON a.key = b.key) a
-ORDER BY key;
+;
 
 -- Test union with join on the left selecting multiple columns
 EXPLAIN
@@ -52,13 +54,13 @@ SELECT * FROM 
 (SELECT CAST(a.key AS BIGINT) AS key, CAST(b.key AS DOUBLE) AS value FROM t1 a JOIN t2 b ON a.key = b.key
 UNION ALL
 SELECT CAST(key AS DOUBLE) AS key, CAST(key AS STRING) AS value FROM t2) a
-ORDER BY key;
+;
 
 SELECT * FROM 
 (SELECT CAST(a.key AS BIGINT) AS key, CAST(b.key AS DOUBLE) AS value FROM t1 a JOIN t2 b ON a.key = b.key
 UNION ALL
 SELECT CAST(key AS DOUBLE) AS key, CAST(key AS STRING) AS value FROM t2) a
-ORDER BY key;
+;
 
 -- Test union with join on the right selecting multiple columns
 EXPLAIN
@@ -66,10 +68,10 @@ SELECT * FROM 
 (SELECT CAST(key AS DOUBLE) AS key, CAST(key AS STRING) AS value FROM t2
 UNION ALL
 SELECT CAST(a.key AS BIGINT) AS key, CAST(b.key AS DOUBLE) AS value FROM t1 a JOIN t2 b ON a.key = b.key) a
-ORDER BY key;
+;
 
 SELECT * FROM 
 (SELECT CAST(key AS DOUBLE) AS key, CAST(key AS STRING) AS value FROM t2
 UNION ALL
 SELECT CAST(a.key AS BIGINT) AS key, CAST(b.key AS DOUBLE) AS value FROM t1 a JOIN t2 b ON a.key = b.key) a
-ORDER BY key;
+;

Modified: hive/trunk/ql/src/test/queries/clientpositive/union_date.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/union_date.q?rev=1598678&r1=1598677&r2=1598678&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/union_date.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/union_date.q Fri May 30 17:37:05 2014
@@ -1,3 +1,5 @@
+-- SORT_QUERY_RESULTS
+
 drop table union_date_1;
 drop table union_date_2;
 
@@ -24,7 +26,7 @@ select * from (
   select fl_num, fl_date from union_date_1
   union all
   select fl_num, fl_date from union_date_2
-) union_result order by fl_date, fl_num;
+) union_result;
 
 drop table union_date_1;
 drop table union_date_2;

Modified: hive/trunk/ql/src/test/queries/clientpositive/union_top_level.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/union_top_level.q?rev=1598678&r1=1598677&r2=1598678&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/union_top_level.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/union_top_level.q Fri May 30 17:37:05 2014
@@ -1,33 +1,27 @@
+-- SORT_QUERY_RESULTS
+
 -- top level
 explain
-select * from (
 select key, 0 as value from src where key % 3 == 0 limit 3
 union all
 select key, 1 as value from src where key % 3 == 1 limit 3
 union all
-select key, 2 as value from src where key % 3 == 2 limit 3
-) q1 order by key;
+select key, 2 as value from src where key % 3 == 2 limit 3;
 
-select * from (
 select key, 0 as value from src where key % 3 == 0 limit 3
 union all
 select key, 1 as value from src where key % 3 == 1 limit 3
 union all
-select key, 2 as value from src where key % 3 == 2 limit 3
-) q1 order by key;
+select key, 2 as value from src where key % 3 == 2 limit 3;
 
 explain
-select * from (
 select s1.key as k, s2.value as v from src s1 join src s2 on (s1.key = s2.key) limit 10
 union all
-select s1.key as k, s2.value as v from src s1 join src s2 on (s1.key = s2.key) limit 10
-) q1 order by k;
+select s1.key as k, s2.value as v from src s1 join src s2 on (s1.key = s2.key) limit 10;
 
-select * from (
 select s1.key as k, s2.value as v from src s1 join src s2 on (s1.key = s2.key) limit 10
 union all
-select s1.key as k, s2.value as v from src s1 join src s2 on (s1.key = s2.key) limit 10
-) q1 order by k;
+select s1.key as k, s2.value as v from src s1 join src s2 on (s1.key = s2.key) limit 10;
 
 -- ctas
 explain
@@ -45,7 +39,7 @@ select key, 1 as value from src where ke
 union all
 select key, 2 as value from src where key % 3 == 2 limit 3;
 
-select * from union_top order by key;
+select * from union_top;
 
 truncate table union_top;
 
@@ -65,7 +59,7 @@ select key, 1 as value from src where ke
 union all
 select key, 2 as value from src where key % 3 == 2 limit 3;
 
-select * from union_top order by key;
+select * from union_top;
 
 explain
 insert overwrite table union_top
@@ -82,7 +76,7 @@ select key, 1 as value from src where ke
 union all
 select key, 2 as value from src where key % 3 == 2 limit 3;
 
-select * from union_top order by key;
+select * from union_top;
 
 -- create view
 explain
@@ -100,7 +94,7 @@ select key, 1 as value from src where ke
 union all
 select key, 2 as value from src where key % 3 == 2 limit 3;
 
-select * from union_top_view order by key;
+select * from union_top_view;
 
 drop table union_top;
 drop view union_top_view;

Modified: hive/trunk/ql/src/test/results/clientpositive/alter_char1.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/alter_char1.q.out?rev=1598678&r1=1598677&r2=1598678&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/alter_char1.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/alter_char1.q.out Fri May 30 17:37:05 2014
@@ -1,6 +1,10 @@
-PREHOOK: query: drop table alter_char_1
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+drop table alter_char_1
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table alter_char_1
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+drop table alter_char_1
 POSTHOOK: type: DROPTABLE
 PREHOOK: query: create table alter_char_1 (key string, value string)
 PREHOOK: type: CREATETABLE
@@ -21,11 +25,11 @@ POSTHOOK: Input: default@src
 POSTHOOK: Output: default@alter_char_1
 POSTHOOK: Lineage: alter_char_1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: alter_char_1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: select * from alter_char_1 order by key
+PREHOOK: query: select * from alter_char_1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alter_char_1
 #### A masked pattern was here ####
-POSTHOOK: query: select * from alter_char_1 order by key
+POSTHOOK: query: select * from alter_char_1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alter_char_1
 #### A masked pattern was here ####
@@ -45,12 +49,12 @@ POSTHOOK: type: ALTERTABLE_RENAMECOL
 POSTHOOK: Input: default@alter_char_1
 POSTHOOK: Output: default@alter_char_1
 PREHOOK: query: -- contents should still look the same
-select * from alter_char_1 order by key
+select * from alter_char_1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alter_char_1
 #### A masked pattern was here ####
 POSTHOOK: query: -- contents should still look the same
-select * from alter_char_1 order by key
+select * from alter_char_1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alter_char_1
 #### A masked pattern was here ####
@@ -70,12 +74,12 @@ POSTHOOK: type: ALTERTABLE_RENAMECOL
 POSTHOOK: Input: default@alter_char_1
 POSTHOOK: Output: default@alter_char_1
 PREHOOK: query: -- value column should be truncated now
-select * from alter_char_1 order by key
+select * from alter_char_1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alter_char_1
 #### A masked pattern was here ####
 POSTHOOK: query: -- value column should be truncated now
-select * from alter_char_1 order by key
+select * from alter_char_1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alter_char_1
 #### A masked pattern was here ####
@@ -95,12 +99,12 @@ POSTHOOK: type: ALTERTABLE_RENAMECOL
 POSTHOOK: Input: default@alter_char_1
 POSTHOOK: Output: default@alter_char_1
 PREHOOK: query: -- column values should be full size again
-select * from alter_char_1 order by key
+select * from alter_char_1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alter_char_1
 #### A masked pattern was here ####
 POSTHOOK: query: -- column values should be full size again
-select * from alter_char_1 order by key
+select * from alter_char_1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alter_char_1
 #### A masked pattern was here ####
@@ -119,11 +123,11 @@ alter table alter_char_1 add columns (ke
 POSTHOOK: type: ALTERTABLE_ADDCOLS
 POSTHOOK: Input: default@alter_char_1
 POSTHOOK: Output: default@alter_char_1
-PREHOOK: query: select * from alter_char_1 order by key
+PREHOOK: query: select * from alter_char_1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alter_char_1
 #### A masked pattern was here ####
-POSTHOOK: query: select * from alter_char_1 order by key
+POSTHOOK: query: select * from alter_char_1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alter_char_1
 #### A masked pattern was here ####
@@ -146,11 +150,11 @@ POSTHOOK: Lineage: alter_char_1.key SIMP
 POSTHOOK: Lineage: alter_char_1.key2 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: alter_char_1.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 POSTHOOK: Lineage: alter_char_1.value2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: select * from alter_char_1 order by key
+PREHOOK: query: select * from alter_char_1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alter_char_1
 #### A masked pattern was here ####
-POSTHOOK: query: select * from alter_char_1 order by key
+POSTHOOK: query: select * from alter_char_1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alter_char_1
 #### A masked pattern was here ####