Posted to commits@hive.apache.org by br...@apache.org on 2013/08/13 15:54:49 UTC

svn commit: r1513495 - in /hive/trunk/ql/src/test: queries/clientpositive/ results/clientpositive/

Author: brock
Date: Tue Aug 13 13:54:48 2013
New Revision: 1513495

URL: http://svn.apache.org/r1513495
Log:
HIVE-5063: Fix some non-deterministic or not-updated tests (Navis via Brock Noland)
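
The common thread in the query changes below is determinism: without an explicit ORDER BY, the row order written to a .q.out golden file depends on how many map and reduce tasks ran and how their outputs were concatenated, so the same query can produce a spurious diff on a different Hadoop version or cluster layout. A minimal sketch of the pattern, illustrative only and not part of this commit (src1 is the standard Hive test table):

    -- Unstable: without a total order, row order in the .q.out golden file can
    -- vary with the number of map/reduce tasks and the Hadoop version.
    SELECT key, value FROM src1;

    -- Stable: ordering by every selected column pins the expected output.
    SELECT key, value FROM src1 ORDER BY key, value;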

Modified:
    hive/trunk/ql/src/test/queries/clientpositive/nonblock_op_deduplicate.q
    hive/trunk/ql/src/test/queries/clientpositive/partition_date.q
    hive/trunk/ql/src/test/queries/clientpositive/partition_date2.q
    hive/trunk/ql/src/test/queries/clientpositive/ppd_vc.q
    hive/trunk/ql/src/test/results/clientpositive/auto_join14.q.out
    hive/trunk/ql/src/test/results/clientpositive/input12.q.out
    hive/trunk/ql/src/test/results/clientpositive/join14.q.out
    hive/trunk/ql/src/test/results/clientpositive/nonblock_op_deduplicate.q.out
    hive/trunk/ql/src/test/results/clientpositive/partition_date.q.out
    hive/trunk/ql/src/test/results/clientpositive/partition_date2.q.out
    hive/trunk/ql/src/test/results/clientpositive/ppd_vc.q.out
    hive/trunk/ql/src/test/results/clientpositive/union_remove_19.q.out

Modified: hive/trunk/ql/src/test/queries/clientpositive/nonblock_op_deduplicate.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/nonblock_op_deduplicate.q?rev=1513495&r1=1513494&r2=1513495&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/nonblock_op_deduplicate.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/nonblock_op_deduplicate.q Tue Aug 13 13:54:48 2013
@@ -12,7 +12,7 @@ FROM (SELECT tmp2.key as key, tmp2.value
                   FROM src1) tmp1 ) tmp2
       JOIN (SELECT count(*) as count
             FROM src1) tmp3
-      ) tmp4;
+      ) tmp4 order by key, value, count;
 
 SELECT tmp4.key as key, tmp4.value as value, tmp4.count as count
 FROM (SELECT tmp2.key as key, tmp2.value as value, tmp3.count as count
@@ -21,7 +21,7 @@ FROM (SELECT tmp2.key as key, tmp2.value
                   FROM src1) tmp1 ) tmp2
       JOIN (SELECT count(*) as count
             FROM src1) tmp3
-      ) tmp4;
+      ) tmp4 order by key, value, count;
 
 set hive.auto.convert.join=true;
 -- Then, we convert the join to MapJoin.
@@ -33,7 +33,7 @@ FROM (SELECT tmp2.key as key, tmp2.value
                   FROM src1) tmp1 ) tmp2
       JOIN (SELECT count(*) as count
             FROM src1) tmp3
-      ) tmp4;
+      ) tmp4 order by key, value, count;
 
 SELECT tmp4.key as key, tmp4.value as value, tmp4.count as count
 FROM (SELECT tmp2.key as key, tmp2.value as value, tmp3.count as count
@@ -42,4 +42,4 @@ FROM (SELECT tmp2.key as key, tmp2.value
                   FROM src1) tmp1 ) tmp2
       JOIN (SELECT count(*) as count
             FROM src1) tmp3
-      ) tmp4;
+      ) tmp4 order by key, value, count;
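
Note that the added ORDER BY is not free: under plain MapReduce execution it compiles into one extra MapReduce stage that performs a global sort, which is why the nonblock_op_deduplicate.q.out plan further below gains a Stage-3 whose Reduce Output Operator sorts on all three columns. A hedged sketch of the same shape of query (subqueries abbreviated; the ON-less JOIN is the cross join the test relies on):

    EXPLAIN
    SELECT tmp4.key, tmp4.value, tmp4.count
    FROM (SELECT tmp2.key AS key, tmp2.value AS value, tmp3.count AS count
          FROM (SELECT key, value FROM src1) tmp2
          JOIN (SELECT count(*) AS count FROM src1) tmp3
         ) tmp4
    ORDER BY key, value, count;  -- the ORDER BY appears as an additional sort stage in the plan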

Modified: hive/trunk/ql/src/test/queries/clientpositive/partition_date.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/partition_date.q?rev=1513495&r1=1513494&r2=1513495&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/partition_date.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/partition_date.q Tue Aug 13 13:54:48 2013
@@ -12,7 +12,7 @@ insert overwrite table partition_date_1 
   select * from src limit 11;
 
 select distinct dt from partition_date_1;
-select *, cast(dt as timestamp) from partition_date_1 where dt = '2000-01-01' and region = 2;
+select *, cast(dt as timestamp) from partition_date_1 where dt = '2000-01-01' and region = 2 order by key,value;
 
 -- 15
 select count(*) from partition_date_1 where dt = date '2000-01-01';

Modified: hive/trunk/ql/src/test/queries/clientpositive/partition_date2.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/partition_date2.q?rev=1513495&r1=1513494&r2=1513495&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/partition_date2.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/partition_date2.q Tue Aug 13 13:54:48 2013
@@ -3,12 +3,10 @@ drop table partition_date2_1;
 create table partition_date2_1 (key string, value string) partitioned by (dt date, region int);
 
 -- test date literal syntax
-insert overwrite table partition_date2_1 partition(dt=date '2000-01-01', region=1)
-  select * from src limit 1;
-insert overwrite table partition_date2_1 partition(dt=date '2000-01-01', region=2)
-  select * from src limit 1;
-insert overwrite table partition_date2_1 partition(dt=date '1999-01-01', region=2)
-  select * from src limit 1;
+from (select * from src limit 1) x
+insert overwrite table partition_date2_1 partition(dt=date '2000-01-01', region=1) select *
+insert overwrite table partition_date2_1 partition(dt=date '2000-01-01', region=2) select *
+insert overwrite table partition_date2_1 partition(dt=date '1999-01-01', region=2) select *;
 
 select distinct dt from partition_date2_1;
 select * from partition_date2_1;
@@ -44,7 +42,7 @@ describe extended partition_date2_1  par
 
 insert overwrite table partition_date2_1 partition(dt=date '1980-01-02', region=3)
   select * from src limit 2;
-select * from partition_date2_1;
+select * from partition_date2_1 order by key,value,dt,region;
 
 -- alter table set location
 alter table partition_date2_1 partition(dt=date '1980-01-02', region=3)
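
The partition_date2.q rewrite above also replaces three separate INSERT OVERWRITE ... SELECT * FROM src LIMIT 1 statements with Hive's multi-insert form. LIMIT 1 without an ORDER BY does not promise which row it returns, so feeding all three partitions from one shared FROM block at least guarantees they receive the identical row (and the source is scanned only once). A quick check one could run right after that multi-insert, sketched here rather than taken from the test itself:

    -- Immediately after the multi-insert, every loaded partition should hold
    -- the same single (key, value) pair.
    SELECT key, value, dt, region
    FROM partition_date2_1
    ORDER BY dt, region;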

Modified: hive/trunk/ql/src/test/queries/clientpositive/ppd_vc.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/ppd_vc.q?rev=1513495&r1=1513494&r2=1513495&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/ppd_vc.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/ppd_vc.q Tue Aug 13 13:54:48 2013
@@ -1,7 +1,14 @@
 --HIVE-3926 PPD on virtual column of partitioned table is not working
 
-explain extended select * from srcpart where BLOCK__OFFSET__INSIDE__FILE<100;
+explain extended
 select * from srcpart where BLOCK__OFFSET__INSIDE__FILE<100;
+select * from srcpart where BLOCK__OFFSET__INSIDE__FILE<100;
+
+explain extended
+select b.* from src a join
+  (select *,BLOCK__OFFSET__INSIDE__FILE from srcpart where BLOCK__OFFSET__INSIDE__FILE<100) b
+    on a.key=b.key AND b.BLOCK__OFFSET__INSIDE__FILE<50 order by ds,hr,BLOCK__OFFSET__INSIDE__FILE;
 
-explain extended select * from src a join (select *,BLOCK__OFFSET__INSIDE__FILE from srcpart where BLOCK__OFFSET__INSIDE__FILE<100) b on a.key=b.key AND b.BLOCK__OFFSET__INSIDE__FILE<50;
-select * from src a join (select *,BLOCK__OFFSET__INSIDE__FILE from srcpart where BLOCK__OFFSET__INSIDE__FILE<100) b on a.key=b.key AND b.BLOCK__OFFSET__INSIDE__FILE<50;
+select b.* from src a join
+  (select *,BLOCK__OFFSET__INSIDE__FILE from srcpart where BLOCK__OFFSET__INSIDE__FILE<100) b
+    on a.key=b.key AND b.BLOCK__OFFSET__INSIDE__FILE<50 order by ds,hr,BLOCK__OFFSET__INSIDE__FILE;
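
For context, BLOCK__OFFSET__INSIDE__FILE is one of Hive's per-row virtual columns (roughly, the byte offset of the row, or of its block, within the file), and HIVE-3926, which this test covers, concerns pushing a predicate on it down to the scan of a partitioned table. The rewritten queries project only b.* and sort by ds, hr and the offset, presumably to keep the golden rows both smaller and deterministically ordered. A minimal illustration of the virtual column on the standard srcpart test table (a sketch, not one of the test's queries):

    -- Rows from the first ~100 bytes of each partition file, in a stable order;
    -- prefix with EXPLAIN EXTENDED to inspect where the pushed-down filter lands.
    SELECT key, value, BLOCK__OFFSET__INSIDE__FILE
    FROM srcpart
    WHERE BLOCK__OFFSET__INSIDE__FILE < 100
    ORDER BY ds, hr, BLOCK__OFFSET__INSIDE__FILE;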

Modified: hive/trunk/ql/src/test/results/clientpositive/auto_join14.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/auto_join14.q.out?rev=1513495&r1=1513494&r2=1513495&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/auto_join14.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/auto_join14.q.out Tue Aug 13 13:54:48 2013
@@ -37,7 +37,7 @@ STAGE PLANS:
             alias: src
             Filter Operator
               predicate:
-                  expr: (key > 100.0)
+                  expr: (key > 100)
                   type: boolean
               HashTable Sink Operator
                 condition expressions:
@@ -57,7 +57,7 @@ STAGE PLANS:
             alias: srcpart
             Filter Operator
               predicate:
-                  expr: (key > 100.0)
+                  expr: (key > 100)
                   type: boolean
               Map Join Operator
                 condition map:
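
The .q.out updates for auto_join14 and for input12, join14 and union_remove_19 below are the "not-updated" half of the fix: only the printed form of the filter predicates changed, e.g. "(key > 100.0)" is now rendered as "(key > 100)", presumably reflecting a planner change in how the comparison between the string column key and an integer literal is displayed; the queries themselves and their results are untouched. The kind of statement whose plan carries such a predicate, as an illustration rather than the exact test query:

    EXPLAIN
    SELECT count(*)
    FROM src
    WHERE src.key > 100;  -- key is a string column, 100 an int literal; only the
                          -- EXPLAIN rendering of this predicate changed in the golden files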

Modified: hive/trunk/ql/src/test/results/clientpositive/input12.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/input12.q.out?rev=1513495&r1=1513494&r2=1513495&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/input12.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/input12.q.out Tue Aug 13 13:54:48 2013
@@ -65,7 +65,7 @@ STAGE PLANS:
             alias: src
             Filter Operator
               predicate:
-                  expr: (key < 100.0)
+                  expr: (key < 100)
                   type: boolean
               Select Operator
                 expressions:
@@ -84,7 +84,7 @@ STAGE PLANS:
                       name: default.dest1
             Filter Operator
               predicate:
-                  expr: ((key >= 100.0) and (key < 200.0))
+                  expr: ((key >= 100) and (key < 200))
                   type: boolean
               Select Operator
                 expressions:
@@ -103,7 +103,7 @@ STAGE PLANS:
                       name: default.dest2
             Filter Operator
               predicate:
-                  expr: (key >= 200.0)
+                  expr: (key >= 200)
                   type: boolean
               Select Operator
                 expressions:

Modified: hive/trunk/ql/src/test/results/clientpositive/join14.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/join14.q.out?rev=1513495&r1=1513494&r2=1513495&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/join14.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/join14.q.out Tue Aug 13 13:54:48 2013
@@ -32,7 +32,7 @@ STAGE PLANS:
             alias: src
             Filter Operator
               predicate:
-                  expr: (key > 100.0)
+                  expr: (key > 100)
                   type: boolean
               Reduce Output Operator
                 key expressions:
@@ -51,7 +51,7 @@ STAGE PLANS:
             alias: srcpart
             Filter Operator
               predicate:
-                  expr: (key > 100.0)
+                  expr: (key > 100)
                   type: boolean
               Reduce Output Operator
                 key expressions:

Modified: hive/trunk/ql/src/test/results/clientpositive/nonblock_op_deduplicate.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/nonblock_op_deduplicate.q.out?rev=1513495&r1=1513494&r2=1513495&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/nonblock_op_deduplicate.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/nonblock_op_deduplicate.q.out Tue Aug 13 13:54:48 2013
@@ -52,7 +52,7 @@ FROM (SELECT tmp2.key as key, tmp2.value
                   FROM src1) tmp1 ) tmp2
       JOIN (SELECT count(*) as count
             FROM src1) tmp3
-      ) tmp4
+      ) tmp4 order by key, value, count
 PREHOOK: type: QUERY
 POSTHOOK: query: -- This test query is introduced for HIVE-4968.
 -- First, we do not convert the join to MapJoin.
@@ -64,14 +64,15 @@ FROM (SELECT tmp2.key as key, tmp2.value
                   FROM src1) tmp1 ) tmp2
       JOIN (SELECT count(*) as count
             FROM src1) tmp3
-      ) tmp4
+      ) tmp4 order by key, value, count
 POSTHOOK: type: QUERY
 ABSTRACT SYNTAX TREE:
-  (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src1))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL value))))) tmp1)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)))) tmp2) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src1))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTIONSTAR count) count)))) tmp3))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL tmp2) key) key) (TOK_SELEXPR (. (TOK_TABLE_OR_COL tmp2) value) value) (TOK_SELEXPR (. (TOK_TABLE_OR_COL tmp3) count) count)))) tmp4)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL tmp4) key) key) (TOK_SELEXPR (. (TOK_TABLE_OR_COL tm
 p4) value) value) (TOK_SELEXPR (. (TOK_TABLE_OR_COL tmp4) count) count))))
+  (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src1))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL value))))) tmp1)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)))) tmp2) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src1))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTIONSTAR count) count)))) tmp3))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL tmp2) key) key) (TOK_SELEXPR (. (TOK_TABLE_OR_COL tmp2) value) value) (TOK_SELEXPR (. (TOK_TABLE_OR_COL tmp3) count) count)))) tmp4)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL tmp4) key) key) (TOK_SELEXPR (. (TOK_TABLE_OR_COL tm
 p4) value) value) (TOK_SELEXPR (. (TOK_TABLE_OR_COL tmp4) count) count)) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL value)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL count)))))
 
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-2 depends on stages: Stage-1
+  Stage-3 depends on stages: Stage-2
   Stage-0 is a root stage
 
 STAGE PLANS:
@@ -163,8 +164,38 @@ STAGE PLANS:
               compressed: false
               GlobalTableId: 0
               table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+
+  Stage: Stage-3
+    Map Reduce
+      Alias -> Map Operator Tree:
+#### A masked pattern was here ####
+            Reduce Output Operator
+              key expressions:
+                    expr: _col0
+                    type: string
+                    expr: _col1
+                    type: string
+                    expr: _col2
+                    type: bigint
+              sort order: +++
+              tag: -1
+              value expressions:
+                    expr: _col0
+                    type: string
+                    expr: _col1
+                    type: string
+                    expr: _col2
+                    type: bigint
+      Reduce Operator Tree:
+        Extract
+          File Output Operator
+            compressed: false
+            GlobalTableId: 0
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 
   Stage: Stage-0
     Fetch Operator
@@ -178,7 +209,7 @@ FROM (SELECT tmp2.key as key, tmp2.value
                   FROM src1) tmp1 ) tmp2
       JOIN (SELECT count(*) as count
             FROM src1) tmp3
-      ) tmp4
+      ) tmp4 order by key, value, count
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
 #### A masked pattern was here ####
@@ -189,35 +220,35 @@ FROM (SELECT tmp2.key as key, tmp2.value
                   FROM src1) tmp1 ) tmp2
       JOIN (SELECT count(*) as count
             FROM src1) tmp3
-      ) tmp4
+      ) tmp4 order by key, value, count
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src1
 #### A masked pattern was here ####
-238	val_238	25
 		25
-311	val_311	25
-	val_27	25
+		25
+		25
+		25
 	val_165	25
+	val_193	25
+	val_265	25
+	val_27	25
 	val_409	25
-255	val_255	25
-278	val_278	25
-98	val_98	25
 	val_484	25
-	val_265	25
-	val_193	25
-401	val_401	25
+128		25
+146	val_146	25
 150	val_150	25
-273	val_273	25
+213	val_213	25
 224		25
+238	val_238	25
+255	val_255	25
+273	val_273	25
+278	val_278	25
+311	val_311	25
 369		25
-66	val_66	25
-128		25
-213	val_213	25
-146	val_146	25
+401	val_401	25
 406	val_406	25
-		25
-		25
-		25
+66	val_66	25
+98	val_98	25
 PREHOOK: query: -- Then, we convert the join to MapJoin.
 EXPLAIN
 SELECT tmp4.key as key, tmp4.value as value, tmp4.count as count
@@ -227,7 +258,7 @@ FROM (SELECT tmp2.key as key, tmp2.value
                   FROM src1) tmp1 ) tmp2
       JOIN (SELECT count(*) as count
             FROM src1) tmp3
-      ) tmp4
+      ) tmp4 order by key, value, count
 PREHOOK: type: QUERY
 POSTHOOK: query: -- Then, we convert the join to MapJoin.
 EXPLAIN
@@ -238,15 +269,15 @@ FROM (SELECT tmp2.key as key, tmp2.value
                   FROM src1) tmp1 ) tmp2
       JOIN (SELECT count(*) as count
             FROM src1) tmp3
-      ) tmp4
+      ) tmp4 order by key, value, count
 POSTHOOK: type: QUERY
 ABSTRACT SYNTAX TREE:
-  (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src1))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL value))))) tmp1)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)))) tmp2) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src1))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTIONSTAR count) count)))) tmp3))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL tmp2) key) key) (TOK_SELEXPR (. (TOK_TABLE_OR_COL tmp2) value) value) (TOK_SELEXPR (. (TOK_TABLE_OR_COL tmp3) count) count)))) tmp4)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL tmp4) key) key) (TOK_SELEXPR (. (TOK_TABLE_OR_COL tm
 p4) value) value) (TOK_SELEXPR (. (TOK_TABLE_OR_COL tmp4) count) count))))
+  (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src1))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL value))))) tmp1)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)))) tmp2) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src1))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTIONSTAR count) count)))) tmp3))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL tmp2) key) key) (TOK_SELEXPR (. (TOK_TABLE_OR_COL tmp2) value) value) (TOK_SELEXPR (. (TOK_TABLE_OR_COL tmp3) count) count)))) tmp4)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL tmp4) key) key) (TOK_SELEXPR (. (TOK_TABLE_OR_COL tm
 p4) value) value) (TOK_SELEXPR (. (TOK_TABLE_OR_COL tmp4) count) count)) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL value)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL count)))))
 
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-5 depends on stages: Stage-1
-  Stage-4 depends on stages: Stage-5
+  Stage-6 depends on stages: Stage-1
+  Stage-3 depends on stages: Stage-6
   Stage-0 is a root stage
 
 STAGE PLANS:
@@ -288,7 +319,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
 
-  Stage: Stage-5
+  Stage: Stage-6
     Map Reduce Local Work
       Alias -> Map Local Tables:
         tmp4:tmp2:tmp1:src1 
@@ -315,7 +346,7 @@ STAGE PLANS:
                   1 []
                 Position of Big Table: 1
 
-  Stage: Stage-4
+  Stage: Stage-3
     Map Reduce
       Alias -> Map Operator Tree:
         $INTNAME 
@@ -340,14 +371,33 @@ STAGE PLANS:
                       expr: _col2
                       type: bigint
                 outputColumnNames: _col0, _col1, _col2
-                File Output Operator
-                  compressed: false
-                  GlobalTableId: 0
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                Reduce Output Operator
+                  key expressions:
+                        expr: _col0
+                        type: string
+                        expr: _col1
+                        type: string
+                        expr: _col2
+                        type: bigint
+                  sort order: +++
+                  tag: -1
+                  value expressions:
+                        expr: _col0
+                        type: string
+                        expr: _col1
+                        type: string
+                        expr: _col2
+                        type: bigint
       Local Work:
         Map Reduce Local Work
+      Reduce Operator Tree:
+        Extract
+          File Output Operator
+            compressed: false
+            GlobalTableId: 0
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 
   Stage: Stage-0
     Fetch Operator
@@ -361,7 +411,7 @@ FROM (SELECT tmp2.key as key, tmp2.value
                   FROM src1) tmp1 ) tmp2
       JOIN (SELECT count(*) as count
             FROM src1) tmp3
-      ) tmp4
+      ) tmp4 order by key, value, count
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
 #### A masked pattern was here ####
@@ -372,32 +422,32 @@ FROM (SELECT tmp2.key as key, tmp2.value
                   FROM src1) tmp1 ) tmp2
       JOIN (SELECT count(*) as count
             FROM src1) tmp3
-      ) tmp4
+      ) tmp4 order by key, value, count
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src1
 #### A masked pattern was here ####
-238	val_238	25
 		25
-311	val_311	25
-	val_27	25
+		25
+		25
+		25
 	val_165	25
+	val_193	25
+	val_265	25
+	val_27	25
 	val_409	25
-255	val_255	25
-278	val_278	25
-98	val_98	25
 	val_484	25
-	val_265	25
-	val_193	25
-401	val_401	25
+128		25
+146	val_146	25
 150	val_150	25
-273	val_273	25
+213	val_213	25
 224		25
+238	val_238	25
+255	val_255	25
+273	val_273	25
+278	val_278	25
+311	val_311	25
 369		25
-66	val_66	25
-128		25
-213	val_213	25
-146	val_146	25
+401	val_401	25
 406	val_406	25
-		25
-		25
-		25
+66	val_66	25
+98	val_98	25

Modified: hive/trunk/ql/src/test/results/clientpositive/partition_date.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/partition_date.q.out?rev=1513495&r1=1513494&r2=1513495&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/partition_date.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/partition_date.q.out Tue Aug 13 13:54:48 2013
@@ -93,12 +93,12 @@ POSTHOOK: Lineage: partition_date_1 PART
 POSTHOOK: Lineage: partition_date_1 PARTITION(dt=2013-08-08,region=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 2000-01-01
 2013-08-08
-PREHOOK: query: select *, cast(dt as timestamp) from partition_date_1 where dt = '2000-01-01' and region = 2
+PREHOOK: query: select *, cast(dt as timestamp) from partition_date_1 where dt = '2000-01-01' and region = 2 order by key,value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@partition_date_1
 PREHOOK: Input: default@partition_date_1@dt=2000-01-01/region=2
 #### A masked pattern was here ####
-POSTHOOK: query: select *, cast(dt as timestamp) from partition_date_1 where dt = '2000-01-01' and region = 2
+POSTHOOK: query: select *, cast(dt as timestamp) from partition_date_1 where dt = '2000-01-01' and region = 2 order by key,value
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@partition_date_1
 POSTHOOK: Input: default@partition_date_1@dt=2000-01-01/region=2
@@ -111,11 +111,11 @@ POSTHOOK: Lineage: partition_date_1 PART
 POSTHOOK: Lineage: partition_date_1 PARTITION(dt=2013-08-08,region=10).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 POSTHOOK: Lineage: partition_date_1 PARTITION(dt=2013-08-08,region=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: partition_date_1 PARTITION(dt=2013-08-08,region=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+165	val_165	2000-01-01	2	1969-12-31 16:00:00
 238	val_238	2000-01-01	2	1969-12-31 16:00:00
-86	val_86	2000-01-01	2	1969-12-31 16:00:00
-311	val_311	2000-01-01	2	1969-12-31 16:00:00
 27	val_27	2000-01-01	2	1969-12-31 16:00:00
-165	val_165	2000-01-01	2	1969-12-31 16:00:00
+311	val_311	2000-01-01	2	1969-12-31 16:00:00
+86	val_86	2000-01-01	2	1969-12-31 16:00:00
 PREHOOK: query: -- 15
 select count(*) from partition_date_1 where dt = date '2000-01-01'
 PREHOOK: type: QUERY

Modified: hive/trunk/ql/src/test/results/clientpositive/partition_date2.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/partition_date2.q.out?rev=1513495&r1=1513494&r2=1513495&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/partition_date2.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/partition_date2.q.out Tue Aug 13 13:54:48 2013
@@ -8,43 +8,25 @@ POSTHOOK: query: create table partition_
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: default@partition_date2_1
 PREHOOK: query: -- test date literal syntax
-insert overwrite table partition_date2_1 partition(dt=date '2000-01-01', region=1)
-  select * from src limit 1
+from (select * from src limit 1) x
+insert overwrite table partition_date2_1 partition(dt=date '2000-01-01', region=1) select *
+insert overwrite table partition_date2_1 partition(dt=date '2000-01-01', region=2) select *
+insert overwrite table partition_date2_1 partition(dt=date '1999-01-01', region=2) select *
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
+PREHOOK: Output: default@partition_date2_1@dt=1999-01-01/region=2
 PREHOOK: Output: default@partition_date2_1@dt=2000-01-01/region=1
+PREHOOK: Output: default@partition_date2_1@dt=2000-01-01/region=2
 POSTHOOK: query: -- test date literal syntax
-insert overwrite table partition_date2_1 partition(dt=date '2000-01-01', region=1)
-  select * from src limit 1
+from (select * from src limit 1) x
+insert overwrite table partition_date2_1 partition(dt=date '2000-01-01', region=1) select *
+insert overwrite table partition_date2_1 partition(dt=date '2000-01-01', region=2) select *
+insert overwrite table partition_date2_1 partition(dt=date '1999-01-01', region=2) select *
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
+POSTHOOK: Output: default@partition_date2_1@dt=1999-01-01/region=2
 POSTHOOK: Output: default@partition_date2_1@dt=2000-01-01/region=1
-POSTHOOK: Lineage: partition_date2_1 PARTITION(dt=2000-01-01,region=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: partition_date2_1 PARTITION(dt=2000-01-01,region=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: insert overwrite table partition_date2_1 partition(dt=date '2000-01-01', region=2)
-  select * from src limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@partition_date2_1@dt=2000-01-01/region=2
-POSTHOOK: query: insert overwrite table partition_date2_1 partition(dt=date '2000-01-01', region=2)
-  select * from src limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
 POSTHOOK: Output: default@partition_date2_1@dt=2000-01-01/region=2
-POSTHOOK: Lineage: partition_date2_1 PARTITION(dt=2000-01-01,region=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: partition_date2_1 PARTITION(dt=2000-01-01,region=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: partition_date2_1 PARTITION(dt=2000-01-01,region=2).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: partition_date2_1 PARTITION(dt=2000-01-01,region=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: insert overwrite table partition_date2_1 partition(dt=date '1999-01-01', region=2)
-  select * from src limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@partition_date2_1@dt=1999-01-01/region=2
-POSTHOOK: query: insert overwrite table partition_date2_1 partition(dt=date '1999-01-01', region=2)
-  select * from src limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@partition_date2_1@dt=1999-01-01/region=2
 POSTHOOK: Lineage: partition_date2_1 PARTITION(dt=1999-01-01,region=2).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: partition_date2_1 PARTITION(dt=1999-01-01,region=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 POSTHOOK: Lineage: partition_date2_1 PARTITION(dt=2000-01-01,region=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
@@ -427,14 +409,14 @@ POSTHOOK: Lineage: partition_date2_1 PAR
 POSTHOOK: Lineage: partition_date2_1 PARTITION(dt=2000-01-01,region=2).value SIMPLE []
 POSTHOOK: Lineage: partition_date2_1 PARTITION(dt=2000-01-01,region=2).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: partition_date2_1 PARTITION(dt=2000-01-01,region=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: select * from partition_date2_1
+PREHOOK: query: select * from partition_date2_1 order by key,value,dt,region
 PREHOOK: type: QUERY
 PREHOOK: Input: default@partition_date2_1
 PREHOOK: Input: default@partition_date2_1@dt=1980-01-02/region=3
 PREHOOK: Input: default@partition_date2_1@dt=2000-01-01/region=1
 PREHOOK: Input: default@partition_date2_1@dt=2000-01-01/region=2
 #### A masked pattern was here ####
-POSTHOOK: query: select * from partition_date2_1
+POSTHOOK: query: select * from partition_date2_1 order by key,value,dt,region
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@partition_date2_1
 POSTHOOK: Input: default@partition_date2_1@dt=1980-01-02/region=3
@@ -452,8 +434,8 @@ POSTHOOK: Lineage: partition_date2_1 PAR
 POSTHOOK: Lineage: partition_date2_1 PARTITION(dt=2000-01-01,region=2).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: partition_date2_1 PARTITION(dt=2000-01-01,region=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 238	val_238	1980-01-02	3
-86	val_86	1980-01-02	3
 238	val_238	2000-01-01	1
+86	val_86	1980-01-02	3
 PREHOOK: query: -- alter table set location
 alter table partition_date2_1 partition(dt=date '1980-01-02', region=3)
 #### A masked pattern was here ####

Modified: hive/trunk/ql/src/test/results/clientpositive/ppd_vc.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/ppd_vc.q.out?rev=1513495&r1=1513494&r2=1513495&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/ppd_vc.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/ppd_vc.q.out Tue Aug 13 13:54:48 2013
@@ -1,10 +1,12 @@
 PREHOOK: query: --HIVE-3926 PPD on virtual column of partitioned table is not working
 
-explain extended select * from srcpart where BLOCK__OFFSET__INSIDE__FILE<100
+explain extended
+select * from srcpart where BLOCK__OFFSET__INSIDE__FILE<100
 PREHOOK: type: QUERY
 POSTHOOK: query: --HIVE-3926 PPD on virtual column of partitioned table is not working
 
-explain extended select * from srcpart where BLOCK__OFFSET__INSIDE__FILE<100
+explain extended
+select * from srcpart where BLOCK__OFFSET__INSIDE__FILE<100
 POSTHOOK: type: QUERY
 ABSTRACT SYNTAX TREE:
   (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME srcpart))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_WHERE (< (TOK_TABLE_OR_COL BLOCK__OFFSET__INSIDE__FILE) 100))))
@@ -305,15 +307,22 @@ POSTHOOK: Input: default@srcpart@ds=2008
 255	val_255	2008-04-09	12
 278	val_278	2008-04-09	12
 98	val_98	2008-04-09	12
-PREHOOK: query: explain extended select * from src a join (select *,BLOCK__OFFSET__INSIDE__FILE from srcpart where BLOCK__OFFSET__INSIDE__FILE<100) b on a.key=b.key AND b.BLOCK__OFFSET__INSIDE__FILE<50
+PREHOOK: query: explain extended
+select b.* from src a join
+  (select *,BLOCK__OFFSET__INSIDE__FILE from srcpart where BLOCK__OFFSET__INSIDE__FILE<100) b
+    on a.key=b.key AND b.BLOCK__OFFSET__INSIDE__FILE<50 order by ds,hr,BLOCK__OFFSET__INSIDE__FILE
 PREHOOK: type: QUERY
-POSTHOOK: query: explain extended select * from src a join (select *,BLOCK__OFFSET__INSIDE__FILE from srcpart where BLOCK__OFFSET__INSIDE__FILE<100) b on a.key=b.key AND b.BLOCK__OFFSET__INSIDE__FILE<50
+POSTHOOK: query: explain extended
+select b.* from src a join
+  (select *,BLOCK__OFFSET__INSIDE__FILE from srcpart where BLOCK__OFFSET__INSIDE__FILE<100) b
+    on a.key=b.key AND b.BLOCK__OFFSET__INSIDE__FILE<50 order by ds,hr,BLOCK__OFFSET__INSIDE__FILE
 POSTHOOK: type: QUERY
 ABSTRACT SYNTAX TREE:
-  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME src) a) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME srcpart))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF) (TOK_SELEXPR (TOK_TABLE_OR_COL BLOCK__OFFSET__INSIDE__FILE))) (TOK_WHERE (< (TOK_TABLE_OR_COL BLOCK__OFFSET__INSIDE__FILE) 100)))) b) (AND (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)) (< (. (TOK_TABLE_OR_COL b) BLOCK__OFFSET__INSIDE__FILE) 50)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF))))
+  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME src) a) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME srcpart))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF) (TOK_SELEXPR (TOK_TABLE_OR_COL BLOCK__OFFSET__INSIDE__FILE))) (TOK_WHERE (< (TOK_TABLE_OR_COL BLOCK__OFFSET__INSIDE__FILE) 100)))) b) (AND (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)) (< (. (TOK_TABLE_OR_COL b) BLOCK__OFFSET__INSIDE__FILE) 50)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_ALLCOLREF (TOK_TABNAME b)))) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL ds)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL hr)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL BLOCK__OFFSET__INSIDE__FILE)))))
 
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
   Stage-0 is a root stage
 
 STAGE PLANS:
@@ -333,11 +342,6 @@ STAGE PLANS:
                     expr: key
                     type: string
               tag: 0
-              value expressions:
-                    expr: key
-                    type: string
-                    expr: value
-                    type: string
         b:srcpart 
           TableScan
             alias: srcpart
@@ -621,16 +625,12 @@ STAGE PLANS:
           condition map:
                Inner Join 0 to 1
           condition expressions:
-            0 {VALUE._col0} {VALUE._col1}
+            0 
             1 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4}
           handleSkewJoin: false
-          outputColumnNames: _col0, _col1, _col4, _col5, _col6, _col7, _col8
+          outputColumnNames: _col4, _col5, _col6, _col7, _col8
           Select Operator
             expressions:
-                  expr: _col0
-                  type: string
-                  expr: _col1
-                  type: string
                   expr: _col4
                   type: string
                   expr: _col5
@@ -641,32 +641,99 @@ STAGE PLANS:
                   type: string
                   expr: _col8
                   type: bigint
-            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
+            outputColumnNames: _col0, _col1, _col2, _col3, _col4
             File Output Operator
               compressed: false
               GlobalTableId: 0
 #### A masked pattern was here ####
               NumFilesPerFileSink: 1
-#### A masked pattern was here ####
               table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                   properties:
-                    columns _col0,_col1,_col2,_col3,_col4,_col5,_col6
-                    columns.types string:string:string:string:string:string:bigint
+                    columns _col0,_col1,_col2,_col3,_col4
+                    columns.types string,string,string,string,bigint
                     escape.delim \
-                    hive.serialization.extend.nesting.levels true
-                    serialization.format 1
               TotalFiles: 1
               GatherStats: false
               MultiFileSpray: false
 
+  Stage: Stage-2
+    Map Reduce
+      Alias -> Map Operator Tree:
+#### A masked pattern was here ####
+            Reduce Output Operator
+              key expressions:
+                    expr: _col2
+                    type: string
+                    expr: _col3
+                    type: string
+                    expr: _col4
+                    type: bigint
+              sort order: +++
+              tag: -1
+              value expressions:
+                    expr: _col0
+                    type: string
+                    expr: _col1
+                    type: string
+                    expr: _col2
+                    type: string
+                    expr: _col3
+                    type: string
+                    expr: _col4
+                    type: bigint
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: -mr-10002
+            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+            properties:
+              columns _col0,_col1,_col2,_col3,_col4
+              columns.types string,string,string,string,bigint
+              escape.delim \
+          
+              input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+              properties:
+                columns _col0,_col1,_col2,_col3,_col4
+                columns.types string,string,string,string,bigint
+                escape.delim \
+      Truncated Path -> Alias:
+#### A masked pattern was here ####
+      Needs Tagging: false
+      Reduce Operator Tree:
+        Extract
+          File Output Operator
+            compressed: false
+            GlobalTableId: 0
+#### A masked pattern was here ####
+            NumFilesPerFileSink: 1
+#### A masked pattern was here ####
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                properties:
+                  columns _col0,_col1,_col2,_col3,_col4
+                  columns.types string:string:string:string:bigint
+                  escape.delim \
+                  hive.serialization.extend.nesting.levels true
+                  serialization.format 1
+            TotalFiles: 1
+            GatherStats: false
+            MultiFileSpray: false
+
   Stage: Stage-0
     Fetch Operator
       limit: -1
 
 
-PREHOOK: query: select * from src a join (select *,BLOCK__OFFSET__INSIDE__FILE from srcpart where BLOCK__OFFSET__INSIDE__FILE<100) b on a.key=b.key AND b.BLOCK__OFFSET__INSIDE__FILE<50
+PREHOOK: query: select b.* from src a join
+  (select *,BLOCK__OFFSET__INSIDE__FILE from srcpart where BLOCK__OFFSET__INSIDE__FILE<100) b
+    on a.key=b.key AND b.BLOCK__OFFSET__INSIDE__FILE<50 order by ds,hr,BLOCK__OFFSET__INSIDE__FILE
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Input: default@srcpart
@@ -675,7 +742,9 @@ PREHOOK: Input: default@srcpart@ds=2008-
 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 #### A masked pattern was here ####
-POSTHOOK: query: select * from src a join (select *,BLOCK__OFFSET__INSIDE__FILE from srcpart where BLOCK__OFFSET__INSIDE__FILE<100) b on a.key=b.key AND b.BLOCK__OFFSET__INSIDE__FILE<50
+POSTHOOK: query: select b.* from src a join
+  (select *,BLOCK__OFFSET__INSIDE__FILE from srcpart where BLOCK__OFFSET__INSIDE__FILE<100) b
+    on a.key=b.key AND b.BLOCK__OFFSET__INSIDE__FILE<50 order by ds,hr,BLOCK__OFFSET__INSIDE__FILE
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 POSTHOOK: Input: default@srcpart
@@ -684,39 +753,39 @@ POSTHOOK: Input: default@srcpart@ds=2008
 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 #### A masked pattern was here ####
-165	val_165	165	val_165	2008-04-08	11	44
-165	val_165	165	val_165	2008-04-09	11	44
-165	val_165	165	val_165	2008-04-09	12	44
-165	val_165	165	val_165	2008-04-08	12	44
-165	val_165	165	val_165	2008-04-08	11	44
-165	val_165	165	val_165	2008-04-09	11	44
-165	val_165	165	val_165	2008-04-09	12	44
-165	val_165	165	val_165	2008-04-08	12	44
-238	val_238	238	val_238	2008-04-08	11	0
-238	val_238	238	val_238	2008-04-08	12	0
-238	val_238	238	val_238	2008-04-09	12	0
-238	val_238	238	val_238	2008-04-09	11	0
-238	val_238	238	val_238	2008-04-08	11	0
-238	val_238	238	val_238	2008-04-08	12	0
-238	val_238	238	val_238	2008-04-09	12	0
-238	val_238	238	val_238	2008-04-09	11	0
-27	val_27	27	val_27	2008-04-08	12	34
-27	val_27	27	val_27	2008-04-08	11	34
-27	val_27	27	val_27	2008-04-09	11	34
-27	val_27	27	val_27	2008-04-09	12	34
-311	val_311	311	val_311	2008-04-08	11	22
-311	val_311	311	val_311	2008-04-09	11	22
-311	val_311	311	val_311	2008-04-09	12	22
-311	val_311	311	val_311	2008-04-08	12	22
-311	val_311	311	val_311	2008-04-08	11	22
-311	val_311	311	val_311	2008-04-09	11	22
-311	val_311	311	val_311	2008-04-09	12	22
-311	val_311	311	val_311	2008-04-08	12	22
-311	val_311	311	val_311	2008-04-08	11	22
-311	val_311	311	val_311	2008-04-09	11	22
-311	val_311	311	val_311	2008-04-09	12	22
-311	val_311	311	val_311	2008-04-08	12	22
-86	val_86	86	val_86	2008-04-09	11	12
-86	val_86	86	val_86	2008-04-08	11	12
-86	val_86	86	val_86	2008-04-09	12	12
-86	val_86	86	val_86	2008-04-08	12	12
+238	val_238	2008-04-08	11	0
+238	val_238	2008-04-08	11	0
+86	val_86	2008-04-08	11	12
+311	val_311	2008-04-08	11	22
+311	val_311	2008-04-08	11	22
+311	val_311	2008-04-08	11	22
+27	val_27	2008-04-08	11	34
+165	val_165	2008-04-08	11	44
+165	val_165	2008-04-08	11	44
+238	val_238	2008-04-08	12	0
+238	val_238	2008-04-08	12	0
+86	val_86	2008-04-08	12	12
+311	val_311	2008-04-08	12	22
+311	val_311	2008-04-08	12	22
+311	val_311	2008-04-08	12	22
+27	val_27	2008-04-08	12	34
+165	val_165	2008-04-08	12	44
+165	val_165	2008-04-08	12	44
+238	val_238	2008-04-09	11	0
+238	val_238	2008-04-09	11	0
+86	val_86	2008-04-09	11	12
+311	val_311	2008-04-09	11	22
+311	val_311	2008-04-09	11	22
+311	val_311	2008-04-09	11	22
+27	val_27	2008-04-09	11	34
+165	val_165	2008-04-09	11	44
+165	val_165	2008-04-09	11	44
+238	val_238	2008-04-09	12	0
+238	val_238	2008-04-09	12	0
+86	val_86	2008-04-09	12	12
+311	val_311	2008-04-09	12	22
+311	val_311	2008-04-09	12	22
+311	val_311	2008-04-09	12	22
+27	val_27	2008-04-09	12	34
+165	val_165	2008-04-09	12	44
+165	val_165	2008-04-09	12	44

Modified: hive/trunk/ql/src/test/results/clientpositive/union_remove_19.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/union_remove_19.q.out?rev=1513495&r1=1513494&r2=1513495&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/union_remove_19.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/union_remove_19.q.out Tue Aug 13 13:54:48 2013
@@ -300,7 +300,7 @@ STAGE PLANS:
             alias: inputtbl1
             Filter Operator
               predicate:
-                  expr: (key = 7.0)
+                  expr: (key = 7)
                   type: boolean
               Select Operator
                 expressions:
@@ -372,7 +372,7 @@ STAGE PLANS:
             alias: inputtbl1
             Filter Operator
               predicate:
-                  expr: (key = 7.0)
+                  expr: (key = 7)
                   type: boolean
               Select Operator
                 expressions: