Posted to commits@hive.apache.org by br...@apache.org on 2014/10/06 00:26:58 UTC

svn commit: r1629544 [25/33] - in /hive/branches/spark-new: ./ accumulo-handler/ beeline/ beeline/src/java/org/apache/hive/beeline/ bin/ bin/ext/ common/ common/src/java/org/apache/hadoop/hive/conf/ common/src/test/org/apache/hadoop/hive/common/type/ c...

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization.q.out Sun Oct  5 22:26:43 2014
@@ -291,11 +291,11 @@ STAGE PLANS:
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4
                       Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: _col4 (type: tinyint), -1 (type: int)
+                        key expressions: _col4 (type: tinyint), '_bucket_number' (type: string)
                         sort order: ++
                         Map-reduce partition columns: _col4 (type: tinyint)
                         Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
+                        value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint), '_bucket_number' (type: string)
         Reducer 2 
             Reduce Operator Tree:
               Extract
@@ -357,11 +357,11 @@ STAGE PLANS:
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4
                       Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: _col4 (type: tinyint), -1 (type: int), _col3 (type: float)
+                        key expressions: _col4 (type: tinyint), '_bucket_number' (type: string), _col3 (type: float)
                         sort order: +++
                         Map-reduce partition columns: _col4 (type: tinyint)
                         Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
+                        value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint), '_bucket_number' (type: string)
         Reducer 2 
             Reduce Operator Tree:
               Extract
@@ -644,11 +644,11 @@ STAGE PLANS:
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4
                       Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: _col4 (type: tinyint), -1 (type: int)
+                        key expressions: _col4 (type: tinyint), '_bucket_number' (type: string)
                         sort order: ++
                         Map-reduce partition columns: _col4 (type: tinyint)
                         Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
+                        value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint), '_bucket_number' (type: string)
         Reducer 2 
             Reduce Operator Tree:
               Extract
@@ -710,11 +710,11 @@ STAGE PLANS:
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4
                       Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: _col4 (type: tinyint), -1 (type: int), _col3 (type: float)
+                        key expressions: _col4 (type: tinyint), '_bucket_number' (type: string), _col3 (type: float)
                         sort order: +++
                         Map-reduce partition columns: _col4 (type: tinyint)
                         Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
+                        value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint), '_bucket_number' (type: string)
         Reducer 2 
             Reduce Operator Tree:
               Extract
@@ -843,8 +843,8 @@ Protect Mode:       	None               
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	2                   
-	numRows             	16                  
-	rawDataSize         	415                 
+	numRows             	32                  
+	rawDataSize         	830                 
 	totalSize           	862                 
 #### A masked pattern was here ####
 	 	 
@@ -887,8 +887,8 @@ Protect Mode:       	None               
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	2                   
-	numRows             	3                   
-	rawDataSize         	78                  
+	numRows             	6                   
+	rawDataSize         	156                 
 	totalSize           	162                 
 #### A masked pattern was here ####
 	 	 
@@ -931,8 +931,8 @@ Protect Mode:       	None               
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	2                   
-	numRows             	7                   
-	rawDataSize         	181                 
+	numRows             	14                  
+	rawDataSize         	362                 
 	totalSize           	376                 
 #### A masked pattern was here ####
 	 	 
@@ -975,8 +975,8 @@ Protect Mode:       	None               
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	2                   
-	numRows             	3                   
-	rawDataSize         	78                  
+	numRows             	6                   
+	rawDataSize         	156                 
 	totalSize           	162                 
 #### A masked pattern was here ####
 	 	 
@@ -1018,8 +1018,8 @@ Protect Mode:       	None               
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	8                   
-	numRows             	16                  
-	rawDataSize         	415                 
+	numRows             	32                  
+	rawDataSize         	830                 
 	totalSize           	862                 
 #### A masked pattern was here ####
 	 	 
@@ -1061,8 +1061,8 @@ Protect Mode:       	None               
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	8                   
-	numRows             	3                   
-	rawDataSize         	78                  
+	numRows             	6                   
+	rawDataSize         	156                 
 	totalSize           	162                 
 #### A masked pattern was here ####
 	 	 
@@ -1104,8 +1104,8 @@ Protect Mode:       	None               
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	8                   
-	numRows             	16                  
-	rawDataSize         	415                 
+	numRows             	32                  
+	rawDataSize         	830                 
 	totalSize           	862                 
 #### A masked pattern was here ####
 	 	 
@@ -1147,8 +1147,8 @@ Protect Mode:       	None               
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	8                   
-	numRows             	3                   
-	rawDataSize         	78                  
+	numRows             	6                   
+	rawDataSize         	156                 
 	totalSize           	162                 
 #### A masked pattern was here ####
 	 	 
@@ -1381,6 +1381,261 @@ STAGE PLANS:
   Stage: Stage-3
     Stats-Aggr Operator
 
+PREHOOK: query: explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from (select * from over1k order by i limit 10) tmp where t is null or t=27
+PREHOOK: type: QUERY
+POSTHOOK: query: explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from (select * from over1k order by i limit 10) tmp where t is null or t=27
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: over1k
+                  Statistics: Num rows: 4443 Data size: 106636 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: t (type: tinyint), si (type: smallint), i (type: int), b (type: bigint), f (type: float)
+                    outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                    Statistics: Num rows: 4443 Data size: 106636 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: _col2 (type: int)
+                      sort order: +
+                      Statistics: Num rows: 4443 Data size: 106636 Basic stats: COMPLETE Column stats: NONE
+                      value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col3 (type: bigint), _col4 (type: float)
+        Reducer 2 
+            Reduce Operator Tree:
+              Select Operator
+                expressions: VALUE._col0 (type: tinyint), VALUE._col1 (type: smallint), KEY.reducesinkkey0 (type: int), VALUE._col2 (type: bigint), VALUE._col3 (type: float)
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                Statistics: Num rows: 4443 Data size: 106636 Basic stats: COMPLETE Column stats: NONE
+                Limit
+                  Number of rows: 10
+                  Statistics: Num rows: 10 Data size: 240 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (_col0 is null or (_col0 = 27)) (type: boolean)
+                    Statistics: Num rows: 10 Data size: 240 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: _col1 (type: smallint), _col2 (type: int), _col3 (type: bigint), _col4 (type: float), _col0 (type: tinyint)
+                      outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                      Statistics: Num rows: 10 Data size: 240 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col4 (type: tinyint)
+                        sort order: +
+                        Map-reduce partition columns: _col4 (type: tinyint)
+                        Statistics: Num rows: 10 Data size: 240 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
+        Reducer 3 
+            Reduce Operator Tree:
+              Extract
+                Statistics: Num rows: 10 Data size: 240 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 10 Data size: 240 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.over1k_part2
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds foo
+            t 
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.over1k_part2
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+
+PREHOOK: query: explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 group by si,i,b,f,t
+PREHOOK: type: QUERY
+POSTHOOK: query: explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 group by si,i,b,f,t
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: over1k
+                  Statistics: Num rows: 4443 Data size: 106636 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (t is null or (t = 27)) (type: boolean)
+                    Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint)
+                      outputColumnNames: si, i, b, f, t
+                      Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        keys: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint)
+                        mode: hash
+                        outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                        Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
+                          sort order: +++++
+                          Map-reduce partition columns: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
+                          Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+        Reducer 2 
+            Reduce Operator Tree:
+              Group By Operator
+                keys: KEY._col0 (type: smallint), KEY._col1 (type: int), KEY._col2 (type: bigint), KEY._col3 (type: float), KEY._col4 (type: tinyint)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                Statistics: Num rows: 2221 Data size: 53305 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                  Statistics: Num rows: 2221 Data size: 53305 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 2221 Data size: 53305 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        name: default.over1k_part2
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds foo
+            t 
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.over1k_part2
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+
+PREHOOK: query: -- tests for HIVE-8162, only partition column 't' should be in last RS operator
+explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 group by si,i,b,f,t
+PREHOOK: type: QUERY
+POSTHOOK: query: -- tests for HIVE-8162, only partition column 't' should be in last RS operator
+explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 group by si,i,b,f,t
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: over1k
+                  Statistics: Num rows: 4443 Data size: 106636 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (t is null or (t = 27)) (type: boolean)
+                    Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint)
+                      outputColumnNames: si, i, b, f, t
+                      Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        keys: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint)
+                        mode: hash
+                        outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                        Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
+                          sort order: +++++
+                          Map-reduce partition columns: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
+                          Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+        Reducer 2 
+            Reduce Operator Tree:
+              Group By Operator
+                keys: KEY._col0 (type: smallint), KEY._col1 (type: int), KEY._col2 (type: bigint), KEY._col3 (type: float), KEY._col4 (type: tinyint)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                Statistics: Num rows: 2221 Data size: 53305 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                  Statistics: Num rows: 2221 Data size: 53305 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col4 (type: tinyint)
+                    sort order: +
+                    Map-reduce partition columns: _col4 (type: tinyint)
+                    Statistics: Num rows: 2221 Data size: 53305 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
+        Reducer 3 
+            Reduce Operator Tree:
+              Extract
+                Statistics: Num rows: 2221 Data size: 53305 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 2221 Data size: 53305 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.over1k_part2
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds foo
+            t 
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.over1k_part2
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+
 PREHOOK: query: insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 order by i
 PREHOOK: type: QUERY
 PREHOOK: Input: default@over1k
@@ -1803,11 +2058,11 @@ STAGE PLANS:
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4
                       Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: _col4 (type: tinyint), -1 (type: int), _col3 (type: float)
+                        key expressions: _col4 (type: tinyint), '_bucket_number' (type: string), _col3 (type: float)
                         sort order: +++
                         Map-reduce partition columns: _col4 (type: tinyint)
                         Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
+                        value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint), '_bucket_number' (type: string)
         Reducer 2 
             Reduce Operator Tree:
               Extract

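The dynpart_sort_optimization.q.out changes above show the sorted dynamic-partition insert path now carrying an explicit '_bucket_number' string key in the Reduce Output Operator (replacing the literal -1 int) and propagating it through the value expressions, plus the doubled numRows/rawDataSize partition statistics after the repeated inserts. A minimal sketch of the kind of statement that exercises these plans, using the test's over1k/over1k_part2 tables; the two SET flags are the usual switches for dynamic-partition inserts and are an assumption about the test setup, not something visible in this diff:

    -- sketch only: dynamic-partition insert matching the plans above (over1k_part2 is partitioned by ds, t)
    SET hive.exec.dynamic.partition.mode=nonstrict;
    SET hive.optimize.sort.dynamic.partition=true;

    INSERT OVERWRITE TABLE over1k_part2 PARTITION (ds='foo', t)
    SELECT si, i, b, f, t
    FROM over1k
    WHERE t IS NULL OR t = 27;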
Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/tez/filter_join_breaktask.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/tez/filter_join_breaktask.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
Files hive/branches/spark-new/ql/src/test/results/clientpositive/tez/filter_join_breaktask.q.out (original) and hive/branches/spark-new/ql/src/test/results/clientpositive/tez/filter_join_breaktask.q.out Sun Oct  5 22:26:43 2014 differ

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/tez/insert_into1.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/tez/insert_into1.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/tez/insert_into1.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/tez/insert_into1.q.out Sun Oct  5 22:26:43 2014
@@ -104,6 +104,31 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@insert_into1
 #### A masked pattern was here ####
 10226524244
+PREHOOK: query: explain 
+select count(*) from insert_into1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain 
+select count(*) from insert_into1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select count(*) from insert_into1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from insert_into1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into1
+#### A masked pattern was here ####
+100
 PREHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1 SELECT * FROM src LIMIT 100
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1 SELECT * FROM src LIMIT 100
@@ -198,11 +223,27 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@insert_into1
 #### A masked pattern was here ####
 20453048488
-PREHOOK: query: SELECT COUNT(*) FROM insert_into1
+PREHOOK: query: explain
+SELECT COUNT(*) FROM insert_into1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+SELECT COUNT(*) FROM insert_into1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select count(*) from insert_into1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@insert_into1
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT COUNT(*) FROM insert_into1
+POSTHOOK: query: select count(*) from insert_into1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@insert_into1
 #### A masked pattern was here ####
@@ -301,6 +342,31 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@insert_into1
 #### A masked pattern was here ####
 -826625916
+PREHOOK: query: explain
+SELECT COUNT(*) FROM insert_into1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+SELECT COUNT(*) FROM insert_into1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select count(*) from insert_into1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from insert_into1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into1
+#### A masked pattern was here ####
+10
 PREHOOK: query: DROP TABLE insert_into1
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@insert_into1

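The new explain blocks in insert_into1.q.out (and the matching ones in insert_into2.q.out below) show COUNT(*) collapsing to a single Fetch Operator stage, i.e. the count is answered from table statistics instead of launching a Tez job. A rough sketch of how such a plan is obtained; the SET flag is the standard stats-based-answering switch and is an assumption here, not something recorded in the commit:

    -- sketch: answer simple aggregates from table/partition statistics
    SET hive.compute.query.using.stats=true;

    EXPLAIN SELECT COUNT(*) FROM insert_into1;  -- expect only a Fetch Operator / ListSink
    SELECT COUNT(*) FROM insert_into1;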
Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/tez/insert_into2.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/tez/insert_into2.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/tez/insert_into2.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/tez/insert_into2.q.out Sun Oct  5 22:26:43 2014
@@ -97,6 +97,31 @@ POSTHOOK: Input: default@src
 POSTHOOK: Output: default@insert_into2@ds=1
 POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: explain
+select count (*) from insert_into2 where ds = '1'
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count (*) from insert_into2 where ds = '1'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select count (*) from insert_into2 where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into2
+#### A masked pattern was here ####
+POSTHOOK: query: select count (*) from insert_into2 where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into2
+#### A masked pattern was here ####
+100
 PREHOOK: query: INSERT INTO TABLE insert_into2 PARTITION (ds='1') SELECT * FROM src limit 100
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
@@ -107,15 +132,29 @@ POSTHOOK: Input: default@src
 POSTHOOK: Output: default@insert_into2@ds=1
 POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: explain
+SELECT COUNT(*) FROM insert_into2 WHERE ds='1'
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+SELECT COUNT(*) FROM insert_into2 WHERE ds='1'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
 PREHOOK: query: SELECT COUNT(*) FROM insert_into2 WHERE ds='1'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@insert_into2
-PREHOOK: Input: default@insert_into2@ds=1
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT COUNT(*) FROM insert_into2 WHERE ds='1'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@insert_into2
-POSTHOOK: Input: default@insert_into2@ds=1
 #### A masked pattern was here ####
 200
 PREHOOK: query: SELECT SUM(HASH(c)) FROM (
@@ -237,6 +276,31 @@ POSTHOOK: Input: default@insert_into2@ds
 POSTHOOK: Input: default@insert_into2@ds=2
 #### A masked pattern was here ####
 -36239931656
+PREHOOK: query: explain
+SELECT COUNT(*) FROM insert_into2 WHERE ds='2'
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+SELECT COUNT(*) FROM insert_into2 WHERE ds='2'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT COUNT(*) FROM insert_into2 WHERE ds='2'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into2
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT COUNT(*) FROM insert_into2 WHERE ds='2'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into2
+#### A masked pattern was here ####
+100
 PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE insert_into2 PARTITION (ds='2')
   SELECT * FROM src LIMIT 50
 PREHOOK: type: QUERY
@@ -341,6 +405,31 @@ POSTHOOK: Input: default@insert_into2@ds
 POSTHOOK: Input: default@insert_into2@ds=2
 #### A masked pattern was here ####
 -27100860056
+PREHOOK: query: explain
+SELECT COUNT(*) FROM insert_into2 WHERE ds='2'
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+SELECT COUNT(*) FROM insert_into2 WHERE ds='2'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT COUNT(*) FROM insert_into2 WHERE ds='2'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into2
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT COUNT(*) FROM insert_into2 WHERE ds='2'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into2
+#### A masked pattern was here ####
+50
 PREHOOK: query: DROP TABLE insert_into2
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@insert_into2

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/tez/insert_orig_table.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/tez/insert_orig_table.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/tez/insert_orig_table.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/tez/insert_orig_table.q.out Sun Oct  5 22:26:43 2014
@@ -10,7 +10,7 @@ PREHOOK: query: create table acid_iot(
     ctimestamp1 TIMESTAMP,
     ctimestamp2 TIMESTAMP,
     cboolean1 BOOLEAN,
-    cboolean2 BOOLEAN) clustered by (cint) into 1 buckets stored as orc
+    cboolean2 BOOLEAN) clustered by (cint) into 1 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@acid_iot
@@ -26,7 +26,7 @@ POSTHOOK: query: create table acid_iot(
     ctimestamp1 TIMESTAMP,
     ctimestamp2 TIMESTAMP,
     cboolean1 BOOLEAN,
-    cboolean2 BOOLEAN) clustered by (cint) into 1 buckets stored as orc
+    cboolean2 BOOLEAN) clustered by (cint) into 1 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@acid_iot

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/tez/insert_update_delete.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/tez/insert_update_delete.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/tez/insert_update_delete.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/tez/insert_update_delete.q.out Sun Oct  5 22:26:43 2014
@@ -1,8 +1,8 @@
-PREHOOK: query: create table acid_iud(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc
+PREHOOK: query: create table acid_iud(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@acid_iud
-POSTHOOK: query: create table acid_iud(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc
+POSTHOOK: query: create table acid_iud(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@acid_iud

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/tez/insert_values_dynamic_partitioned.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/tez/insert_values_dynamic_partitioned.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/tez/insert_values_dynamic_partitioned.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/tez/insert_values_dynamic_partitioned.q.out Sun Oct  5 22:26:43 2014
@@ -1,12 +1,12 @@
 PREHOOK: query: create table ivdp(i int,
                  de decimal(5,2),
-                 vc varchar(128)) partitioned by (ds string) clustered by (i) into 2 buckets stored as orc
+                 vc varchar(128)) partitioned by (ds string) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@ivdp
 POSTHOOK: query: create table ivdp(i int,
                  de decimal(5,2),
-                 vc varchar(128)) partitioned by (ds string) clustered by (i) into 2 buckets stored as orc
+                 vc varchar(128)) partitioned by (ds string) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@ivdp

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/tez/insert_values_non_partitioned.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/tez/insert_values_non_partitioned.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/tez/insert_values_non_partitioned.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/tez/insert_values_non_partitioned.q.out Sun Oct  5 22:26:43 2014
@@ -10,7 +10,7 @@ PREHOOK: query: create table acid_ivnp(t
                  b boolean,
                  s string,
                  vc varchar(128),
-                 ch char(12)) clustered by (i) into 2 buckets stored as orc
+                 ch char(12)) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@acid_ivnp
@@ -26,7 +26,7 @@ POSTHOOK: query: create table acid_ivnp(
                  b boolean,
                  s string,
                  vc varchar(128),
-                 ch char(12)) clustered by (i) into 2 buckets stored as orc
+                 ch char(12)) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@acid_ivnp

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/tez/insert_values_orig_table.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/tez/insert_values_orig_table.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/tez/insert_values_orig_table.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/tez/insert_values_orig_table.q.out Sun Oct  5 22:26:43 2014
@@ -10,7 +10,7 @@ PREHOOK: query: create table acid_ivot(
     ctimestamp1 TIMESTAMP,
     ctimestamp2 TIMESTAMP,
     cboolean1 BOOLEAN,
-    cboolean2 BOOLEAN) clustered by (cint) into 1 buckets stored as orc
+    cboolean2 BOOLEAN) clustered by (cint) into 1 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@acid_ivot
@@ -26,7 +26,7 @@ POSTHOOK: query: create table acid_ivot(
     ctimestamp1 TIMESTAMP,
     ctimestamp2 TIMESTAMP,
     cboolean1 BOOLEAN,
-    cboolean2 BOOLEAN) clustered by (cint) into 1 buckets stored as orc
+    cboolean2 BOOLEAN) clustered by (cint) into 1 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@acid_ivot

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/tez/insert_values_partitioned.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/tez/insert_values_partitioned.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/tez/insert_values_partitioned.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/tez/insert_values_partitioned.q.out Sun Oct  5 22:26:43 2014
@@ -9,7 +9,7 @@ PREHOOK: query: create table acid_ivp(ti
                  dt date,
                  s string,
                  vc varchar(128),
-                 ch char(12)) partitioned by (ds string) clustered by (i) into 2 buckets stored as orc
+                 ch char(12)) partitioned by (ds string) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@acid_ivp
@@ -24,7 +24,7 @@ POSTHOOK: query: create table acid_ivp(t
                  dt date,
                  s string,
                  vc varchar(128),
-                 ch char(12)) partitioned by (ds string) clustered by (i) into 2 buckets stored as orc
+                 ch char(12)) partitioned by (ds string) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@acid_ivp

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/tez/insert_values_tmp_table.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/tez/insert_values_tmp_table.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/tez/insert_values_tmp_table.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/tez/insert_values_tmp_table.q.out Sun Oct  5 22:26:43 2014
@@ -1,20 +1,22 @@
-PREHOOK: query: create temporary table acid_ivtt(i int, de decimal(5,2), vc varchar(128)) clustered by (vc) into 2 buckets stored as orc
+PREHOOK: query: create temporary table acid_ivtt(i int, de decimal(5,2), vc varchar(128)) clustered by (vc) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@acid_ivtt
-POSTHOOK: query: create temporary table acid_ivtt(i int, de decimal(5,2), vc varchar(128)) clustered by (vc) into 2 buckets stored as orc
+POSTHOOK: query: create temporary table acid_ivtt(i int, de decimal(5,2), vc varchar(128)) clustered by (vc) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@acid_ivtt
 PREHOOK: query: insert into table acid_ivtt values 
     (1, 109.23, 'mary had a little lamb'),
-    (429496729, 0.14, 'its fleece was white as snow')
+    (429496729, 0.14, 'its fleece was white as snow'),
+    (-29496729, -0.14, 'negative values test')
 PREHOOK: type: QUERY
 PREHOOK: Input: default@values__tmp__table__1
 PREHOOK: Output: default@acid_ivtt
 POSTHOOK: query: insert into table acid_ivtt values 
     (1, 109.23, 'mary had a little lamb'),
-    (429496729, 0.14, 'its fleece was white as snow')
+    (429496729, 0.14, 'its fleece was white as snow'),
+    (-29496729, -0.14, 'negative values test')
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@values__tmp__table__1
 POSTHOOK: Output: default@acid_ivtt
@@ -29,5 +31,6 @@ POSTHOOK: query: select i, de, vc from a
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@acid_ivtt
 #### A masked pattern was here ####
+-29496729	-0.14	negative values test
 1	109.23	mary had a little lamb
 429496729	0.14	its fleece was white as snow

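The insert_orig_table, insert_update_delete and insert_values_* results above all add TBLPROPERTIES ('transactional'='true') to their CREATE TABLE statements, i.e. the bucketed ORC tables are now declared as ACID tables. A minimal sketch of such a table together with the session settings normally required to write to it; the table name acid_demo and the SET statements are illustrative assumptions, not part of this commit:

    -- sketch: an ACID table must be bucketed, stored as ORC, and marked transactional
    SET hive.support.concurrency=true;
    SET hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
    SET hive.enforce.bucketing=true;

    CREATE TABLE acid_demo (a INT, b VARCHAR(128))
      CLUSTERED BY (a) INTO 2 BUCKETS
      STORED AS ORC
      TBLPROPERTIES ('transactional'='true');

    INSERT INTO TABLE acid_demo VALUES (1, 'one'), (2, 'two');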
Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/tez/join0.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/tez/join0.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/tez/join0.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/tez/join0.q.out Sun Oct  5 22:26:43 2014
@@ -1,4 +1,4 @@
-Warning: Shuffle Join JOIN[8][tables = [src1, src2]] in Stage 'Reducer 2' is a cross product
+Warning: Shuffle Join MERGEJOIN[15][tables = [src1, src2]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: EXPLAIN
 SELECT src1.key as k1, src1.value as v1, 
        src2.key as k2, src2.value as v2 FROM 
@@ -61,7 +61,7 @@ STAGE PLANS:
                         value expressions: _col0 (type: string), _col1 (type: string)
         Reducer 2 
             Reduce Operator Tree:
-              Join Operator
+              Merge Join Operator
                 condition map:
                      Inner Join 0 to 1
                 condition expressions:
@@ -97,7 +97,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[8][tables = [src1, src2]] in Stage 'Reducer 2' is a cross product
+Warning: Shuffle Join MERGEJOIN[15][tables = [src1, src2]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: EXPLAIN FORMATTED
 SELECT src1.key as k1, src1.value as v1, 
        src2.key as k2, src2.value as v2 FROM 
@@ -115,7 +115,7 @@ SELECT src1.key as k1, src1.value as v1,
   SORT BY k1, v1, k2, v2
 POSTHOOK: type: QUERY
 #### A masked pattern was here ####
-Warning: Shuffle Join JOIN[8][tables = [src1, src2]] in Stage 'Reducer 2' is a cross product
+Warning: Shuffle Join MERGEJOIN[15][tables = [src1, src2]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: SELECT src1.key as k1, src1.value as v1, 
        src2.key as k2, src2.value as v2 FROM 
   (SELECT * FROM src WHERE src.key < 10) src1 

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/tez/join1.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/tez/join1.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/tez/join1.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/tez/join1.q.out Sun Oct  5 22:26:43 2014
@@ -56,7 +56,7 @@ STAGE PLANS:
                       Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
-              Join Operator
+              Merge Join Operator
                 condition map:
                      Inner Join 0 to 1
                 condition expressions:

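join0.q.out and join1.q.out now print Tez shuffle joins as "Merge Join Operator", and the cross-product warning picks up the new operator id (MERGEJOIN[15]). The warning is still driven by a join with no join keys; a sketch of such a query, modelled on the src subqueries visible in the diff:

    -- sketch: a keyless shuffle join => "is a cross product" warning
    SELECT src1.key AS k1, src2.key AS k2
    FROM (SELECT * FROM src WHERE src.key < 10) src1
    JOIN (SELECT * FROM src WHERE src.key < 10) src2
    SORT BY k1, k2;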
Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/tez/limit_pushdown.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/tez/limit_pushdown.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/tez/limit_pushdown.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/tez/limit_pushdown.q.out Sun Oct  5 22:26:43 2014
@@ -673,9 +673,11 @@ STAGE PLANS:
 
 PREHOOK: query: select key,value from src order by key limit 0
 PREHOOK: type: QUERY
+PREHOOK: Input: default@src
 #### A masked pattern was here ####
 POSTHOOK: query: select key,value from src order by key limit 0
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
 #### A masked pattern was here ####
 PREHOOK: query: -- 2MR (applied to last RS)
 explain
@@ -887,7 +889,7 @@ STAGE PLANS:
                         value expressions: _col1 (type: bigint)
         Reducer 3 
             Reduce Operator Tree:
-              Join Operator
+              Merge Join Operator
                 condition map:
                      Inner Join 0 to 1
                 condition expressions:

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/tez/load_dyn_part1.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/tez/load_dyn_part1.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/tez/load_dyn_part1.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/tez/load_dyn_part1.q.out Sun Oct  5 22:26:43 2014
@@ -67,9 +67,6 @@ STAGE DEPENDENCIES:
 STAGE PLANS:
   Stage: Stage-2
     Tez
-      Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE)
-        Reducer 3 <- Map 1 (SIMPLE_EDGE)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
@@ -84,12 +81,14 @@ STAGE PLANS:
                       expressions: key (type: string), value (type: string), ds (type: string), hr (type: string)
                       outputColumnNames: _col0, _col1, _col2, _col3
                       Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col2 (type: string), _col3 (type: string)
-                        sort order: ++
-                        Map-reduce partition columns: _col2 (type: string), _col3 (type: string)
+                      File Output Operator
+                        compressed: false
                         Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                            name: default.nzhang_part1
                   Filter Operator
                     predicate: (ds > '2008-04-08') (type: boolean)
                     Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
@@ -97,36 +96,14 @@ STAGE PLANS:
                       expressions: key (type: string), value (type: string), hr (type: string)
                       outputColumnNames: _col0, _col1, _col2
                       Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col2 (type: string)
-                        sort order: +
-                        Map-reduce partition columns: _col2 (type: string)
+                      File Output Operator
+                        compressed: false
                         Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
-        Reducer 2 
-            Reduce Operator Tree:
-              Extract
-                Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      name: default.nzhang_part1
-        Reducer 3 
-            Reduce Operator Tree:
-              Extract
-                Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      name: default.nzhang_part2
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                            name: default.nzhang_part2
 
   Stage: Stage-3
     Dependency Collection

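In load_dyn_part1.q.out (and load_dyn_part3.q.out below) the Reducer/Extract vertices disappear and the File Output Operators move into Map 1, so the dynamic-partition inserts now write their partition files directly from the map side. A sketch of the multi-table insert shape behind this plan, reconstructed from the target table names and the visible ds > '2008-04-08' predicate; the srcpart source and the first branch's predicate are assumptions:

    -- sketch: multi-table dynamic-partition insert writing nzhang_part1 and nzhang_part2
    SET hive.exec.dynamic.partition.mode=nonstrict;

    FROM srcpart
    INSERT OVERWRITE TABLE nzhang_part1 PARTITION (ds, hr)
      SELECT key, value, ds, hr WHERE ds <= '2008-04-08'
    INSERT OVERWRITE TABLE nzhang_part2 PARTITION (ds='2008-12-31', hr)
      SELECT key, value, hr WHERE ds > '2008-04-08';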
Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/tez/load_dyn_part3.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/tez/load_dyn_part3.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/tez/load_dyn_part3.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/tez/load_dyn_part3.q.out Sun Oct  5 22:26:43 2014
@@ -53,8 +53,6 @@ STAGE DEPENDENCIES:
 STAGE PLANS:
   Stage: Stage-1
     Tez
-      Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
@@ -66,24 +64,14 @@ STAGE PLANS:
                     expressions: key (type: string), value (type: string), ds (type: string), hr (type: string)
                     outputColumnNames: _col0, _col1, _col2, _col3
                     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: _col2 (type: string), _col3 (type: string)
-                      sort order: ++
-                      Map-reduce partition columns: _col2 (type: string), _col3 (type: string)
+                    File Output Operator
+                      compressed: false
                       Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                      value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
-        Reducer 2 
-            Reduce Operator Tree:
-              Extract
-                Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      name: default.nzhang_part3
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          name: default.nzhang_part3
 
   Stage: Stage-2
     Dependency Collection

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/tez/mapjoin_decimal.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/tez/mapjoin_decimal.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/tez/mapjoin_decimal.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/tez/mapjoin_decimal.q.out Sun Oct  5 22:26:43 2014
@@ -120,6 +120,8 @@ STAGE PLANS:
                         0 dec (type: decimal(4,2))
                         1 dec (type: decimal(4,0))
                       outputColumnNames: _col0, _col4
+                      input vertices:
+                        1 Map 1
                       Statistics: Num rows: 577 Data size: 64680 Basic stats: COMPLETE Column stats: NONE
                       Select Operator
                         expressions: _col0 (type: decimal(4,2)), _col4 (type: decimal(4,0))

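mapjoin_decimal.q.out now lists "input vertices: 1 Map 1" under the Map Join Operator, making the broadcast side of the Tez map join explicit in the plan. A sketch of a decimal-keyed map join matching the key types shown in the diff; the table names t1/t2 and the SET flag are illustrative assumptions:

    -- sketch: auto-converted (broadcast) map join on decimal keys
    SET hive.auto.convert.join=true;

    SELECT t1.dec, t2.dec
    FROM t1 JOIN t2 ON (t1.dec = t2.dec);  -- t1.dec decimal(4,2), t2.dec decimal(4,0)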
Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/tez/mapjoin_mapjoin.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/tez/mapjoin_mapjoin.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
Files hive/branches/spark-new/ql/src/test/results/clientpositive/tez/mapjoin_mapjoin.q.out (original) and hive/branches/spark-new/ql/src/test/results/clientpositive/tez/mapjoin_mapjoin.q.out Sun Oct  5 22:26:43 2014 differ

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/tez/metadata_only_queries.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/tez/metadata_only_queries.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/tez/metadata_only_queries.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/tez/metadata_only_queries.q.out Sun Oct  5 22:26:43 2014
@@ -352,9 +352,11 @@ STAGE PLANS:
 
 PREHOOK: query: select count(*), sum(1), sum(0.2), count(1), count(s), count(bo), count(bin), count(si) from stats_tbl
 PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_tbl
 #### A masked pattern was here ####
 POSTHOOK: query: select count(*), sum(1), sum(0.2), count(1), count(s), count(bo), count(bin), count(si) from stats_tbl
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_tbl
 #### A masked pattern was here ####
 9999	9999	1999.8	9999	9999	9999	9999	9999
 PREHOOK: query: explain
@@ -375,9 +377,11 @@ STAGE PLANS:
 
 PREHOOK: query: select min(i), max(i), min(b), max(b), min(f), max(f), min(d), max(d) from stats_tbl
 PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_tbl
 #### A masked pattern was here ####
 POSTHOOK: query: select min(i), max(i), min(b), max(b), min(f), max(f), min(d), max(d) from stats_tbl
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_tbl
 #### A masked pattern was here ####
 65536	65791	4294967296	4294967551	0.009999999776482582	99.9800033569336	0.01	50.0
 PREHOOK: query: explain 
@@ -398,9 +402,11 @@ STAGE PLANS:
 
 PREHOOK: query: select count(*), sum(1), sum(0.2), count(1), count(s), count(bo), count(bin), count(si) from stats_tbl_part
 PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_tbl_part
 #### A masked pattern was here ####
 POSTHOOK: query: select count(*), sum(1), sum(0.2), count(1), count(s), count(bo), count(bin), count(si) from stats_tbl_part
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_tbl_part
 #### A masked pattern was here ####
 9489	9489	1897.8	9489	9489	9489	9489	9489
 PREHOOK: query: explain
@@ -421,9 +427,11 @@ STAGE PLANS:
 
 PREHOOK: query: select min(i), max(i), min(b), max(b), min(f), max(f), min(d), max(d) from stats_tbl_part
 PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_tbl_part
 #### A masked pattern was here ####
 POSTHOOK: query: select min(i), max(i), min(b), max(b), min(f), max(f), min(d), max(d) from stats_tbl_part
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_tbl_part
 #### A masked pattern was here ####
 65536	65791	4294967296	4294967551	0.009999999776482582	99.9800033569336	0.01	50.0
 PREHOOK: query: explain select count(ts) from stats_tbl_part

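The metadata_only_queries hunks only add PREHOOK/POSTHOOK Input lines: the aggregates are still answered from table and partition statistics, but the source table is now recorded as a read entity. The statements themselves appear verbatim in the golden output, for example:

    -- answered from stored statistics; the updated output lists the table as an input
    SELECT count(*), sum(1), sum(0.2), count(1), count(s), count(bo), count(bin), count(si)
    FROM stats_tbl;

    SELECT min(i), max(i), min(b), max(b), min(f), max(f), min(d), max(d)
    FROM stats_tbl_part;
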
Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/tez/metadataonly1.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/tez/metadataonly1.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
Files hive/branches/spark-new/ql/src/test/results/clientpositive/tez/metadataonly1.q.out (original) and hive/branches/spark-new/ql/src/test/results/clientpositive/tez/metadataonly1.q.out Sun Oct  5 22:26:43 2014 differ

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/tez/mrr.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/tez/mrr.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/tez/mrr.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/tez/mrr.q.out Sun Oct  5 22:26:43 2014
@@ -439,7 +439,7 @@ STAGE PLANS:
                       Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
-              Join Operator
+              Merge Join Operator
                 condition map:
                      Inner Join 0 to 1
                 condition expressions:
@@ -867,6 +867,8 @@ STAGE PLANS:
                         0 key (type: string)
                         1 key (type: string)
                       outputColumnNames: _col5, _col6
+                      input vertices:
+                        1 Map 1
                       Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                       Select Operator
                         expressions: _col5 (type: string), _col6 (type: string)
@@ -1424,7 +1426,7 @@ STAGE PLANS:
                   value expressions: _col1 (type: bigint)
         Reducer 4 
             Reduce Operator Tree:
-              Join Operator
+              Merge Join Operator
                 condition map:
                      Inner Join 0 to 1
                      Inner Join 0 to 2
@@ -1706,6 +1708,8 @@ STAGE PLANS:
                         0 _col0 (type: string)
                         1 key (type: string)
                       outputColumnNames: _col0, _col1, _col2, _col3
+                      input vertices:
+                        0 Reducer 3
                       Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                       Select Operator
                         expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string), _col3 (type: string)

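The mrr.q.out hunks rename the reduce-side "Join Operator" to "Merge Join Operator" and add "input vertices" to the converted map joins. The .q file is not in this diff; a hedged sketch of the join-plus-aggregation shape those plans suggest, assuming the standard test table src:

    -- assumption: src(key string, value string) is the usual Hive test table
    SELECT s1.key, count(*) AS cnt
    FROM src s1
    JOIN src s2 ON (s1.key = s2.key)
    GROUP BY s1.key
    ORDER BY s1.key;
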
Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/tez/optimize_nullscan.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/tez/optimize_nullscan.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
Files hive/branches/spark-new/ql/src/test/results/clientpositive/tez/optimize_nullscan.q.out (original) and hive/branches/spark-new/ql/src/test/results/clientpositive/tez/optimize_nullscan.q.out Sun Oct  5 22:26:43 2014 differ

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/tez/orc_analyze.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/tez/orc_analyze.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/tez/orc_analyze.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/tez/orc_analyze.q.out Sun Oct  5 22:26:43 2014
@@ -73,9 +73,11 @@ POSTHOOK: Lineage: orc_create_people.sta
 POSTHOOK: Lineage: orc_create_people.state SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:state, type:string, comment:null), ]
 PREHOOK: query: analyze table orc_create_people compute statistics partialscan
 PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_create_people
 PREHOOK: Output: default@orc_create_people
 POSTHOOK: query: analyze table orc_create_people compute statistics partialscan
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_create_people
 POSTHOOK: Output: default@orc_create_people
 PREHOOK: query: desc formatted orc_create_people
 PREHOOK: type: DESCTABLE
@@ -105,7 +107,7 @@ Table Parameters:	 	 
 	numFiles            	1                   
 	numRows             	100                 
 	rawDataSize         	52600               
-	totalSize           	3123                
+	totalSize           	3121                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -195,7 +197,7 @@ Table Parameters:	 	 
 	numFiles            	1                   
 	numRows             	100                 
 	rawDataSize         	52600               
-	totalSize           	3123                
+	totalSize           	3121                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -269,11 +271,13 @@ POSTHOOK: Lineage: orc_create_people PAR
 POSTHOOK: Lineage: orc_create_people PARTITION(state=Or).start_date SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:start_date, type:timestamp, comment:null), ]
 PREHOOK: query: analyze table orc_create_people partition(state) compute statistics partialscan
 PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_create_people
 PREHOOK: Output: default@orc_create_people
 PREHOOK: Output: default@orc_create_people@state=Ca
 PREHOOK: Output: default@orc_create_people@state=Or
 POSTHOOK: query: analyze table orc_create_people partition(state) compute statistics partialscan
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_create_people
 POSTHOOK: Output: default@orc_create_people
 POSTHOOK: Output: default@orc_create_people@state=Ca
 POSTHOOK: Output: default@orc_create_people@state=Or
@@ -581,11 +585,13 @@ POSTHOOK: Lineage: orc_create_people PAR
 POSTHOOK: Lineage: orc_create_people PARTITION(state=Or).start_date SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:start_date, type:timestamp, comment:null), ]
 PREHOOK: query: analyze table orc_create_people partition(state) compute statistics partialscan
 PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_create_people
 PREHOOK: Output: default@orc_create_people
 PREHOOK: Output: default@orc_create_people@state=Ca
 PREHOOK: Output: default@orc_create_people@state=Or
 POSTHOOK: query: analyze table orc_create_people partition(state) compute statistics partialscan
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_create_people
 POSTHOOK: Output: default@orc_create_people
 POSTHOOK: Output: default@orc_create_people@state=Ca
 POSTHOOK: Output: default@orc_create_people@state=Or
@@ -618,10 +624,10 @@ Protect Mode:       	None               
 #### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
-	numFiles            	4                   
+	numFiles            	1                   
 	numRows             	50                  
-	rawDataSize         	21980               
-	totalSize           	4963                
+	rawDataSize         	21950               
+	totalSize           	2024                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -663,10 +669,10 @@ Protect Mode:       	None               
 #### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
-	numFiles            	4                   
+	numFiles            	1                   
 	numRows             	50                  
-	rawDataSize         	22048               
-	totalSize           	5051                
+	rawDataSize         	22050               
+	totalSize           	2043                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -771,10 +777,10 @@ Protect Mode:       	None               
 #### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
-	numFiles            	4                   
+	numFiles            	1                   
 	numRows             	50                  
-	rawDataSize         	21980               
-	totalSize           	4963                
+	rawDataSize         	21950               
+	totalSize           	2024                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -816,10 +822,10 @@ Protect Mode:       	None               
 #### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
-	numFiles            	4                   
+	numFiles            	1                   
 	numRows             	50                  
-	rawDataSize         	22048               
-	totalSize           	5051                
+	rawDataSize         	22050               
+	totalSize           	2043                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -942,12 +948,14 @@ POSTHOOK: Input: default@orc_create_peop
 POSTHOOK: Output: default@orc_create_people
 PREHOOK: query: analyze table orc_create_people partition(state) compute statistics noscan
 PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_create_people
 PREHOOK: Output: default@orc_create_people
 PREHOOK: Output: default@orc_create_people@state=Ca
 PREHOOK: Output: default@orc_create_people@state=OH
 PREHOOK: Output: default@orc_create_people@state=Or
 POSTHOOK: query: analyze table orc_create_people partition(state) compute statistics noscan
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_create_people
 POSTHOOK: Output: default@orc_create_people
 POSTHOOK: Output: default@orc_create_people@state=Ca
 POSTHOOK: Output: default@orc_create_people@state=OH

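The orc_analyze hunks add the analyzed table as a read entity and reflect slightly smaller ORC file sizes. The ANALYZE statements are quoted verbatim in the golden output:

    -- verbatim from the output above; the new output also records default@orc_create_people as an Input
    ANALYZE TABLE orc_create_people COMPUTE STATISTICS PARTIALSCAN;
    ANALYZE TABLE orc_create_people PARTITION (state) COMPUTE STATISTICS PARTIALSCAN;
    ANALYZE TABLE orc_create_people PARTITION (state) COMPUTE STATISTICS NOSCAN;
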
Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/tez/orc_merge1.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/tez/orc_merge1.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/tez/orc_merge1.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/tez/orc_merge1.q.out Sun Oct  5 22:26:43 2014
@@ -146,7 +146,7 @@ Partition Parameters:	 	 
 	numFiles            	6                   
 	numRows             	242                 
 	rawDataSize         	22748               
-	totalSize           	3046                
+	totalSize           	3037                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -315,7 +315,7 @@ Partition Parameters:	 	 
 	numFiles            	1                   
 	numRows             	242                 
 	rawDataSize         	22748               
-	totalSize           	1328                
+	totalSize           	1325                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -476,7 +476,7 @@ Partition Parameters:	 	 
 	numFiles            	1                   
 	numRows             	242                 
 	rawDataSize         	22748               
-	totalSize           	2401                
+	totalSize           	2392                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/tez/orc_merge2.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/tez/orc_merge2.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/tez/orc_merge2.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/tez/orc_merge2.q.out Sun Oct  5 22:26:43 2014
@@ -33,8 +33,6 @@ STAGE DEPENDENCIES:
 STAGE PLANS:
   Stage: Stage-1
     Tez
-      Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
@@ -46,24 +44,14 @@ STAGE PLANS:
                     expressions: UDFToInteger(key) (type: int), value (type: string), (hash(key) pmod 10) (type: int), (hash(value) pmod 10) (type: int)
                     outputColumnNames: _col0, _col1, _col2, _col3
                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: _col2 (type: int), _col3 (type: int)
-                      sort order: ++
-                      Map-reduce partition columns: _col2 (type: int), _col3 (type: int)
+                    File Output Operator
+                      compressed: false
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                      value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: int)
-        Reducer 2 
-            Reduce Operator Tree:
-              Extract
-                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
-                      serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
-                      name: default.orcfile_merge2a
+                      table:
+                          input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+                          serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+                          name: default.orcfile_merge2a
 
   Stage: Stage-2
     Dependency Collection

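As in load_dyn_part3, the orc_merge2 plan loses its reducer and the map task writes the ORC partitions directly. A hedged sketch of the insert behind this plan; the target table and partition expressions are taken from the plan, while the source table src and the partition column names are assumptions:

    -- assumed source table and partition column names
    SET hive.exec.dynamic.partition.mode=nonstrict;

    INSERT OVERWRITE TABLE orcfile_merge2a PARTITION (one, two)
    SELECT CAST(key AS INT), value, pmod(hash(key), 10), pmod(hash(value), 10)
    FROM src;
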
Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/tez/orc_merge5.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/tez/orc_merge5.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/tez/orc_merge5.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/tez/orc_merge5.q.out Sun Oct  5 22:26:43 2014
@@ -97,10 +97,12 @@ POSTHOOK: Lineage: orc_merge5b.userid SI
 PREHOOK: query: -- 3 files total
 analyze table orc_merge5b compute statistics noscan
 PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5b
 PREHOOK: Output: default@orc_merge5b
 POSTHOOK: query: -- 3 files total
 analyze table orc_merge5b compute statistics noscan
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5b
 POSTHOOK: Output: default@orc_merge5b
 PREHOOK: query: desc formatted orc_merge5b
 PREHOOK: type: DESCTABLE
@@ -128,7 +130,7 @@ Table Parameters:	 	 
 	numFiles            	3                   
 	numRows             	3                   
 	rawDataSize         	765                 
-	totalSize           	1141                
+	totalSize           	1133                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -265,10 +267,12 @@ POSTHOOK: Lineage: orc_merge5b.userid SI
 PREHOOK: query: -- 1 file after merging
 analyze table orc_merge5b compute statistics noscan
 PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5b
 PREHOOK: Output: default@orc_merge5b
 POSTHOOK: query: -- 1 file after merging
 analyze table orc_merge5b compute statistics noscan
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5b
 POSTHOOK: Output: default@orc_merge5b
 PREHOOK: query: desc formatted orc_merge5b
 PREHOOK: type: DESCTABLE
@@ -296,7 +300,7 @@ Table Parameters:	 	 
 	numFiles            	1                   
 	numRows             	3                   
 	rawDataSize         	765                 
-	totalSize           	907                 
+	totalSize           	899                 
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -335,9 +339,11 @@ POSTHOOK: Lineage: orc_merge5b.ts SIMPLE
 POSTHOOK: Lineage: orc_merge5b.userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
 PREHOOK: query: analyze table orc_merge5b compute statistics noscan
 PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5b
 PREHOOK: Output: default@orc_merge5b
 POSTHOOK: query: analyze table orc_merge5b compute statistics noscan
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5b
 POSTHOOK: Output: default@orc_merge5b
 PREHOOK: query: desc formatted orc_merge5b
 PREHOOK: type: DESCTABLE
@@ -365,7 +371,7 @@ Table Parameters:	 	 
 	numFiles            	3                   
 	numRows             	3                   
 	rawDataSize         	765                 
-	totalSize           	1141                
+	totalSize           	1133                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -425,10 +431,12 @@ POSTHOOK: Output: default@orc_merge5b
 PREHOOK: query: -- 1 file after merging
 analyze table orc_merge5b compute statistics noscan
 PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5b
 PREHOOK: Output: default@orc_merge5b
 POSTHOOK: query: -- 1 file after merging
 analyze table orc_merge5b compute statistics noscan
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5b
 POSTHOOK: Output: default@orc_merge5b
 PREHOOK: query: desc formatted orc_merge5b
 PREHOOK: type: DESCTABLE
@@ -456,7 +464,7 @@ Table Parameters:	 	 
 	numFiles            	1                   
 	numRows             	3                   
 	rawDataSize         	765                 
-	totalSize           	907                 
+	totalSize           	899                 
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 

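The orc_merge5 hunks follow the same two patterns: ANALYZE ... NOSCAN now lists the table as an input, and the ORC files come out a few bytes smaller (totalSize 1141 to 1133 before merging, 907 to 899 after). The statement is verbatim from the output above:

    -- verbatim; the new output additionally records default@orc_merge5b as an Input
    ANALYZE TABLE orc_merge5b COMPUTE STATISTICS NOSCAN;
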
Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/tez/orc_merge6.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/tez/orc_merge6.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/tez/orc_merge6.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/tez/orc_merge6.q.out Sun Oct  5 22:26:43 2014
@@ -115,19 +115,23 @@ POSTHOOK: Lineage: orc_merge5a PARTITION
 PREHOOK: query: -- 3 files total
 analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan
 PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
 PREHOOK: Output: default@orc_merge5a
 PREHOOK: Output: default@orc_merge5a@year=2000/hour=24
 POSTHOOK: query: -- 3 files total
 analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
 POSTHOOK: Output: default@orc_merge5a
 POSTHOOK: Output: default@orc_merge5a@year=2000/hour=24
 PREHOOK: query: analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan
 PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
 PREHOOK: Output: default@orc_merge5a
 PREHOOK: Output: default@orc_merge5a@year=2001/hour=24
 POSTHOOK: query: analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
 POSTHOOK: Output: default@orc_merge5a
 POSTHOOK: Output: default@orc_merge5a@year=2001/hour=24
 PREHOOK: query: desc formatted orc_merge5a partition(year="2000",hour=24)
@@ -162,7 +166,7 @@ Partition Parameters:	 	 
 	numFiles            	3                   
 	numRows             	3                   
 	rawDataSize         	765                 
-	totalSize           	1141                
+	totalSize           	1133                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -207,7 +211,7 @@ Partition Parameters:	 	 
 	numFiles            	3                   
 	numRows             	3                   
 	rawDataSize         	765                 
-	totalSize           	1141                
+	totalSize           	1133                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -375,19 +379,23 @@ POSTHOOK: Lineage: orc_merge5a PARTITION
 PREHOOK: query: -- 1 file after merging
 analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan
 PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
 PREHOOK: Output: default@orc_merge5a
 PREHOOK: Output: default@orc_merge5a@year=2000/hour=24
 POSTHOOK: query: -- 1 file after merging
 analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
 POSTHOOK: Output: default@orc_merge5a
 POSTHOOK: Output: default@orc_merge5a@year=2000/hour=24
 PREHOOK: query: analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan
 PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
 PREHOOK: Output: default@orc_merge5a
 PREHOOK: Output: default@orc_merge5a@year=2001/hour=24
 POSTHOOK: query: analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
 POSTHOOK: Output: default@orc_merge5a
 POSTHOOK: Output: default@orc_merge5a@year=2001/hour=24
 PREHOOK: query: desc formatted orc_merge5a partition(year="2000",hour=24)
@@ -422,7 +430,7 @@ Partition Parameters:	 	 
 	numFiles            	1                   
 	numRows             	3                   
 	rawDataSize         	765                 
-	totalSize           	907                 
+	totalSize           	899                 
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -467,7 +475,7 @@ Partition Parameters:	 	 
 	numFiles            	1                   
 	numRows             	3                   
 	rawDataSize         	765                 
-	totalSize           	907                 
+	totalSize           	899                 
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -534,18 +542,22 @@ POSTHOOK: Lineage: orc_merge5a PARTITION
 POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
 PREHOOK: query: analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan
 PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
 PREHOOK: Output: default@orc_merge5a
 PREHOOK: Output: default@orc_merge5a@year=2000/hour=24
 POSTHOOK: query: analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
 POSTHOOK: Output: default@orc_merge5a
 POSTHOOK: Output: default@orc_merge5a@year=2000/hour=24
 PREHOOK: query: analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan
 PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
 PREHOOK: Output: default@orc_merge5a
 PREHOOK: Output: default@orc_merge5a@year=2001/hour=24
 POSTHOOK: query: analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
 POSTHOOK: Output: default@orc_merge5a
 POSTHOOK: Output: default@orc_merge5a@year=2001/hour=24
 PREHOOK: query: desc formatted orc_merge5a partition(year="2000",hour=24)
@@ -580,7 +592,7 @@ Partition Parameters:	 	 
 	numFiles            	3                   
 	numRows             	3                   
 	rawDataSize         	765                 
-	totalSize           	1141                
+	totalSize           	1133                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -625,7 +637,7 @@ Partition Parameters:	 	 
 	numFiles            	3                   
 	numRows             	3                   
 	rawDataSize         	765                 
-	totalSize           	1141                
+	totalSize           	1133                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -711,19 +723,23 @@ POSTHOOK: Output: default@orc_merge5a@ye
 PREHOOK: query: -- 1 file after merging
 analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan
 PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
 PREHOOK: Output: default@orc_merge5a
 PREHOOK: Output: default@orc_merge5a@year=2000/hour=24
 POSTHOOK: query: -- 1 file after merging
 analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
 POSTHOOK: Output: default@orc_merge5a
 POSTHOOK: Output: default@orc_merge5a@year=2000/hour=24
 PREHOOK: query: analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan
 PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
 PREHOOK: Output: default@orc_merge5a
 PREHOOK: Output: default@orc_merge5a@year=2001/hour=24
 POSTHOOK: query: analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
 POSTHOOK: Output: default@orc_merge5a
 POSTHOOK: Output: default@orc_merge5a@year=2001/hour=24
 PREHOOK: query: desc formatted orc_merge5a partition(year="2000",hour=24)
@@ -758,7 +774,7 @@ Partition Parameters:	 	 
 	numFiles            	1                   
 	numRows             	3                   
 	rawDataSize         	765                 
-	totalSize           	907                 
+	totalSize           	899                 
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -803,7 +819,7 @@ Partition Parameters:	 	 
 	numFiles            	1                   
 	numRows             	3                   
 	rawDataSize         	765                 
-	totalSize           	907                 
+	totalSize           	899                 
 #### A masked pattern was here ####
 	 	 
 # Storage Information