Posted to commits@hive.apache.org by br...@apache.org on 2014/10/06 00:26:58 UTC

svn commit: r1629544 [21/33] - in /hive/branches/spark-new: ./ accumulo-handler/ beeline/ beeline/src/java/org/apache/hive/beeline/ bin/ bin/ext/ common/ common/src/java/org/apache/hadoop/hive/conf/ common/src/test/org/apache/hadoop/hive/common/type/ c...

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/orc_merge7.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/orc_merge7.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/orc_merge7.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/orc_merge7.q.out Sun Oct  5 22:26:43 2014
@@ -141,19 +141,23 @@ POSTHOOK: Lineage: orc_merge5a PARTITION
 PREHOOK: query: -- 3 files total
 analyze table orc_merge5a partition(st=80.0) compute statistics noscan
 PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
 PREHOOK: Output: default@orc_merge5a
 PREHOOK: Output: default@orc_merge5a@st=80.0
 POSTHOOK: query: -- 3 files total
 analyze table orc_merge5a partition(st=80.0) compute statistics noscan
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
 POSTHOOK: Output: default@orc_merge5a
 POSTHOOK: Output: default@orc_merge5a@st=80.0
 PREHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan
 PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
 PREHOOK: Output: default@orc_merge5a
 PREHOOK: Output: default@orc_merge5a@st=0.8
 POSTHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
 POSTHOOK: Output: default@orc_merge5a
 POSTHOOK: Output: default@orc_merge5a@st=0.8
 PREHOOK: query: desc formatted orc_merge5a partition(st=80.0)
@@ -187,7 +191,7 @@ Partition Parameters:	 	 
 	numFiles            	1                   
 	numRows             	1                   
 	rawDataSize         	255                 
-	totalSize           	521                 
+	totalSize           	513                 
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -231,7 +235,7 @@ Partition Parameters:	 	 
 	numFiles            	2                   
 	numRows             	2                   
 	rawDataSize         	510                 
-	totalSize           	1058                
+	totalSize           	1044                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -418,19 +422,23 @@ POSTHOOK: Lineage: orc_merge5a PARTITION
 PREHOOK: query: -- 1 file after merging
 analyze table orc_merge5a partition(st=80.0) compute statistics noscan
 PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
 PREHOOK: Output: default@orc_merge5a
 PREHOOK: Output: default@orc_merge5a@st=80.0
 POSTHOOK: query: -- 1 file after merging
 analyze table orc_merge5a partition(st=80.0) compute statistics noscan
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
 POSTHOOK: Output: default@orc_merge5a
 POSTHOOK: Output: default@orc_merge5a@st=80.0
 PREHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan
 PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
 PREHOOK: Output: default@orc_merge5a
 PREHOOK: Output: default@orc_merge5a@st=0.8
 POSTHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
 POSTHOOK: Output: default@orc_merge5a
 POSTHOOK: Output: default@orc_merge5a@st=0.8
 PREHOOK: query: desc formatted orc_merge5a partition(st=80.0)
@@ -464,7 +472,7 @@ Partition Parameters:	 	 
 	numFiles            	1                   
 	numRows             	1                   
 	rawDataSize         	255                 
-	totalSize           	521                 
+	totalSize           	513                 
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -508,7 +516,7 @@ Partition Parameters:	 	 
 	numFiles            	1                   
 	numRows             	2                   
 	rawDataSize         	510                 
-	totalSize           	852                 
+	totalSize           	838                 
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -614,18 +622,22 @@ POSTHOOK: Lineage: orc_merge5a PARTITION
 POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
 PREHOOK: query: analyze table orc_merge5a partition(st=80.0) compute statistics noscan
 PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
 PREHOOK: Output: default@orc_merge5a
 PREHOOK: Output: default@orc_merge5a@st=80.0
 POSTHOOK: query: analyze table orc_merge5a partition(st=80.0) compute statistics noscan
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
 POSTHOOK: Output: default@orc_merge5a
 POSTHOOK: Output: default@orc_merge5a@st=80.0
 PREHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan
 PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
 PREHOOK: Output: default@orc_merge5a
 PREHOOK: Output: default@orc_merge5a@st=0.8
 POSTHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
 POSTHOOK: Output: default@orc_merge5a
 POSTHOOK: Output: default@orc_merge5a@st=0.8
 PREHOOK: query: desc formatted orc_merge5a partition(st=80.0)
@@ -659,7 +671,7 @@ Partition Parameters:	 	 
 	numFiles            	1                   
 	numRows             	1                   
 	rawDataSize         	255                 
-	totalSize           	521                 
+	totalSize           	513                 
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -703,7 +715,7 @@ Partition Parameters:	 	 
 	numFiles            	2                   
 	numRows             	2                   
 	rawDataSize         	510                 
-	totalSize           	1058                
+	totalSize           	1044                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -791,19 +803,23 @@ POSTHOOK: Output: default@orc_merge5a@st
 PREHOOK: query: -- 1 file after merging
 analyze table orc_merge5a partition(st=80.0) compute statistics noscan
 PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
 PREHOOK: Output: default@orc_merge5a
 PREHOOK: Output: default@orc_merge5a@st=80.0
 POSTHOOK: query: -- 1 file after merging
 analyze table orc_merge5a partition(st=80.0) compute statistics noscan
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
 POSTHOOK: Output: default@orc_merge5a
 POSTHOOK: Output: default@orc_merge5a@st=80.0
 PREHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan
 PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
 PREHOOK: Output: default@orc_merge5a
 PREHOOK: Output: default@orc_merge5a@st=0.8
 POSTHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
 POSTHOOK: Output: default@orc_merge5a
 POSTHOOK: Output: default@orc_merge5a@st=0.8
 PREHOOK: query: desc formatted orc_merge5a partition(st=80.0)
@@ -837,7 +853,7 @@ Partition Parameters:	 	 
 	numFiles            	1                   
 	numRows             	1                   
 	rawDataSize         	255                 
-	totalSize           	521                 
+	totalSize           	513                 
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -881,7 +897,7 @@ Partition Parameters:	 	 
 	numFiles            	1                   
 	numRows             	2                   
 	rawDataSize         	510                 
-	totalSize           	852                 
+	totalSize           	838                 
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 

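The orc_merge7.q.out hunks above reflect two changes: ANALYZE TABLE ... COMPUTE STATISTICS NOSCAN now registers the analyzed table as a read entity (the added PREHOOK/POSTHOOK Input lines), and the ORC files produced by the test come out a few bytes smaller (the totalSize updates). For reference, a minimal sketch of the statements the golden file records; the table layout is an assumption pieced together from the lineage and DESC output above, not taken from orc_merge7.q itself:

  -- assumed, simplified shape of the partitioned ORC table (only userid is visible
  -- in the lineage lines above; the real table has more columns)
  CREATE TABLE orc_merge5a (userid bigint) PARTITIONED BY (st double) STORED AS ORC;

  -- NOSCAN derives numFiles and totalSize from file metadata without scanning rows
  ANALYZE TABLE orc_merge5a PARTITION (st=80.0) COMPUTE STATISTICS NOSCAN;
  ANALYZE TABLE orc_merge5a PARTITION (st=0.8)  COMPUTE STATISTICS NOSCAN;

  -- the numFiles / numRows / rawDataSize / totalSize values in the diff come from here
  DESC FORMATTED orc_merge5a PARTITION (st=80.0);
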
Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/orc_merge_incompat1.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/orc_merge_incompat1.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/orc_merge_incompat1.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/orc_merge_incompat1.q.out Sun Oct  5 22:26:43 2014
@@ -141,10 +141,12 @@ POSTHOOK: Lineage: orc_merge5b.userid SI
 PREHOOK: query: -- 5 files total
 analyze table orc_merge5b compute statistics noscan
 PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5b
 PREHOOK: Output: default@orc_merge5b
 POSTHOOK: query: -- 5 files total
 analyze table orc_merge5b compute statistics noscan
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5b
 POSTHOOK: Output: default@orc_merge5b
 PREHOOK: query: desc formatted orc_merge5b
 PREHOOK: type: DESCTABLE
@@ -172,7 +174,7 @@ Table Parameters:	 	 
 	numFiles            	5                   
 	numRows             	15                  
 	rawDataSize         	3825                
-	totalSize           	2862                
+	totalSize           	2877                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -219,10 +221,12 @@ POSTHOOK: Output: default@orc_merge5b
 PREHOOK: query: -- 3 file after merging - all 0.12 format files will be merged and 0.11 files will be left behind
 analyze table orc_merge5b compute statistics noscan
 PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5b
 PREHOOK: Output: default@orc_merge5b
 POSTHOOK: query: -- 3 file after merging - all 0.12 format files will be merged and 0.11 files will be left behind
 analyze table orc_merge5b compute statistics noscan
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5b
 POSTHOOK: Output: default@orc_merge5b
 PREHOOK: query: desc formatted orc_merge5b
 PREHOOK: type: DESCTABLE
@@ -250,7 +254,7 @@ Table Parameters:	 	 
 	numFiles            	3                   
 	numRows             	15                  
 	rawDataSize         	3825                
-	totalSize           	2325                
+	totalSize           	2340                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 

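orc_merge_incompat1.q.out tracks ORC fast file merge across writer versions: of the five original files, only the ORC 0.12 files are concatenated, leaving three files, and the same Input-entity and totalSize adjustments apply as above. The statement that triggers the merge is not part of these hunks; as an illustration only, a stripe-level merge of this kind is normally driven by ALTER TABLE ... CONCATENATE (or by the hive.merge.* settings during an INSERT):

  -- illustrative only: one way to trigger the ORC file merge whose effect is shown above
  ALTER TABLE orc_merge5b CONCATENATE;

  -- refresh basic stats from file footers, then check the file count
  ANALYZE TABLE orc_merge5b COMPUTE STATISTICS NOSCAN;
  DESC FORMATTED orc_merge5b;   -- numFiles: 5 -> 3; the ORC 0.11 files are left behind
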
Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/orc_merge_incompat2.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/orc_merge_incompat2.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/orc_merge_incompat2.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/orc_merge_incompat2.q.out Sun Oct  5 22:26:43 2014
@@ -200,18 +200,22 @@ POSTHOOK: Lineage: orc_merge5a PARTITION
 POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
 PREHOOK: query: analyze table orc_merge5a partition(st=80.0) compute statistics noscan
 PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
 PREHOOK: Output: default@orc_merge5a
 PREHOOK: Output: default@orc_merge5a@st=80.0
 POSTHOOK: query: analyze table orc_merge5a partition(st=80.0) compute statistics noscan
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
 POSTHOOK: Output: default@orc_merge5a
 POSTHOOK: Output: default@orc_merge5a@st=80.0
 PREHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan
 PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
 PREHOOK: Output: default@orc_merge5a
 PREHOOK: Output: default@orc_merge5a@st=0.8
 POSTHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
 POSTHOOK: Output: default@orc_merge5a
 POSTHOOK: Output: default@orc_merge5a@st=0.8
 PREHOOK: query: desc formatted orc_merge5a partition(st=80.0)
@@ -245,7 +249,7 @@ Partition Parameters:	 	 
 	numFiles            	4                   
 	numRows             	4                   
 	rawDataSize         	1020                
-	totalSize           	2092                
+	totalSize           	2060                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -289,7 +293,7 @@ Partition Parameters:	 	 
 	numFiles            	4                   
 	numRows             	8                   
 	rawDataSize         	2040                
-	totalSize           	2204                
+	totalSize           	2188                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -385,18 +389,22 @@ POSTHOOK: Input: default@orc_merge5a
 POSTHOOK: Output: default@orc_merge5a@st=0.8
 PREHOOK: query: analyze table orc_merge5a partition(st=80.0) compute statistics noscan
 PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
 PREHOOK: Output: default@orc_merge5a
 PREHOOK: Output: default@orc_merge5a@st=80.0
 POSTHOOK: query: analyze table orc_merge5a partition(st=80.0) compute statistics noscan
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
 POSTHOOK: Output: default@orc_merge5a
 POSTHOOK: Output: default@orc_merge5a@st=80.0
 PREHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan
 PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
 PREHOOK: Output: default@orc_merge5a
 PREHOOK: Output: default@orc_merge5a@st=0.8
 POSTHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
 POSTHOOK: Output: default@orc_merge5a
 POSTHOOK: Output: default@orc_merge5a@st=0.8
 PREHOOK: query: desc formatted orc_merge5a partition(st=80.0)
@@ -430,7 +438,7 @@ Partition Parameters:	 	 
 	numFiles            	3                   
 	numRows             	4                   
 	rawDataSize         	1020                
-	totalSize           	1851                
+	totalSize           	1819                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -474,7 +482,7 @@ Partition Parameters:	 	 
 	numFiles            	3                   
 	numRows             	8                   
 	rawDataSize         	2040                
-	totalSize           	1944                
+	totalSize           	1928                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/parquet_create.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/parquet_create.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/parquet_create.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/parquet_create.q.out Sun Oct  5 22:26:43 2014
@@ -181,6 +181,6 @@ POSTHOOK: query: SELECT strct from parqu
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@parquet_create
 #### A masked pattern was here ####
-{"a":"one","b":"two"}
-{"a":"three","b":"four"}
-{"a":"five","b":"six"}
+{"A":"one","B":"two"}
+{"A":"three","B":"four"}
+{"A":"five","B":"six"}

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/parquet_types.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/parquet_types.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/parquet_types.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/parquet_types.q.out Sun Oct  5 22:26:43 2014
@@ -15,9 +15,14 @@ PREHOOK: query: CREATE TABLE parquet_typ
   cstring1 string,
   t timestamp,
   cchar char(5),
-  cvarchar varchar(10)
+  cvarchar varchar(10),
+  m1 map<string, varchar(3)>,
+  l1 array<int>,
+  st1 struct<c1:int, c2:char(1)>
 ) ROW FORMAT DELIMITED
 FIELDS TERMINATED BY '|'
+COLLECTION ITEMS TERMINATED BY ','
+MAP KEYS TERMINATED BY ':'
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@parquet_types_staging
@@ -30,9 +35,14 @@ POSTHOOK: query: CREATE TABLE parquet_ty
   cstring1 string,
   t timestamp,
   cchar char(5),
-  cvarchar varchar(10)
+  cvarchar varchar(10),
+  m1 map<string, varchar(3)>,
+  l1 array<int>,
+  st1 struct<c1:int, c2:char(1)>
 ) ROW FORMAT DELIMITED
 FIELDS TERMINATED BY '|'
+COLLECTION ITEMS TERMINATED BY ','
+MAP KEYS TERMINATED BY ':'
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@parquet_types_staging
@@ -45,7 +55,10 @@ PREHOOK: query: CREATE TABLE parquet_typ
   cstring1 string,
   t timestamp,
   cchar char(5),
-  cvarchar varchar(10)
+  cvarchar varchar(10),
+  m1 map<string, varchar(3)>,
+  l1 array<int>,
+  st1 struct<c1:int, c2:char(1)>
 ) STORED AS PARQUET
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
@@ -59,7 +72,10 @@ POSTHOOK: query: CREATE TABLE parquet_ty
   cstring1 string,
   t timestamp,
   cchar char(5),
-  cvarchar varchar(10)
+  cvarchar varchar(10),
+  m1 map<string, varchar(3)>,
+  l1 array<int>,
+  st1 struct<c1:int, c2:char(1)>
 ) STORED AS PARQUET
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
@@ -88,6 +104,9 @@ POSTHOOK: Lineage: parquet_types.csmalli
 POSTHOOK: Lineage: parquet_types.cstring1 SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:cstring1, type:string, comment:null), ]
 POSTHOOK: Lineage: parquet_types.ctinyint SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 POSTHOOK: Lineage: parquet_types.cvarchar SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:cvarchar, type:varchar(10), comment:null), ]
+POSTHOOK: Lineage: parquet_types.l1 SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:l1, type:array<int>, comment:null), ]
+POSTHOOK: Lineage: parquet_types.m1 SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:m1, type:map<string,varchar(3)>, comment:null), ]
+POSTHOOK: Lineage: parquet_types.st1 SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:st1, type:struct<c1:int,c2:char(1)>, comment:null), ]
 POSTHOOK: Lineage: parquet_types.t SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:t, type:timestamp, comment:null), ]
 PREHOOK: query: SELECT * FROM parquet_types
 PREHOOK: type: QUERY
@@ -97,27 +116,56 @@ POSTHOOK: query: SELECT * FROM parquet_t
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@parquet_types
 #### A masked pattern was here ####
-100	1	1	1.0	0.0	abc	2011-01-01 01:01:01.111111111	a	a  
-101	2	2	1.1	0.3	def	2012-02-02 02:02:02.222222222	ab	ab  
-102	3	3	1.2	0.6	ghi	2013-03-03 03:03:03.333333333	abc	abc
-103	1	4	1.3	0.9	jkl	2014-04-04 04:04:04.444444444	abcd	abcd
-104	2	5	1.4	1.2	mno	2015-05-05 05:05:05.555555555	abcde	abcde
-105	3	1	1.0	1.5	pqr	2016-06-06 06:06:06.666666666	abcde	abcdef
-106	1	2	1.1	1.8	stu	2017-07-07 07:07:07.777777777	abcde	abcdefg
-107	2	3	1.2	2.1	vwx	2018-08-08 08:08:08.888888888	bcdef	abcdefgh
-108	3	4	1.3	2.4	yza	2019-09-09 09:09:09.999999999	cdefg	abcdefghij
-109	1	5	1.4	2.7	bcd	2020-10-10 10:10:10.101010101	klmno	abcdedef
-110	2	1	1.0	3.0	efg	2021-11-11 11:11:11.111111111	pqrst	abcdede
-111	3	2	1.1	3.3	hij	2022-12-12 12:12:12.121212121	nopqr	abcded
-112	1	3	1.2	3.6	klm	2023-01-02 13:13:13.131313131	opqrs	abcdd
-113	2	4	1.3	3.9	nop	2024-02-02 14:14:14.141414141	pqrst	abc
-114	3	5	1.4	4.2	qrs	2025-03-03 15:15:15.151515151	qrstu	b
-115	1	1	1.0	4.5	tuv	2026-04-04 16:16:16.161616161	rstuv	abcded
-116	2	2	1.1	4.8	wxy	2027-05-05 17:17:17.171717171	stuvw	abcded
-117	3	3	1.2	5.1	zab	2028-06-06 18:18:18.181818181	tuvwx	abcded
-118	1	4	1.3	5.4	cde	2029-07-07 19:19:19.191919191	uvwzy	abcdede
-119	2	5	1.4	5.7	fgh	2030-08-08 20:20:20.202020202	vwxyz	abcdede
-120	3	1	1.0	6.0	ijk	2031-09-09 21:21:21.212121212	wxyza	abcde
+100	1	1	1.0	0.0	abc	2011-01-01 01:01:01.111111111	a    	a  	{"k1":"v1"}	[101,200]	{"c1":10,"c2":"a"}
+101	2	2	1.1	0.3	def	2012-02-02 02:02:02.222222222	ab   	ab 	{"k2":"v2"}	[102,200]	{"c1":10,"c2":"d"}
+102	3	3	1.2	0.6	ghi	2013-03-03 03:03:03.333333333	abc  	abc	{"k3":"v3"}	[103,200]	{"c1":10,"c2":"g"}
+103	1	4	1.3	0.9	jkl	2014-04-04 04:04:04.444444444	abcd 	abcd	{"k4":"v4"}	[104,200]	{"c1":10,"c2":"j"}
+104	2	5	1.4	1.2	mno	2015-05-05 05:05:05.555555555	abcde	abcde	{"k5":"v5"}	[105,200]	{"c1":10,"c2":"m"}
+105	3	1	1.0	1.5	pqr	2016-06-06 06:06:06.666666666	abcde	abcdef	{"k6":"v6"}	[106,200]	{"c1":10,"c2":"p"}
+106	1	2	1.1	1.8	stu	2017-07-07 07:07:07.777777777	abcde	abcdefg	{"k7":"v7"}	[107,200]	{"c1":10,"c2":"s"}
+107	2	3	1.2	2.1	vwx	2018-08-08 08:08:08.888888888	bcdef	abcdefgh	{"k8":"v8"}	[108,200]	{"c1":10,"c2":"v"}
+108	3	4	1.3	2.4	yza	2019-09-09 09:09:09.999999999	cdefg	abcdefghij	{"k9":"v9"}	[109,200]	{"c1":10,"c2":"y"}
+109	1	5	1.4	2.7	bcd	2020-10-10 10:10:10.101010101	klmno	abcdedef	{"k10":"v10"}	[110,200]	{"c1":10,"c2":"b"}
+110	2	1	1.0	3.0	efg	2021-11-11 11:11:11.111111111	pqrst	abcdede	{"k11":"v11"}	[111,200]	{"c1":10,"c2":"e"}
+111	3	2	1.1	3.3	hij	2022-12-12 12:12:12.121212121	nopqr	abcded	{"k12":"v12"}	[112,200]	{"c1":10,"c2":"h"}
+112	1	3	1.2	3.6	klm	2023-01-02 13:13:13.131313131	opqrs	abcdd	{"k13":"v13"}	[113,200]	{"c1":10,"c2":"k"}
+113	2	4	1.3	3.9	nop	2024-02-02 14:14:14.141414141	pqrst	abc	{"k14":"v14"}	[114,200]	{"c1":10,"c2":"n"}
+114	3	5	1.4	4.2	qrs	2025-03-03 15:15:15.151515151	qrstu	b	{"k15":"v15"}	[115,200]	{"c1":10,"c2":"q"}
+115	1	1	1.0	4.5	qrs	2026-04-04 16:16:16.161616161	rstuv	abcded	{"k16":"v16"}	[116,200]	{"c1":10,"c2":"q"}
+116	2	2	1.1	4.8	wxy	2027-05-05 17:17:17.171717171	stuvw	abcded	{"k17":"v17"}	[117,200]	{"c1":10,"c2":"w"}
+117	3	3	1.2	5.1	zab	2028-06-06 18:18:18.181818181	tuvwx	abcded	{"k18":"v18"}	[118,200]	{"c1":10,"c2":"z"}
+118	1	4	1.3	5.4	cde	2029-07-07 19:19:19.191919191	uvwzy	abcdede	{"k19":"v19"}	[119,200]	{"c1":10,"c2":"c"}
+119	2	5	1.4	5.7	fgh	2030-08-08 20:20:20.202020202	vwxyz	abcdede	{"k20":"v20"}	[120,200]	{"c1":10,"c2":"f"}
+120	3	1	1.0	6.0	ijk	2031-09-09 21:21:21.212121212	wxyza	abcde	{"k21":"v21"}	[121,200]	{"c1":10,"c2":"i"}
+PREHOOK: query: SELECT cchar, LENGTH(cchar), cvarchar, LENGTH(cvarchar) FROM parquet_types
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_types
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT cchar, LENGTH(cchar), cvarchar, LENGTH(cvarchar) FROM parquet_types
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_types
+#### A masked pattern was here ####
+a    	1	a  	3
+ab   	2	ab 	3
+abc  	3	abc	3
+abcd 	4	abcd	4
+abcde	5	abcde	5
+abcde	5	abcdef	6
+abcde	5	abcdefg	7
+bcdef	5	abcdefgh	8
+cdefg	5	abcdefghij	10
+klmno	5	abcdedef	8
+pqrst	5	abcdede	7
+nopqr	5	abcded	6
+opqrs	5	abcdd	5
+pqrst	5	abc	3
+qrstu	5	b	1
+rstuv	5	abcded	6
+stuvw	5	abcded	6
+tuvwx	5	abcded	6
+uvwzy	5	abcdede	7
+vwxyz	5	abcdede	7
+wxyza	5	abcde	5
 PREHOOK: query: SELECT ctinyint,
   MAX(cint),
   MIN(csmallint),

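The parquet_types diff adds complex types (map, array, struct) plus collection and map-key delimiters to both the staging and Parquet tables, and a new LENGTH() probe on the char/varchar columns. Stitching the fragments above back together, the staging DDL now reads roughly as follows; the scalar column names not visible in the hunks (cint, ctinyint, csmallint, cfloat, cdouble) are assumptions based on the lineage lines:

  -- reconstructed from the hunks; treat column names outside the diff as assumptions
  CREATE TABLE parquet_types_staging (
    cint int,
    ctinyint tinyint,
    csmallint smallint,
    cfloat float,
    cdouble double,
    cstring1 string,
    t timestamp,
    cchar char(5),
    cvarchar varchar(10),
    m1 map<string, varchar(3)>,
    l1 array<int>,
    st1 struct<c1:int, c2:char(1)>
  ) ROW FORMAT DELIMITED
  FIELDS TERMINATED BY '|'
  COLLECTION ITEMS TERMINATED BY ','
  MAP KEYS TERMINATED BY ':';

  -- new check from the hunk: LENGTH(cchar) ignores the blank padding of char(5),
  -- while the varchar lengths match whatever was stored (including trailing spaces)
  SELECT cchar, LENGTH(cchar), cvarchar, LENGTH(cvarchar) FROM parquet_types;
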
Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/partition_wise_fileformat2.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/partition_wise_fileformat2.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/partition_wise_fileformat2.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/partition_wise_fileformat2.q.out Sun Oct  5 22:26:43 2014
@@ -1,8 +1,12 @@
-PREHOOK: query: create table partition_test_partitioned(key string, value string) partitioned by (dt string)
+PREHOOK: query: -- SORT_BEFORE_DIFF
+
+create table partition_test_partitioned(key string, value string) partitioned by (dt string)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@partition_test_partitioned
-POSTHOOK: query: create table partition_test_partitioned(key string, value string) partitioned by (dt string)
+POSTHOOK: query: -- SORT_BEFORE_DIFF
+
+create table partition_test_partitioned(key string, value string) partitioned by (dt string)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@partition_test_partitioned
@@ -57,33 +61,21 @@ PREHOOK: type: QUERY
 POSTHOOK: query: explain select *, BLOCK__OFFSET__INSIDE__FILE from partition_test_partitioned where dt >=100 and dt <= 102
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: partition_test_partitioned
-            Statistics: Num rows: 75 Data size: 548 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: key (type: string), value (type: string), dt (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint)
-              outputColumnNames: _col0, _col1, _col2, _col3
-              Statistics: Num rows: 75 Data size: 548 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                Statistics: Num rows: 75 Data size: 548 Basic stats: COMPLETE Column stats: NONE
-                table:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: partition_test_partitioned
+          Statistics: Num rows: 75 Data size: 548 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: key (type: string), value (type: string), dt (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint)
+            outputColumnNames: _col0, _col1, _col2, _col3
+            Statistics: Num rows: 75 Data size: 548 Basic stats: COMPLETE Column stats: NONE
+            ListSink
 
 PREHOOK: query: select * from partition_test_partitioned where dt >=100 and dt <= 102
 PREHOOK: type: QUERY

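The plan change here, and the identical ones later in this commit (ppr_pushdown3.q.out, quote1.q.out, quotedid_basic.q.out, regex_col.q.out, select_dummy_source.q.out), replaces the Map Reduce root stage with a single Fetch Operator whose Processor Tree carries the TableScan/Select/ListSink: simple projections and filters are now served by the fetch task instead of launching a job. This is the behaviour governed by hive.fetch.task.conversion; a quick way to see the new plan shape on the query from this test (the SET value is the standard switch, not something introduced by this patch):

  -- 'more' allows plain SELECT/FILTER/LIMIT queries to run as a single Fetch stage
  SET hive.fetch.task.conversion=more;

  EXPLAIN
  SELECT *, BLOCK__OFFSET__INSIDE__FILE
  FROM partition_test_partitioned
  WHERE dt >= 100 AND dt <= 102;
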
Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/ppd_constant_where.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/ppd_constant_where.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/ppd_constant_where.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/ppd_constant_where.q.out Sun Oct  5 22:26:43 2014
@@ -16,31 +16,31 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: srcpart
-            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: count()
                 mode: hash
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   sort order: 
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)
           mode: mergepartial
           outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: _col0 (type: bigint)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat

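In ppd_constant_where.q.out the only change is the Column stats annotation flipping from COMPLETE to NONE for an aggregate over srcpart; the basic stats (row counts and data sizes) are unchanged. The flag reports whether column-level statistics were available to the optimizer for the columns it needed; they are gathered separately from basic stats, roughly as below (illustrative only, not part of the patch):

  -- basic table/partition statistics: row count, file count, raw and total size
  ANALYZE TABLE srcpart PARTITION (ds, hr) COMPUTE STATISTICS;

  -- column-level statistics (NDV, null count, min/max); these are what the
  -- 'Column stats: COMPLETE' annotation in EXPLAIN refers to
  ANALYZE TABLE srcpart PARTITION (ds, hr) COMPUTE STATISTICS FOR COLUMNS;
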
Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/ppd_repeated_alias.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/ppd_repeated_alias.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/ppd_repeated_alias.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/ppd_repeated_alias.q.out Sun Oct  5 22:26:43 2014
@@ -145,7 +145,7 @@ STAGE PLANS:
             predicate: (_col7 = 3) (type: boolean)
             Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
             Select Operator
-              expressions: _col0 (type: int), _col6 (type: int), _col7 (type: int)
+              expressions: _col0 (type: int), _col6 (type: int), 3 (type: int)
               outputColumnNames: _col0, _col1, _col2
               Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
               File Output Operator

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/ppr_pushdown3.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/ppr_pushdown3.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/ppr_pushdown3.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/ppr_pushdown3.q.out Sun Oct  5 22:26:43 2014
@@ -2132,33 +2132,21 @@ PREHOOK: type: QUERY
 POSTHOOK: query: explain select key from srcpart
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: srcpart
-            Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: key (type: string)
-              outputColumnNames: _col0
-              Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                table:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: srcpart
+          Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: key (type: string)
+            outputColumnNames: _col0
+            Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+            ListSink
 
 PREHOOK: query: select key from srcpart
 PREHOOK: type: QUERY

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/ql_rewrite_gbtoidx.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/ql_rewrite_gbtoidx.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/ql_rewrite_gbtoidx.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/ql_rewrite_gbtoidx.q.out Sun Oct  5 22:26:43 2014
@@ -306,6 +306,7 @@ group by l_shipdate
 order by l_shipdate
 PREHOOK: type: QUERY
 PREHOOK: Input: default@default__lineitem_lineitem_lshipdate_idx__
+PREHOOK: Input: default@lineitem
 #### A masked pattern was here ####
 POSTHOOK: query: select l_shipdate, count(l_shipdate)
 from lineitem
@@ -313,6 +314,7 @@ group by l_shipdate
 order by l_shipdate
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@default__lineitem_lineitem_lshipdate_idx__
+POSTHOOK: Input: default@lineitem
 #### A masked pattern was here ####
 1992-04-27	1
 1992-07-02	1
@@ -654,6 +656,7 @@ group by year(l_shipdate), month(l_shipd
 order by year, month
 PREHOOK: type: QUERY
 PREHOOK: Input: default@default__lineitem_lineitem_lshipdate_idx__
+PREHOOK: Input: default@lineitem
 #### A masked pattern was here ####
 POSTHOOK: query: select year(l_shipdate) as year,
         month(l_shipdate) as month,
@@ -663,6 +666,7 @@ group by year(l_shipdate), month(l_shipd
 order by year, month
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@default__lineitem_lineitem_lshipdate_idx__
+POSTHOOK: Input: default@lineitem
 #### A masked pattern was here ####
 1992	4	1
 1992	7	3
@@ -2628,10 +2632,12 @@ STAGE PLANS:
 PREHOOK: query: select key, count(key) from tbl group by key order by key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@default__tbl_tbl_key_idx__
+PREHOOK: Input: default@tbl
 #### A masked pattern was here ####
 POSTHOOK: query: select key, count(key) from tbl group by key order by key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@default__tbl_tbl_key_idx__
+POSTHOOK: Input: default@tbl
 #### A masked pattern was here ####
 1	1
 2	3

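In ql_rewrite_gbtoidx.q.out the group-by queries that are rewritten against the aggregate indexes (default__lineitem_lineitem_lshipdate_idx__ and default__tbl_tbl_key_idx__) now additionally record the base table as a read entity. The query text is already in the hunks; for reference, the first rewritten query is:

  -- answered from the lshipdate aggregate index, but lineitem is now also
  -- listed under PREHOOK/POSTHOOK Input
  SELECT l_shipdate, COUNT(l_shipdate)
  FROM lineitem
  GROUP BY l_shipdate
  ORDER BY l_shipdate;
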
Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/query_properties.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/query_properties.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/query_properties.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/query_properties.q.out Sun Oct  5 22:26:43 2014
@@ -1,5 +1,6 @@
 PREHOOK: query: select * from src a join src b on a.key = b.key limit 0
 PREHOOK: type: QUERY
+PREHOOK: Input: default@src
 #### A masked pattern was here ####
 Has Join: true
 Has Group By: false
@@ -11,6 +12,7 @@ Has Distribute By: false
 Has Cluster By: false
 PREHOOK: query: select * from src group by src.key, src.value limit 0
 PREHOOK: type: QUERY
+PREHOOK: Input: default@src
 #### A masked pattern was here ####
 Has Join: false
 Has Group By: true
@@ -22,6 +24,7 @@ Has Distribute By: false
 Has Cluster By: false
 PREHOOK: query: select * from src order by src.key limit 0
 PREHOOK: type: QUERY
+PREHOOK: Input: default@src
 #### A masked pattern was here ####
 Has Join: false
 Has Group By: false
@@ -33,6 +36,7 @@ Has Distribute By: false
 Has Cluster By: false
 PREHOOK: query: select * from src sort by src.key limit 0
 PREHOOK: type: QUERY
+PREHOOK: Input: default@src
 #### A masked pattern was here ####
 Has Join: false
 Has Group By: false
@@ -44,6 +48,7 @@ Has Distribute By: false
 Has Cluster By: false
 PREHOOK: query: select a.key, sum(b.value) from src a join src b on a.key = b.key group by a.key limit 0
 PREHOOK: type: QUERY
+PREHOOK: Input: default@src
 #### A masked pattern was here ####
 Has Join: true
 Has Group By: true
@@ -55,6 +60,7 @@ Has Distribute By: false
 Has Cluster By: false
 PREHOOK: query: select transform(*) using 'cat' from src limit 0
 PREHOOK: type: QUERY
+PREHOOK: Input: default@src
 #### A masked pattern was here ####
 Has Join: false
 Has Group By: false
@@ -66,6 +72,7 @@ Has Distribute By: false
 Has Cluster By: false
 PREHOOK: query: select * from src distribute by src.key limit 0
 PREHOOK: type: QUERY
+PREHOOK: Input: default@src
 #### A masked pattern was here ####
 Has Join: false
 Has Group By: false
@@ -77,6 +84,7 @@ Has Distribute By: true
 Has Cluster By: false
 PREHOOK: query: select * from src cluster by src.key limit 0
 PREHOOK: type: QUERY
+PREHOOK: Input: default@src
 #### A masked pattern was here ####
 Has Join: false
 Has Group By: false
@@ -88,6 +96,7 @@ Has Distribute By: false
 Has Cluster By: true
 PREHOOK: query: select key, sum(value) from (select a.key as key, b.value as value from src a join src b on a.key = b.key) c group by key limit 0
 PREHOOK: type: QUERY
+PREHOOK: Input: default@src
 #### A masked pattern was here ####
 Has Join: true
 Has Group By: true
@@ -99,6 +108,7 @@ Has Distribute By: false
 Has Cluster By: false
 PREHOOK: query: select * from src a join src b on a.key = b.key order by a.key limit 0
 PREHOOK: type: QUERY
+PREHOOK: Input: default@src
 #### A masked pattern was here ####
 Has Join: true
 Has Group By: false
@@ -110,6 +120,7 @@ Has Distribute By: false
 Has Cluster By: false
 PREHOOK: query: select * from src a join src b on a.key = b.key distribute by a.key sort by a.key, b.value limit 0
 PREHOOK: type: QUERY
+PREHOOK: Input: default@src
 #### A masked pattern was here ####
 Has Join: true
 Has Group By: false

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/quote1.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/quote1.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/quote1.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/quote1.q.out Sun Oct  5 22:26:43 2014
@@ -108,18 +108,24 @@ POSTHOOK: query: EXPLAIN
 SELECT `int`.`location`, `int`.`type`, `int`.`table` FROM dest1 `int` WHERE `int`.`table` = '2008-04-08'
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: int
+          Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+          Filter Operator
+            predicate: (table = '2008-04-08') (type: boolean)
+            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+            Select Operator
+              expressions: location (type: int), type (type: string), '2008-04-08' (type: string)
+              outputColumnNames: _col0, _col1, _col2
+              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+              ListSink
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest1 PARTITION(`table`='2008-04-08') SELECT src.key as `partition`, src.value as `from` WHERE src.key >= 200 and src.key < 300

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/quote2.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/quote2.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/quote2.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/quote2.q.out Sun Oct  5 22:26:43 2014
@@ -8,6 +8,7 @@ SELECT
     'abc\\\\\'',  "abc\\\\\"",
     'abc\\\\\\',  "abc\\\\\\",
     'abc""""\\',  "abc''''\\",
+    'mysql_%\\_\%', 'mysql\\\_\\\\\%',
     "awk '{print NR\"\\t\"$0}'",
     'tab\ttab',   "tab\ttab"
 FROM src
@@ -23,6 +24,7 @@ SELECT
     'abc\\\\\'',  "abc\\\\\"",
     'abc\\\\\\',  "abc\\\\\\",
     'abc""""\\',  "abc''''\\",
+    'mysql_%\\_\%', 'mysql\\\_\\\\\%',
     "awk '{print NR\"\\t\"$0}'",
     'tab\ttab',   "tab\ttab"
 FROM src
@@ -40,12 +42,12 @@ STAGE PLANS:
           alias: src
           Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
-            expressions: 'abc' (type: string), 'abc' (type: string), 'abc'' (type: string), 'abc"' (type: string), 'abc\' (type: string), 'abc\' (type: string), 'abc\'' (type: string), 'abc\"' (type: string), 'abc\\' (type: string), 'abc\\' (type: string), 'abc\\'' (type: string), 'abc\\"' (type: string), 'abc\\\' (type: string), 'abc\\\' (type: string), 'abc""""\' (type: string), 'abc''''\' (type: string), 'awk '{print NR"\t"$0}'' (type: string), 'tab	tab' (type: string), 'tab	tab' (type: string)
-            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18
-            Statistics: Num rows: 500 Data size: 857000 Basic stats: COMPLETE Column stats: COMPLETE
+            expressions: 'abc' (type: string), 'abc' (type: string), 'abc'' (type: string), 'abc"' (type: string), 'abc\' (type: string), 'abc\' (type: string), 'abc\'' (type: string), 'abc\"' (type: string), 'abc\\' (type: string), 'abc\\' (type: string), 'abc\\'' (type: string), 'abc\\"' (type: string), 'abc\\\' (type: string), 'abc\\\' (type: string), 'abc""""\' (type: string), 'abc''''\' (type: string), 'mysql_%\_\%' (type: string), 'mysql\\_\\\%' (type: string), 'awk '{print NR"\t"$0}'' (type: string), 'tab	tab' (type: string), 'tab	tab' (type: string)
+            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20
+            Statistics: Num rows: 500 Data size: 952500 Basic stats: COMPLETE Column stats: COMPLETE
             Limit
               Number of rows: 1
-              Statistics: Num rows: 1 Data size: 1714 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 1 Data size: 1905 Basic stats: COMPLETE Column stats: COMPLETE
               ListSink
 
 PREHOOK: query: SELECT
@@ -57,6 +59,7 @@ PREHOOK: query: SELECT
     'abc\\\\\'',  "abc\\\\\"",
     'abc\\\\\\',  "abc\\\\\\",
     'abc""""\\',  "abc''''\\",
+    'mysql_%\\_\%', 'mysql\\\_\\\\\%',
     "awk '{print NR\"\\t\"$0}'",
     'tab\ttab',   "tab\ttab"
 FROM src
@@ -73,6 +76,7 @@ POSTHOOK: query: SELECT
     'abc\\\\\'',  "abc\\\\\"",
     'abc\\\\\\',  "abc\\\\\\",
     'abc""""\\',  "abc''''\\",
+    'mysql_%\\_\%', 'mysql\\\_\\\\\%',
     "awk '{print NR\"\\t\"$0}'",
     'tab\ttab',   "tab\ttab"
 FROM src
@@ -80,4 +84,4 @@ LIMIT 1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 #### A masked pattern was here ####
-abc	abc	abc'	abc"	abc\	abc\	abc\'	abc\"	abc\\	abc\\	abc\\'	abc\\"	abc\\\	abc\\\	abc""""\	abc''''\	awk '{print NR"\t"$0}'	tab	tab	tab	tab
+abc	abc	abc'	abc"	abc\	abc\	abc\'	abc\"	abc\\	abc\\	abc\\'	abc\\"	abc\\\	abc\\\	abc""""\	abc''''\	mysql_%\_\%	mysql\\_\\\%	awk '{print NR"\t"$0}'	tab	tab	tab	tab

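quote2.q.out gains two MySQL-style LIKE-pattern literals in its escape-handling query. Per the updated golden output, they unescape as shown below (repeated here only to make the backslash counting easier to follow):

  -- the two new literals and their evaluated values, taken from the last output row above
  SELECT 'mysql_%\\_\%',     -- evaluates to: mysql_%\_\%
         'mysql\\\_\\\\\%'   -- evaluates to: mysql\\_\\\%
  FROM src
  LIMIT 1;
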
Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/quotedid_basic.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/quotedid_basic.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/quotedid_basic.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/quotedid_basic.q.out Sun Oct  5 22:26:43 2014
@@ -30,33 +30,21 @@ PREHOOK: type: QUERY
 POSTHOOK: query: explain select `x+1`, `y&y`, `!@#$%^&*()_q` from t1
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t1
-            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-            Select Operator
-              expressions: x+1 (type: string), y&y (type: string), !@#$%^&*()_q (type: string)
-              outputColumnNames: _col0, _col1, _col2
-              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-              File Output Operator
-                compressed: false
-                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                table:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: t1
+          Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+          Select Operator
+            expressions: x+1 (type: string), y&y (type: string), !@#$%^&*()_q (type: string)
+            outputColumnNames: _col0, _col1, _col2
+            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+            ListSink
 
 PREHOOK: query: explain select `x+1`, `y&y`, `!@#$%^&*()_q` from t1 where `!@#$%^&*()_q` = '1'
 PREHOOK: type: QUERY

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/regex_col.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/regex_col.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/regex_col.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/regex_col.q.out Sun Oct  5 22:26:43 2014
@@ -28,33 +28,21 @@ POSTHOOK: query: EXPLAIN
 SELECT `..` FROM srcpart
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: srcpart
-            Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
-            Select Operator
-              expressions: ds (type: string), hr (type: string)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 2000 Data size: 736000 Basic stats: COMPLETE Column stats: COMPLETE
-              File Output Operator
-                compressed: false
-                Statistics: Num rows: 2000 Data size: 736000 Basic stats: COMPLETE Column stats: COMPLETE
-                table:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: srcpart
+          Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: ds (type: string), hr (type: string)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+            ListSink
 
 PREHOOK: query: EXPLAIN
 SELECT srcpart.`..` FROM srcpart
@@ -63,33 +51,21 @@ POSTHOOK: query: EXPLAIN
 SELECT srcpart.`..` FROM srcpart
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: srcpart
-            Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
-            Select Operator
-              expressions: ds (type: string), hr (type: string)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 2000 Data size: 736000 Basic stats: COMPLETE Column stats: COMPLETE
-              File Output Operator
-                compressed: false
-                Statistics: Num rows: 2000 Data size: 736000 Basic stats: COMPLETE Column stats: COMPLETE
-                table:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: srcpart
+          Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: ds (type: string), hr (type: string)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+            ListSink
 
 PREHOOK: query: EXPLAIN
 SELECT `..` FROM srcpart a JOIN srcpart b
@@ -289,33 +265,21 @@ POSTHOOK: query: EXPLAIN
 SELECT `.e.` FROM srcpart
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: srcpart
-            Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: key (type: string)
-              outputColumnNames: _col0
-              Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                table:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: srcpart
+          Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: key (type: string)
+            outputColumnNames: _col0
+            Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+            ListSink
 
 PREHOOK: query: EXPLAIN
 SELECT `d.*` FROM srcpart
@@ -324,33 +288,21 @@ POSTHOOK: query: EXPLAIN
 SELECT `d.*` FROM srcpart
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: srcpart
-            Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
-            Select Operator
-              expressions: ds (type: string)
-              outputColumnNames: _col0
-              Statistics: Num rows: 2000 Data size: 368000 Basic stats: COMPLETE Column stats: COMPLETE
-              File Output Operator
-                compressed: false
-                Statistics: Num rows: 2000 Data size: 368000 Basic stats: COMPLETE Column stats: COMPLETE
-                table:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: srcpart
+          Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: ds (type: string)
+            outputColumnNames: _col0
+            Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+            ListSink
 
 PREHOOK: query: EXPLAIN
 SELECT `(ds)?+.+` FROM srcpart
@@ -359,33 +311,21 @@ POSTHOOK: query: EXPLAIN
 SELECT `(ds)?+.+` FROM srcpart
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: srcpart
-            Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: key (type: string), value (type: string), hr (type: string)
-              outputColumnNames: _col0, _col1, _col2
-              Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                table:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: srcpart
+          Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: key (type: string), value (type: string), hr (type: string)
+            outputColumnNames: _col0, _col1, _col2
+            Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+            ListSink
 
 PREHOOK: query: EXPLAIN
 SELECT `(ds|hr)?+.+` FROM srcpart ORDER BY key, value LIMIT 10

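regex_col.q exercises regex column specifications: a back-quoted pattern in the select list expands to every matching column name when back-quoted names are not treated as plain identifiers. The plan diffs are again the fetch-task conversion described earlier, plus Column stats annotations dropping to NONE. A minimal illustration of the expansion, matching the expressions the plans above show (the SET is the standard switch for this mode, not something added by the patch):

  -- treat back-quoted names as column regexes rather than quoted identifiers
  SET hive.support.quoted.identifiers=none;

  SELECT `..` FROM srcpart;         -- matches the two-character names: ds, hr
  SELECT `(ds)?+.+` FROM srcpart;   -- everything except ds: key, value, hr
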
Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/schemeAuthority.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/schemeAuthority.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/schemeAuthority.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/schemeAuthority.q.out Sun Oct  5 22:26:43 2014
@@ -61,8 +61,8 @@ POSTHOOK: Input: default@dynpart
 POSTHOOK: Input: default@dynpart@value=0
 POSTHOOK: Input: default@dynpart@value=1
 #### A masked pattern was here ####
-20
 10
+20
 PREHOOK: query: select key from src where (key = 10) order by key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/schemeAuthority2.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/schemeAuthority2.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/schemeAuthority2.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/schemeAuthority2.q.out Sun Oct  5 22:26:43 2014
@@ -48,6 +48,6 @@ POSTHOOK: Input: default@dynpart
 POSTHOOK: Input: default@dynpart@value=0/value2=clusterA
 POSTHOOK: Input: default@dynpart@value=0/value2=clusterB
 #### A masked pattern was here ####
-clusterB	20
 clusterA	10
+clusterB	20
 #### A masked pattern was here ####

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/select_dummy_source.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/select_dummy_source.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/select_dummy_source.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/select_dummy_source.q.out Sun Oct  5 22:26:43 2014
@@ -5,34 +5,22 @@ POSTHOOK: query: explain
 select 'a', 100
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: _dummy_table
-            Row Limit Per Split: 1
-            Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE
-            Select Operator
-              expressions: 'a' (type: string), 100 (type: int)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE
-              File Output Operator
-                compressed: false
-                Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE
-                table:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: _dummy_table
+          Row Limit Per Split: 1
+          Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE
+          Select Operator
+            expressions: 'a' (type: string), 100 (type: int)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE
+            ListSink
 
 PREHOOK: query: select 'a', 100
 PREHOOK: type: QUERY
@@ -52,34 +40,22 @@ explain
 select 1 + 1
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: _dummy_table
-            Row Limit Per Split: 1
-            Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE
-            Select Operator
-              expressions: 2 (type: int)
-              outputColumnNames: _col0
-              Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE
-              File Output Operator
-                compressed: false
-                Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE
-                table:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: _dummy_table
+          Row Limit Per Split: 1
+          Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE
+          Select Operator
+            expressions: 2 (type: int)
+            outputColumnNames: _col0
+            Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE
+            ListSink
 
 PREHOOK: query: select 1 + 1
 PREHOOK: type: QUERY
@@ -265,34 +241,22 @@ explain
 select 2 + 3,x from (select 1 + 2 x) X
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: _dummy_table
-            Row Limit Per Split: 1
-            Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE
-            Select Operator
-              expressions: 5 (type: int), (1 + 2) (type: int)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE
-              File Output Operator
-                compressed: false
-                Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE
-                table:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: _dummy_table
+          Row Limit Per Split: 1
+          Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE
+          Select Operator
+            expressions: 5 (type: int), 3 (type: int)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE
+            ListSink
 
 PREHOOK: query: select 2 + 3,x from (select 1 + 2 x) X
 PREHOOK: type: QUERY

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/serde_user_properties.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/serde_user_properties.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
Files hive/branches/spark-new/ql/src/test/results/clientpositive/serde_user_properties.q.out (original) and hive/branches/spark-new/ql/src/test/results/clientpositive/serde_user_properties.q.out Sun Oct  5 22:26:43 2014 differ

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/stats10.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/stats10.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/stats10.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/stats10.q.out Sun Oct  5 22:26:43 2014
@@ -373,7 +373,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: bucket3_1
-            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
 
   Stage: Stage-1
     Stats-Aggr Operator

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/stats11.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/stats11.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/stats11.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/stats11.q.out Sun Oct  5 22:26:43 2014
@@ -535,8 +535,6 @@ STAGE PLANS:
 #### A masked pattern was here ####
               name default.srcbucket_mapjoin
               numFiles 2
-              numRows 0
-              rawDataSize 0
               serialization.ddl struct srcbucket_mapjoin { i32 key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -556,8 +554,6 @@ STAGE PLANS:
 #### A masked pattern was here ####
                 name default.srcbucket_mapjoin
                 numFiles 2
-                numRows 0
-                rawDataSize 0
                 serialization.ddl struct srcbucket_mapjoin { i32 key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/stats12.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/stats12.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
Files hive/branches/spark-new/ql/src/test/results/clientpositive/stats12.q.out (original) and hive/branches/spark-new/ql/src/test/results/clientpositive/stats12.q.out Sun Oct  5 22:26:43 2014 differ

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/stats13.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/stats13.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
Files hive/branches/spark-new/ql/src/test/results/clientpositive/stats13.q.out (original) and hive/branches/spark-new/ql/src/test/results/clientpositive/stats13.q.out Sun Oct  5 22:26:43 2014 differ

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/stats2.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/stats2.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/stats2.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/stats2.q.out Sun Oct  5 22:26:43 2014
@@ -27,23 +27,14 @@ STAGE PLANS:
               expressions: key (type: string), value (type: string), ds (type: string), hr (type: string)
               outputColumnNames: _col0, _col1, _col2, _col3
               Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-              Reduce Output Operator
-                key expressions: _col2 (type: string), _col3 (type: string)
-                sort order: ++
-                Map-reduce partition columns: _col2 (type: string), _col3 (type: string)
+              File Output Operator
+                compressed: false
                 Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
-      Reduce Operator Tree:
-        Extract
-          Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.analyze_t1
+                table:
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.analyze_t1
 
   Stage: Stage-0
     Move Operator
@@ -136,7 +127,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: analyze_t1
-            Statistics: Num rows: 0 Data size: 23248 Basic stats: PARTIAL Column stats: COMPLETE
+            Statistics: Num rows: 0 Data size: 23248 Basic stats: PARTIAL Column stats: NONE
 
   Stage: Stage-1
     Stats-Aggr Operator

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/stats4.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/stats4.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/stats4.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/stats4.q.out Sun Oct  5 22:26:43 2014
@@ -48,11 +48,20 @@ insert overwrite table nzhang_part2 part
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-2 is a root stage
-  Stage-0 depends on stages: Stage-2
+  Stage-8 depends on stages: Stage-2 , consists of Stage-5, Stage-4, Stage-6
+  Stage-5
+  Stage-0 depends on stages: Stage-5, Stage-4, Stage-7
   Stage-3 depends on stages: Stage-0
-  Stage-4 depends on stages: Stage-2
-  Stage-1 depends on stages: Stage-4
-  Stage-5 depends on stages: Stage-1
+  Stage-4
+  Stage-6
+  Stage-7 depends on stages: Stage-6
+  Stage-14 depends on stages: Stage-2 , consists of Stage-11, Stage-10, Stage-12
+  Stage-11
+  Stage-1 depends on stages: Stage-11, Stage-10, Stage-13
+  Stage-9 depends on stages: Stage-1
+  Stage-10
+  Stage-12
+  Stage-13 depends on stages: Stage-12
 
 STAGE PLANS:
   Stage: Stage-2
@@ -68,12 +77,14 @@ STAGE PLANS:
                 expressions: key (type: string), value (type: string), ds (type: string), hr (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3
                 Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col2 (type: string), _col3 (type: string)
-                  sort order: ++
-                  Map-reduce partition columns: _col2 (type: string), _col3 (type: string)
+                File Output Operator
+                  compressed: false
                   Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.nzhang_part1
             Filter Operator
               predicate: (ds > '2008-04-08') (type: boolean)
               Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
@@ -83,21 +94,21 @@ STAGE PLANS:
                 Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
+                  Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
                   table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-      Reduce Operator Tree:
-        Extract
-          Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.nzhang_part1
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.nzhang_part2
+
+  Stage: Stage-8
+    Conditional Operator
+
+  Stage: Stage-5
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
 
   Stage: Stage-0
     Move Operator
@@ -119,23 +130,40 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            Reduce Output Operator
-              key expressions: _col2 (type: string)
-              sort order: +
-              Map-reduce partition columns: _col2 (type: string)
-              Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
-      Reduce Operator Tree:
-        Extract
-          Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.nzhang_part2
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.nzhang_part1
+
+  Stage: Stage-6
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.nzhang_part1
+
+  Stage: Stage-7
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
+  Stage: Stage-14
+    Conditional Operator
+
+  Stage: Stage-11
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
 
   Stage: Stage-1
     Move Operator
@@ -150,9 +178,39 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.nzhang_part2
 
-  Stage: Stage-5
+  Stage: Stage-9
     Stats-Aggr Operator
 
+  Stage: Stage-10
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.nzhang_part2
+
+  Stage: Stage-12
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.nzhang_part2
+
+  Stage: Stage-13
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
 PREHOOK: query: from srcpart
 insert overwrite table nzhang_part1 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08'
 insert overwrite table nzhang_part2 partition(ds='2008-12-31', hr) select key, value, hr where ds > '2008-04-08'

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/stats7.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/stats7.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/stats7.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/stats7.q.out Sun Oct  5 22:26:43 2014
@@ -47,7 +47,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: analyze_srcpart
-            Statistics: Num rows: 0 Data size: 23248 Basic stats: PARTIAL Column stats: COMPLETE
+            Statistics: Num rows: 0 Data size: 23248 Basic stats: PARTIAL Column stats: NONE
 
   Stage: Stage-1
     Stats-Aggr Operator

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/stats8.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/stats8.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/stats8.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/stats8.q.out Sun Oct  5 22:26:43 2014
@@ -47,7 +47,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: analyze_srcpart
-            Statistics: Num rows: 0 Data size: 23248 Basic stats: PARTIAL Column stats: COMPLETE
+            Statistics: Num rows: 0 Data size: 23248 Basic stats: PARTIAL Column stats: NONE
 
   Stage: Stage-1
     Stats-Aggr Operator
@@ -157,7 +157,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: analyze_srcpart
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: PARTIAL Column stats: COMPLETE
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: PARTIAL Column stats: NONE
 
   Stage: Stage-1
     Stats-Aggr Operator
@@ -230,7 +230,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: analyze_srcpart
-            Statistics: Num rows: 1000 Data size: 10624 Basic stats: PARTIAL Column stats: COMPLETE
+            Statistics: Num rows: 1000 Data size: 10624 Basic stats: PARTIAL Column stats: NONE
 
   Stage: Stage-1
     Stats-Aggr Operator
@@ -303,7 +303,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: analyze_srcpart
-            Statistics: Num rows: 1500 Data size: 15936 Basic stats: PARTIAL Column stats: COMPLETE
+            Statistics: Num rows: 1500 Data size: 15936 Basic stats: PARTIAL Column stats: NONE
 
   Stage: Stage-1
     Stats-Aggr Operator
@@ -376,7 +376,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: analyze_srcpart
-            Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
 
   Stage: Stage-1
     Stats-Aggr Operator

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/stats_empty_dyn_part.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/stats_empty_dyn_part.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/stats_empty_dyn_part.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/stats_empty_dyn_part.q.out Sun Oct  5 22:26:43 2014
@@ -20,8 +20,13 @@ POSTHOOK: query: explain insert overwrit
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
+  Stage-4
+  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
   Stage-2 depends on stages: Stage-0
+  Stage-3
+  Stage-5
+  Stage-6 depends on stages: Stage-5
 
 STAGE PLANS:
   Stage: Stage-1
@@ -37,23 +42,23 @@ STAGE PLANS:
                 expressions: 'no_such_value' (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col1 (type: string)
-                  sort order: +
-                  Map-reduce partition columns: _col1 (type: string)
+                File Output Operator
+                  compressed: false
                   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col0 (type: string), _col1 (type: string)
-      Reduce Operator Tree:
-        Extract
-          Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.tmptable
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.tmptable
+
+  Stage: Stage-7
+    Conditional Operator
+
+  Stage: Stage-4
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
 
   Stage: Stage-0
     Move Operator
@@ -70,6 +75,36 @@ STAGE PLANS:
   Stage: Stage-2
     Stats-Aggr Operator
 
+  Stage: Stage-3
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.tmptable
+
+  Stage: Stage-5
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.tmptable
+
+  Stage: Stage-6
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
 PREHOOK: query: insert overwrite table tmptable partition (part) select key, value from src where key = 'no_such_value'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/stats_only_null.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/stats_only_null.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/stats_only_null.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/stats_only_null.q.out Sun Oct  5 22:26:43 2014
@@ -322,16 +322,20 @@ STAGE PLANS:
 
 PREHOOK: query: select count(*), count(a), count(b), count(c), count(d) from stats_null
 PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_null
 #### A masked pattern was here ####
 POSTHOOK: query: select count(*), count(a), count(b), count(c), count(d) from stats_null
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_null
 #### A masked pattern was here ####
 10	8	8	10	10
 PREHOOK: query: select count(*), count(a), count(b), count(c), count(d) from stats_null_part
 PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_null_part
 #### A masked pattern was here ####
 POSTHOOK: query: select count(*), count(a), count(b), count(c), count(d) from stats_null_part
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_null_part
 #### A masked pattern was here ####
 10	8	8	10	10
 PREHOOK: query: drop table stats_null_part

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/subquery_alias.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/subquery_alias.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/subquery_alias.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/subquery_alias.q.out Sun Oct  5 22:26:43 2014
@@ -97,33 +97,21 @@ SELECT * FROM
 ) as src2
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: s
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                table:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: s
+          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: key (type: string), value (type: string)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            ListSink
 
 PREHOOK: query: SELECT * FROM
 ( SELECT * FROM 

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/subquery_multiinsert.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/subquery_multiinsert.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/subquery_multiinsert.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/subquery_multiinsert.q.out Sun Oct  5 22:26:43 2014
@@ -99,7 +99,7 @@ STAGE PLANS:
             predicate: (_col0 = 0) (type: boolean)
             Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
             Select Operator
-              expressions: _col0 (type: bigint)
+              expressions: 0 (type: bigint)
               outputColumnNames: _col0
               Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
               Group By Operator
@@ -566,7 +566,7 @@ STAGE PLANS:
             predicate: (_col0 = 0) (type: boolean)
             Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
             Select Operator
-              expressions: _col0 (type: bigint)
+              expressions: 0 (type: bigint)
               outputColumnNames: _col0
               Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
               Group By Operator