Posted to commits@hive.apache.org by pr...@apache.org on 2015/09/14 10:22:24 UTC

[01/24] hive git commit: HIVE-11696: Exception when table-level serde is Parquet while partition-level serde is JSON (Aihua Xu, reviewed by Chao Sun)

Repository: hive
Updated Branches:
  refs/heads/llap cb9fab749 -> b0154f4c4


HIVE-11696: Exception when table-level serde is Parquet while partition-level serde is JSON (Aihua Xu, reviewed by Chao Sun)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/d94c0f65
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/d94c0f65
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/d94c0f65

Branch: refs/heads/llap
Commit: d94c0f65d85d1a0c6b31a75ecf1d1b805d823a32
Parents: d51c62a
Author: Chao Sun <su...@apache.org>
Authored: Wed Sep 9 12:56:24 2015 -0700
Committer: Chao Sun <su...@apache.org>
Committed: Wed Sep 9 12:56:45 2015 -0700

----------------------------------------------------------------------
 .../serde/ParquetHiveArrayInspector.java        |  12 +
 .../parquet_mixed_partition_formats.q           |  42 +++
 .../parquet_mixed_partition_formats.q.out       | 303 +++++++++++++++++++
 3 files changed, 357 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/d94c0f65/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ParquetHiveArrayInspector.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ParquetHiveArrayInspector.java b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ParquetHiveArrayInspector.java
index bde0dcb..05e92b5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ParquetHiveArrayInspector.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ParquetHiveArrayInspector.java
@@ -68,6 +68,10 @@ public class ParquetHiveArrayInspector implements SettableListObjectInspector {
       }
     }
 
+    if (data instanceof List) {
+      return ((List<?>)data).get(index);
+    }
+
     throw new UnsupportedOperationException("Cannot inspect " + data.getClass().getCanonicalName());
   }
 
@@ -86,6 +90,10 @@ public class ParquetHiveArrayInspector implements SettableListObjectInspector {
       return array.length;
     }
 
+    if (data instanceof List) {
+      return ((List<?>)data).size();
+    }
+
     throw new UnsupportedOperationException("Cannot inspect " + data.getClass().getCanonicalName());
   }
 
@@ -109,6 +117,10 @@ public class ParquetHiveArrayInspector implements SettableListObjectInspector {
       return list;
     }
 
+    if (data instanceof List) {
+      return (List<?>)data;
+    }
+
     throw new UnsupportedOperationException("Cannot inspect " + data.getClass().getCanonicalName());
   }
 

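Note on the hunk above: ParquetHiveArrayInspector implements SettableListObjectInspector (visible in the hunk header), and its settable/converter path builds plain ArrayLists, yet before this patch the read methods only understood the ArrayWritable layout. When the table-level Parquet serde inspects rows materialized under a different partition-level serde (JSON in the report, TEXTFILE in the test below), the array data can arrive as a plain java.util.List, and the inspector threw UnsupportedOperationException. A minimal, self-contained sketch of the fallback pattern the patch applies, simplified from the real inspector (which unwraps a nested ArrayWritable container):

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.io.ArrayWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;

public class ListAwareInspectorSketch {
  // Mirrors the patched getListElement: try the Writable-backed layout
  // first, then fall back to any java.util.List before giving up.
  static Object getListElement(Object data, int index) {
    if (data instanceof ArrayWritable) {
      Writable[] array = ((ArrayWritable) data).get();
      return (array != null && index >= 0 && index < array.length) ? array[index] : null;
    }
    if (data instanceof List) {
      return ((List<?>) data).get(index);
    }
    throw new UnsupportedOperationException(
        "Cannot inspect " + data.getClass().getCanonicalName());
  }

  public static void main(String[] args) {
    Writable[] w = { new Text("a"), new Text("b") };
    System.out.println(getListElement(new ArrayWritable(Text.class, w), 1)); // b
    System.out.println(getListElement(Arrays.asList(101, 200), 0));          // 101
  }
}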
http://git-wip-us.apache.org/repos/asf/hive/blob/d94c0f65/ql/src/test/queries/clientpositive/parquet_mixed_partition_formats.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/parquet_mixed_partition_formats.q b/ql/src/test/queries/clientpositive/parquet_mixed_partition_formats.q
new file mode 100644
index 0000000..4d7d088
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/parquet_mixed_partition_formats.q
@@ -0,0 +1,42 @@
+DROP TABLE if exists parquet_mixed_partition_formats;
+
+CREATE TABLE parquet_mixed_partition_formats (
+  cint int,
+  ctinyint tinyint,
+  csmallint smallint,
+  cfloat float,
+  cdouble double,
+  cstring1 string,
+  t timestamp,
+  cchar char(5),
+  cvarchar varchar(10),
+  cbinary string,
+  m1 map<string, varchar(3)>,
+  l1 array<int>,
+  st1 struct<c1:int, c2:char(1)>,
+  d date)
+PARTITIONED BY (dateint int)
+ROW FORMAT DELIMITED
+FIELDS TERMINATED BY '|'
+COLLECTION ITEMS TERMINATED BY ','
+MAP KEYS TERMINATED BY ':';
+
+---- partition dateint=20140330 is stored as TEXTFILE
+LOAD DATA LOCAL INPATH '../../data/files/parquet_types.txt' OVERWRITE INTO TABLE parquet_mixed_partition_formats PARTITION (dateint=20140330);
+
+SELECT * FROM parquet_mixed_partition_formats;
+
+DESCRIBE FORMATTED parquet_mixed_partition_formats PARTITION (dateint=20140330);
+
+---change table serde and file format to PARQUET----
+
+ALTER TABLE parquet_mixed_partition_formats
+     SET FILEFORMAT
+     INPUTFORMAT 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat'
+     OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat'
+     SERDE 'org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe';
+
+DESCRIBE FORMATTED parquet_mixed_partition_formats;
+DESCRIBE FORMATTED parquet_mixed_partition_formats PARTITION (dateint=20140330);
+
+SELECT * FROM parquet_mixed_partition_formats;

http://git-wip-us.apache.org/repos/asf/hive/blob/d94c0f65/ql/src/test/results/clientpositive/parquet_mixed_partition_formats.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/parquet_mixed_partition_formats.q.out b/ql/src/test/results/clientpositive/parquet_mixed_partition_formats.q.out
new file mode 100644
index 0000000..a412350
--- /dev/null
+++ b/ql/src/test/results/clientpositive/parquet_mixed_partition_formats.q.out
@@ -0,0 +1,303 @@
+PREHOOK: query: DROP TABLE if exists parquet_mixed_partition_formats
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE if exists parquet_mixed_partition_formats
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE parquet_mixed_partition_formats (
+  cint int,
+  ctinyint tinyint,
+  csmallint smallint,
+  cfloat float,
+  cdouble double,
+  cstring1 string,
+  t timestamp,
+  cchar char(5),
+  cvarchar varchar(10),
+  cbinary string,
+  m1 map<string, varchar(3)>,
+  l1 array<int>,
+  st1 struct<c1:int, c2:char(1)>,
+  d date)
+PARTITIONED BY (dateint int)
+ROW FORMAT DELIMITED
+FIELDS TERMINATED BY '|'
+COLLECTION ITEMS TERMINATED BY ','
+MAP KEYS TERMINATED BY ':'
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@parquet_mixed_partition_formats
+POSTHOOK: query: CREATE TABLE parquet_mixed_partition_formats (
+  cint int,
+  ctinyint tinyint,
+  csmallint smallint,
+  cfloat float,
+  cdouble double,
+  cstring1 string,
+  t timestamp,
+  cchar char(5),
+  cvarchar varchar(10),
+  cbinary string,
+  m1 map<string, varchar(3)>,
+  l1 array<int>,
+  st1 struct<c1:int, c2:char(1)>,
+  d date)
+PARTITIONED BY (dateint int)
+ROW FORMAT DELIMITED
+FIELDS TERMINATED BY '|'
+COLLECTION ITEMS TERMINATED BY ','
+MAP KEYS TERMINATED BY ':'
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@parquet_mixed_partition_formats
+PREHOOK: query: ---- partition dateint=20140330 is stored as TEXTFILE
+LOAD DATA LOCAL INPATH '../../data/files/parquet_types.txt' OVERWRITE INTO TABLE parquet_mixed_partition_formats PARTITION (dateint=20140330)
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@parquet_mixed_partition_formats
+POSTHOOK: query: ---- partition dateint=20140330 is stored as TEXTFILE
+LOAD DATA LOCAL INPATH '../../data/files/parquet_types.txt' OVERWRITE INTO TABLE parquet_mixed_partition_formats PARTITION (dateint=20140330)
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@parquet_mixed_partition_formats
+POSTHOOK: Output: default@parquet_mixed_partition_formats@dateint=20140330
+PREHOOK: query: SELECT * FROM parquet_mixed_partition_formats
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_mixed_partition_formats
+PREHOOK: Input: default@parquet_mixed_partition_formats@dateint=20140330
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM parquet_mixed_partition_formats
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_mixed_partition_formats
+POSTHOOK: Input: default@parquet_mixed_partition_formats@dateint=20140330
+#### A masked pattern was here ####
+100	1	1	1.0	0.0	abc	2011-01-01 01:01:01.111111111	a    	a  	B4F3CAFDBEDD	{"k1":"v1"}	[101,200]	{"c1":10,"c2":"a"}	2011-01-01	20140330
+101	2	2	1.1	0.3	def	2012-02-02 02:02:02.222222222	ab   	ab 	68692CCAC0BDE7	{"k2":"v2"}	[102,200]	{"c1":10,"c2":"d"}	2012-02-02	20140330
+102	3	3	1.2	0.6	ghi	2013-03-03 03:03:03.333333333	abc  	abc	B4F3CAFDBEDD	{"k3":"v3"}	[103,200]	{"c1":10,"c2":"g"}	2013-03-03	20140330
+103	1	4	1.3	0.9	jkl	2014-04-04 04:04:04.444444444	abcd 	abcd	68692CCAC0BDE7	{"k4":"v4"}	[104,200]	{"c1":10,"c2":"j"}	2014-04-04	20140330
+104	2	5	1.4	1.2	mno	2015-05-05 05:05:05.555555555	abcde	abcde	B4F3CAFDBEDD	{"k5":"v5"}	[105,200]	{"c1":10,"c2":"m"}	2015-05-05	20140330
+105	3	1	1.0	1.5	pqr	2016-06-06 06:06:06.666666666	abcde	abcdef	68692CCAC0BDE7	{"k6":"v6"}	[106,200]	{"c1":10,"c2":"p"}	2016-06-06	20140330
+106	1	2	1.1	1.8	stu	2017-07-07 07:07:07.777777777	abcde	abcdefg	B4F3CAFDBEDD	{"k7":"v7"}	[107,200]	{"c1":10,"c2":"s"}	2017-07-07	20140330
+107	2	3	1.2	2.1	vwx	2018-08-08 08:08:08.888888888	bcdef	abcdefgh	68692CCAC0BDE7	{"k8":"v8"}	[108,200]	{"c1":10,"c2":"v"}	2018-08-08	20140330
+108	3	4	1.3	2.4	yza	2019-09-09 09:09:09.999999999	cdefg	B4F3CAFDBE	68656C6C6F	{"k9":"v9"}	[109,200]	{"c1":10,"c2":"y"}	2019-09-09	20140330
+109	1	5	1.4	2.7	bcd	2020-10-10 10:10:10.101010101	klmno	abcdedef	68692CCAC0BDE7	{"k10":"v10"}	[110,200]	{"c1":10,"c2":"b"}	2020-10-10	20140330
+110	2	1	1.0	3.0	efg	2021-11-11 11:11:11.111111111	pqrst	abcdede	B4F3CAFDBEDD	{"k11":"v11"}	[111,200]	{"c1":10,"c2":"e"}	2021-11-11	20140330
+111	3	2	1.1	3.3	hij	2022-12-12 12:12:12.121212121	nopqr	abcded	68692CCAC0BDE7	{"k12":"v12"}	[112,200]	{"c1":10,"c2":"h"}	2022-12-12	20140330
+112	1	3	1.2	3.6	klm	2023-01-02 13:13:13.131313131	opqrs	abcdd	B4F3CAFDBEDD	{"k13":"v13"}	[113,200]	{"c1":10,"c2":"k"}	2023-01-02	20140330
+113	2	4	1.3	3.9	nop	2024-02-02 14:14:14.141414141	pqrst	abc	68692CCAC0BDE7	{"k14":"v14"}	[114,200]	{"c1":10,"c2":"n"}	2024-02-02	20140330
+114	3	5	1.4	4.2	qrs	2025-03-03 15:15:15.151515151	qrstu	b	B4F3CAFDBEDD	{"k15":"v15"}	[115,200]	{"c1":10,"c2":"q"}	2025-03-03	20140330
+115	1	1	1.0	4.5	qrs	2026-04-04 16:16:16.161616161	rstuv	abcded	68692CCAC0BDE7	{"k16":"v16"}	[116,200]	{"c1":10,"c2":"q"}	2026-04-04	20140330
+116	2	2	1.1	4.8	wxy	2027-05-05 17:17:17.171717171	stuvw	abcded	B4F3CAFDBEDD	{"k17":"v17"}	[117,200]	{"c1":10,"c2":"w"}	2027-05-05	20140330
+117	3	3	1.2	5.1	zab	2028-06-06 18:18:18.181818181	tuvwx	abcded	68692CCAC0BDE7	{"k18":"v18"}	[118,200]	{"c1":10,"c2":"z"}	2028-06-06	20140330
+118	1	4	1.3	5.4	cde	2029-07-07 19:19:19.191919191	uvwzy	abcdede	B4F3CAFDBEDD	{"k19":"v19"}	[119,200]	{"c1":10,"c2":"c"}	2029-07-07	20140330
+119	2	5	1.4	5.7	fgh	2030-08-08 20:20:20.202020202	vwxyz	abcdede	68692CCAC0BDE7	{"k20":"v20"}	[120,200]	{"c1":10,"c2":"f"}	2030-08-08	20140330
+120	3	1	1.0	6.0	ijk	2031-09-09 21:21:21.212121212	wxyza	abcde	B4F3CAFDBEDD	{"k21":"v21"}	[121,200]	{"c1":10,"c2":"i"}	2031-09-09	20140330
+121	1	2	1.1	6.3	lmn	2032-10-10 22:22:22.222222222	bcdef	abcde		{"k22":"v22"}	[122,200]	{"c1":10,"c2":"l"}	2032-10-10	20140330
+PREHOOK: query: DESCRIBE FORMATTED parquet_mixed_partition_formats PARTITION (dateint=20140330)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@parquet_mixed_partition_formats
+POSTHOOK: query: DESCRIBE FORMATTED parquet_mixed_partition_formats PARTITION (dateint=20140330)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@parquet_mixed_partition_formats
+# col_name            	data_type           	comment             
+	 	 
+cint                	int                 	                    
+ctinyint            	tinyint             	                    
+csmallint           	smallint            	                    
+cfloat              	float               	                    
+cdouble             	double              	                    
+cstring1            	string              	                    
+t                   	timestamp           	                    
+cchar               	char(5)             	                    
+cvarchar            	varchar(10)         	                    
+cbinary             	string              	                    
+m1                  	map<string,varchar(3)>	                    
+l1                  	array<int>          	                    
+st1                 	struct<c1:int,c2:char(1)>	                    
+d                   	date                	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+	 	 
+dateint             	int                 	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[20140330]          	 
+Database:           	default             	 
+Table:              	parquet_mixed_partition_formats	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	true                
+	numFiles            	1                   
+	numRows             	0                   
+	rawDataSize         	0                   
+	totalSize           	2521                
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	colelction.delim    	,                   
+	field.delim         	|                   
+	mapkey.delim        	:                   
+	serialization.format	|                   
+PREHOOK: query: ---change table serde and file format to PARQUET----
+
+ALTER TABLE parquet_mixed_partition_formats
+     SET FILEFORMAT
+     INPUTFORMAT 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat'
+     OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat'
+     SERDE 'org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe'
+PREHOOK: type: ALTERTABLE_FILEFORMAT
+PREHOOK: Input: default@parquet_mixed_partition_formats
+PREHOOK: Output: default@parquet_mixed_partition_formats
+POSTHOOK: query: ---change table serde and file format to PARQUET----
+
+ALTER TABLE parquet_mixed_partition_formats
+     SET FILEFORMAT
+     INPUTFORMAT 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat'
+     OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat'
+     SERDE 'org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe'
+POSTHOOK: type: ALTERTABLE_FILEFORMAT
+POSTHOOK: Input: default@parquet_mixed_partition_formats
+POSTHOOK: Output: default@parquet_mixed_partition_formats
+PREHOOK: query: DESCRIBE FORMATTED parquet_mixed_partition_formats
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@parquet_mixed_partition_formats
+POSTHOOK: query: DESCRIBE FORMATTED parquet_mixed_partition_formats
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@parquet_mixed_partition_formats
+# col_name            	data_type           	comment             
+	 	 
+cint                	int                 	                    
+ctinyint            	tinyint             	                    
+csmallint           	smallint            	                    
+cfloat              	float               	                    
+cdouble             	double              	                    
+cstring1            	string              	                    
+t                   	timestamp           	                    
+cchar               	char(5)             	                    
+cvarchar            	varchar(10)         	                    
+cbinary             	string              	                    
+m1                  	map<string,varchar(3)>	                    
+l1                  	array<int>          	                    
+st1                 	struct<c1:int,c2:char(1)>	                    
+d                   	date                	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+	 	 
+dateint             	int                 	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe	 
+InputFormat:        	org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	colelction.delim    	,                   
+	field.delim         	|                   
+	mapkey.delim        	:                   
+	serialization.format	|                   
+PREHOOK: query: DESCRIBE FORMATTED parquet_mixed_partition_formats PARTITION (dateint=20140330)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@parquet_mixed_partition_formats
+POSTHOOK: query: DESCRIBE FORMATTED parquet_mixed_partition_formats PARTITION (dateint=20140330)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@parquet_mixed_partition_formats
+# col_name            	data_type           	comment             
+	 	 
+cint                	int                 	                    
+ctinyint            	tinyint             	                    
+csmallint           	smallint            	                    
+cfloat              	float               	                    
+cdouble             	double              	                    
+cstring1            	string              	                    
+t                   	timestamp           	                    
+cchar               	char(5)             	                    
+cvarchar            	varchar(10)         	                    
+cbinary             	string              	                    
+m1                  	map<string,varchar(3)>	                    
+l1                  	array<int>          	                    
+st1                 	struct<c1:int,c2:char(1)>	                    
+d                   	date                	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+	 	 
+dateint             	int                 	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[20140330]          	 
+Database:           	default             	 
+Table:              	parquet_mixed_partition_formats	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	true                
+	numFiles            	1                   
+	numRows             	0                   
+	rawDataSize         	0                   
+	totalSize           	2521                
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	colelction.delim    	,                   
+	field.delim         	|                   
+	mapkey.delim        	:                   
+	serialization.format	|                   
+PREHOOK: query: SELECT * FROM parquet_mixed_partition_formats
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_mixed_partition_formats
+PREHOOK: Input: default@parquet_mixed_partition_formats@dateint=20140330
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM parquet_mixed_partition_formats
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_mixed_partition_formats
+POSTHOOK: Input: default@parquet_mixed_partition_formats@dateint=20140330
+#### A masked pattern was here ####
+100	1	1	1.0	0.0	abc	2011-01-01 01:01:01.111111111	a    	a  	B4F3CAFDBEDD	{"k1":"v1"}	[101,200]	{"c1":10,"c2":"a"}	2011-01-01	20140330
+101	2	2	1.1	0.3	def	2012-02-02 02:02:02.222222222	ab   	ab 	68692CCAC0BDE7	{"k2":"v2"}	[102,200]	{"c1":10,"c2":"d"}	2012-02-02	20140330
+102	3	3	1.2	0.6	ghi	2013-03-03 03:03:03.333333333	abc  	abc	B4F3CAFDBEDD	{"k3":"v3"}	[103,200]	{"c1":10,"c2":"g"}	2013-03-03	20140330
+103	1	4	1.3	0.9	jkl	2014-04-04 04:04:04.444444444	abcd 	abcd	68692CCAC0BDE7	{"k4":"v4"}	[104,200]	{"c1":10,"c2":"j"}	2014-04-04	20140330
+104	2	5	1.4	1.2	mno	2015-05-05 05:05:05.555555555	abcde	abcde	B4F3CAFDBEDD	{"k5":"v5"}	[105,200]	{"c1":10,"c2":"m"}	2015-05-05	20140330
+105	3	1	1.0	1.5	pqr	2016-06-06 06:06:06.666666666	abcde	abcdef	68692CCAC0BDE7	{"k6":"v6"}	[106,200]	{"c1":10,"c2":"p"}	2016-06-06	20140330
+106	1	2	1.1	1.8	stu	2017-07-07 07:07:07.777777777	abcde	abcdefg	B4F3CAFDBEDD	{"k7":"v7"}	[107,200]	{"c1":10,"c2":"s"}	2017-07-07	20140330
+107	2	3	1.2	2.1	vwx	2018-08-08 08:08:08.888888888	bcdef	abcdefgh	68692CCAC0BDE7	{"k8":"v8"}	[108,200]	{"c1":10,"c2":"v"}	2018-08-08	20140330
+108	3	4	1.3	2.4	yza	2019-09-09 09:09:09.999999999	cdefg	B4F3CAFDBE	68656C6C6F	{"k9":"v9"}	[109,200]	{"c1":10,"c2":"y"}	2019-09-09	20140330
+109	1	5	1.4	2.7	bcd	2020-10-10 10:10:10.101010101	klmno	abcdedef	68692CCAC0BDE7	{"k10":"v10"}	[110,200]	{"c1":10,"c2":"b"}	2020-10-10	20140330
+110	2	1	1.0	3.0	efg	2021-11-11 11:11:11.111111111	pqrst	abcdede	B4F3CAFDBEDD	{"k11":"v11"}	[111,200]	{"c1":10,"c2":"e"}	2021-11-11	20140330
+111	3	2	1.1	3.3	hij	2022-12-12 12:12:12.121212121	nopqr	abcded	68692CCAC0BDE7	{"k12":"v12"}	[112,200]	{"c1":10,"c2":"h"}	2022-12-12	20140330
+112	1	3	1.2	3.6	klm	2023-01-02 13:13:13.131313131	opqrs	abcdd	B4F3CAFDBEDD	{"k13":"v13"}	[113,200]	{"c1":10,"c2":"k"}	2023-01-02	20140330
+113	2	4	1.3	3.9	nop	2024-02-02 14:14:14.141414141	pqrst	abc	68692CCAC0BDE7	{"k14":"v14"}	[114,200]	{"c1":10,"c2":"n"}	2024-02-02	20140330
+114	3	5	1.4	4.2	qrs	2025-03-03 15:15:15.151515151	qrstu	b	B4F3CAFDBEDD	{"k15":"v15"}	[115,200]	{"c1":10,"c2":"q"}	2025-03-03	20140330
+115	1	1	1.0	4.5	qrs	2026-04-04 16:16:16.161616161	rstuv	abcded	68692CCAC0BDE7	{"k16":"v16"}	[116,200]	{"c1":10,"c2":"q"}	2026-04-04	20140330
+116	2	2	1.1	4.8	wxy	2027-05-05 17:17:17.171717171	stuvw	abcded	B4F3CAFDBEDD	{"k17":"v17"}	[117,200]	{"c1":10,"c2":"w"}	2027-05-05	20140330
+117	3	3	1.2	5.1	zab	2028-06-06 18:18:18.181818181	tuvwx	abcded	68692CCAC0BDE7	{"k18":"v18"}	[118,200]	{"c1":10,"c2":"z"}	2028-06-06	20140330
+118	1	4	1.3	5.4	cde	2029-07-07 19:19:19.191919191	uvwzy	abcdede	B4F3CAFDBEDD	{"k19":"v19"}	[119,200]	{"c1":10,"c2":"c"}	2029-07-07	20140330
+119	2	5	1.4	5.7	fgh	2030-08-08 20:20:20.202020202	vwxyz	abcdede	68692CCAC0BDE7	{"k20":"v20"}	[120,200]	{"c1":10,"c2":"f"}	2030-08-08	20140330
+120	3	1	1.0	6.0	ijk	2031-09-09 21:21:21.212121212	wxyza	abcde	B4F3CAFDBEDD	{"k21":"v21"}	[121,200]	{"c1":10,"c2":"i"}	2031-09-09	20140330
+121	1	2	1.1	6.3	lmn	2032-10-10 22:22:22.222222222	bcdef	abcde		{"k22":"v22"}	[122,200]	{"c1":10,"c2":"l"}	2032-10-10	20140330


[15/24] hive git commit: HIVE-11614: CBO: Calcite Operator To Hive Operator (Calcite Return Path): ctas after order by has problem (Pengcheng Xiong, reviewed by Laljo John Pullokkaran)

Posted by pr...@apache.org.
HIVE-11614: CBO: Calcite Operator To Hive Operator (Calcite Return Path): ctas after order by has problem (Pengcheng Xiong, reviewed by Laljo John Pullokkaran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/bbb91292
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/bbb91292
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/bbb91292

Branch: refs/heads/llap
Commit: bbb912927a1457daf283f3030cd873d55b93c8c3
Parents: ff1f5b1
Author: Pengcheng Xiong <px...@apache.org>
Authored: Sat Sep 12 20:27:16 2015 -0700
Committer: Pengcheng Xiong <px...@apache.org>
Committed: Sat Sep 12 20:27:16 2015 -0700

----------------------------------------------------------------------
 .../translator/PlanModifierForReturnPath.java   |   4 -
 .../hadoop/hive/ql/parse/CalcitePlanner.java    |   7 +-
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |   2 +-
 .../queries/clientpositive/cbo_rp_auto_join17.q |  14 +
 .../cbo_rp_cross_product_check_2.q              |  31 +
 .../clientpositive/cbo_rp_auto_join17.q.out     | 118 ++++
 .../cbo_rp_cross_product_check_2.q.out          | 699 +++++++++++++++++++
 7 files changed, 866 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/bbb91292/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/PlanModifierForReturnPath.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/PlanModifierForReturnPath.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/PlanModifierForReturnPath.java
index 81cc474..95d692c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/PlanModifierForReturnPath.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/PlanModifierForReturnPath.java
@@ -34,10 +34,6 @@ public class PlanModifierForReturnPath {
 
     Pair<RelNode, RelNode> topSelparentPair = HiveCalciteUtil.getTopLevelSelect(newTopNode);
     PlanModifierUtil.fixTopOBSchema(newTopNode, topSelparentPair, resultSchema, false);
-    if (isCTAS) {
-      newTopNode = PlanModifierForASTConv.renameTopLevelSelectInResultSchema(newTopNode,
-          topSelparentPair, resultSchema);
-    }
 
     return newTopNode;
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/bbb91292/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
index 86bdf7e..8e992da 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
@@ -193,7 +193,6 @@ import com.google.common.collect.Lists;
 public class CalcitePlanner extends SemanticAnalyzer {
 
   private final AtomicInteger noColsMissingStats = new AtomicInteger(0);
-  private List<FieldSchema> topLevelFieldSchema;
   private SemanticException semanticException;
   private boolean           runCBO             = true;
 
@@ -620,7 +619,7 @@ public class CalcitePlanner extends SemanticAnalyzer {
       rethrowCalciteException(e);
       throw new AssertionError("rethrowCalciteException didn't throw for " + e.getMessage());
     }
-    optiqOptimizedAST = ASTConverter.convert(optimizedOptiqPlan, topLevelFieldSchema);
+    optiqOptimizedAST = ASTConverter.convert(optimizedOptiqPlan, resultSchema);
 
     return optiqOptimizedAST;
   }
@@ -644,7 +643,7 @@ public class CalcitePlanner extends SemanticAnalyzer {
     }
 
     RelNode modifiedOptimizedOptiqPlan = PlanModifierForReturnPath.convertOpTree(
-        introduceProjectIfNeeded(optimizedOptiqPlan), topLevelFieldSchema, this.getQB()
+        introduceProjectIfNeeded(optimizedOptiqPlan), resultSchema, this.getQB()
             .getTableDesc() != null);
 
     LOG.debug("Translating the following plan:\n" + RelOptUtil.toString(modifiedOptimizedOptiqPlan));
@@ -851,7 +850,7 @@ public class CalcitePlanner extends SemanticAnalyzer {
       // 1. Gen Calcite Plan
       try {
         calciteGenPlan = genLogicalPlan(getQB(), true);
-        topLevelFieldSchema = SemanticAnalyzer.convertRowSchemaToResultSetSchema(
+        resultSchema = SemanticAnalyzer.convertRowSchemaToResultSetSchema(
             relToHiveRR.get(calciteGenPlan),
             HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_RESULTSET_USE_UNIQUE_COLUMN_NAMES));
       } catch (SemanticException e) {

http://git-wip-us.apache.org/repos/asf/hive/blob/bbb91292/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index f6052e3..16957b6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -261,7 +261,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
   private final HashMap<String, SplitSample> nameToSplitSample;
   Map<GroupByOperator, Set<String>> groupOpToInputTables;
   Map<String, PrunedPartitionList> prunedPartitions;
-  private List<FieldSchema> resultSchema;
+  protected List<FieldSchema> resultSchema;
   private CreateViewDesc createVwDesc;
   private ArrayList<String> viewsExpanded;
   private ASTNode viewSelect;
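
Taken together, the three Java hunks above remove duplicated state: CalcitePlanner kept its own topLevelFieldSchema while the CTAS machinery consumed SemanticAnalyzer's (previously private) resultSchema, so a CTAS after an ORDER BY could see a schema the CBO never wrote. Widening resultSchema to protected and assigning it directly leaves one source of truth, which also makes the isCTAS rename in PlanModifierForReturnPath unnecessary. A hypothetical, minimal illustration of this bug class (all names invented):

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

class Analyzer {
  protected List<String> resultSchema = Collections.emptyList();
  // The CTAS path reads the parent's field, not any subclass copy.
  List<String> schemaForCtas() { return resultSchema; }
}

class PlannerBefore extends Analyzer {
  private List<String> topLevelFieldSchema;                     // duplicated state
  void plan() { topLevelFieldSchema = Arrays.asList("key", "value"); }
}

class PlannerAfter extends Analyzer {
  void plan() { resultSchema = Arrays.asList("key", "value"); } // single source of truth
}

public class ShadowStateDemo {
  public static void main(String[] args) {
    PlannerBefore before = new PlannerBefore();
    before.plan();
    System.out.println(before.schemaForCtas());  // [] -- the CTAS path sees nothing
    PlannerAfter after = new PlannerAfter();
    after.plan();
    System.out.println(after.schemaForCtas());   // [key, value]
  }
}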

http://git-wip-us.apache.org/repos/asf/hive/blob/bbb91292/ql/src/test/queries/clientpositive/cbo_rp_auto_join17.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/cbo_rp_auto_join17.q b/ql/src/test/queries/clientpositive/cbo_rp_auto_join17.q
new file mode 100644
index 0000000..7e2f068
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/cbo_rp_auto_join17.q
@@ -0,0 +1,14 @@
+set hive.cbo.returnpath.hiveop=true;
+set hive.auto.convert.join = true;
+
+CREATE TABLE dest1(key1 INT, value1 STRING, key2 INT, value2 STRING) STORED AS TEXTFILE;
+
+explain
+FROM src src1 JOIN src src2 ON (src1.key = src2.key)
+INSERT OVERWRITE TABLE dest1 SELECT src1.*, src2.*;
+
+
+FROM src src1 JOIN src src2 ON (src1.key = src2.key)
+INSERT OVERWRITE TABLE dest1 SELECT src1.*, src2.*;
+
+SELECT sum(hash(dest1.key1,dest1.value1,dest1.key2,dest1.value2)) FROM dest1;
\ No newline at end of file
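
The closing sum(hash(...)) query is the test's verification trick: summing a per-row hash yields an order-insensitive fingerprint of the inserted rows, so the return-path plan can be checked against expected data without sorting. A rough Java analogue of the idea (Arrays.hashCode stands in for Hive's hash() UDF, which uses its own algorithm):

import java.util.Arrays;
import java.util.List;

public class RowChecksum {
  // Order-insensitive fingerprint: addition commutes, so row order
  // does not affect the total.
  static long checksum(List<Object[]> rows) {
    long sum = 0;
    for (Object[] row : rows) {
      sum += Arrays.hashCode(row);   // per-row hash, like hash(key1, value1, ...)
    }
    return sum;                      // like sum(hash(...))
  }

  public static void main(String[] args) {
    List<Object[]> a = Arrays.asList(new Object[]{1, "x"}, new Object[]{2, "y"});
    List<Object[]> b = Arrays.asList(new Object[]{2, "y"}, new Object[]{1, "x"});
    System.out.println(checksum(a) == checksum(b));  // true: order does not matter
  }
}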

http://git-wip-us.apache.org/repos/asf/hive/blob/bbb91292/ql/src/test/queries/clientpositive/cbo_rp_cross_product_check_2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/cbo_rp_cross_product_check_2.q b/ql/src/test/queries/clientpositive/cbo_rp_cross_product_check_2.q
new file mode 100644
index 0000000..6c35548
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/cbo_rp_cross_product_check_2.q
@@ -0,0 +1,31 @@
+set hive.cbo.returnpath.hiveop=true;
+set hive.explain.user=false;
+-- SORT_QUERY_RESULTS
+
+create table A as
+select * from src;
+
+create table B as
+select * from src order by key
+limit 10;
+
+set hive.auto.convert.join=true;
+set hive.auto.convert.join.noconditionaltask=true;
+set hive.auto.convert.join.noconditionaltask.size=10000000;
+
+explain select * from A join B;
+
+explain select * from B d1 join B d2 on d1.key = d2.key join A;
+
+explain select * from A join 
+         (select d1.key 
+          from B d1 join B d2 on d1.key = d2.key 
+          where 1 = 1 group by d1.key) od1;
+
+explain select * from A join (select d1.key from B d1 join B d2 where 1 = 1 group by d1.key) od1;
+
+explain select * from 
+(select A.key from A group by key) ss join 
+(select d1.key from B d1 join B d2 on d1.key = d2.key where 1 = 1 group by d1.key) od1;
+
+
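
The expected output that follows is dominated by cross-product warnings, and the queries above are built to trigger them: whenever a (map) join ends up with empty key expression lists, as in the bare "keys: 0 / 1" blocks below, every row pairs with every row. A hypothetical sketch of such a detector (not Hive's actual implementation; the shape of the input map is an assumption):

import java.util.List;
import java.util.Map;

public class CrossProductSketch {
  // If any join input contributes no key expressions, the join
  // degenerates into a Cartesian product and deserves a warning.
  static boolean isCrossProduct(Map<Integer, List<String>> keyExprsPerInput) {
    return keyExprsPerInput.values().stream().anyMatch(List::isEmpty);
  }
}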

http://git-wip-us.apache.org/repos/asf/hive/blob/bbb91292/ql/src/test/results/clientpositive/cbo_rp_auto_join17.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cbo_rp_auto_join17.q.out b/ql/src/test/results/clientpositive/cbo_rp_auto_join17.q.out
new file mode 100644
index 0000000..351699d
--- /dev/null
+++ b/ql/src/test/results/clientpositive/cbo_rp_auto_join17.q.out
@@ -0,0 +1,118 @@
+PREHOOK: query: CREATE TABLE dest1(key1 INT, value1 STRING, key2 INT, value2 STRING) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@dest1
+POSTHOOK: query: CREATE TABLE dest1(key1 INT, value1 STRING, key2 INT, value2 STRING) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@dest1
+PREHOOK: query: explain
+FROM src src1 JOIN src src2 ON (src1.key = src2.key)
+INSERT OVERWRITE TABLE dest1 SELECT src1.*, src2.*
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+FROM src src1 JOIN src src2 ON (src1.key = src2.key)
+INSERT OVERWRITE TABLE dest1 SELECT src1.*, src2.*
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-5 is a root stage
+  Stage-4 depends on stages: Stage-5
+  Stage-0 depends on stages: Stage-4
+  Stage-2 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-5
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        src1 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        src1 
+          TableScan
+            alias: src1
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: string), value (type: string)
+                outputColumnNames: key, value
+                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                HashTable Sink Operator
+                  keys:
+                    0 key (type: string)
+                    1 key (type: string)
+
+  Stage: Stage-4
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src2
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: string), value (type: string)
+                outputColumnNames: key, value
+                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                Map Join Operator
+                  condition map:
+                       Inner Join 0 to 1
+                  keys:
+                    0 key (type: string)
+                    1 key (type: string)
+                  outputColumnNames: key, value, key0, value0
+                  Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: UDFToInteger(key) (type: int), value (type: string), UDFToInteger(key0) (type: int), value0 (type: string)
+                    outputColumnNames: _col0, _col1, _col2, _col3
+                    Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          name: default.dest1
+      Local Work:
+        Map Reduce Local Work
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.dest1
+
+  Stage: Stage-2
+    Stats-Aggr Operator
+
+PREHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key)
+INSERT OVERWRITE TABLE dest1 SELECT src1.*, src2.*
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@dest1
+POSTHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key)
+INSERT OVERWRITE TABLE dest1 SELECT src1.*, src2.*
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@dest1
+POSTHOOK: Lineage: dest1.key1 EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.key2 EXPRESSION [(src)src2.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.value1 SIMPLE [(src)src1.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.value2 SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT sum(hash(dest1.key1,dest1.value1,dest1.key2,dest1.value2)) FROM dest1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@dest1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT sum(hash(dest1.key1,dest1.value1,dest1.key2,dest1.value2)) FROM dest1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@dest1
+#### A masked pattern was here ####
+-793937029770

http://git-wip-us.apache.org/repos/asf/hive/blob/bbb91292/ql/src/test/results/clientpositive/cbo_rp_cross_product_check_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cbo_rp_cross_product_check_2.q.out b/ql/src/test/results/clientpositive/cbo_rp_cross_product_check_2.q.out
new file mode 100644
index 0000000..cdd47b6
--- /dev/null
+++ b/ql/src/test/results/clientpositive/cbo_rp_cross_product_check_2.q.out
@@ -0,0 +1,699 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+create table A as
+select * from src
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@A
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+create table A as
+select * from src
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@A
+PREHOOK: query: create table B as
+select * from src order by key
+limit 10
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@B
+POSTHOOK: query: create table B as
+select * from src order by key
+limit 10
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@B
+Warning: Map Join MAPJOIN[9][bigTable=?] in task 'Stage-3:MAPRED' is a cross product
+PREHOOK: query: explain select * from A join B
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from A join B
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-4 is a root stage
+  Stage-3 depends on stages: Stage-4
+  Stage-0 depends on stages: Stage-3
+
+STAGE PLANS:
+  Stage: Stage-4
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        b 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        b 
+          TableScan
+            alias: b
+            Statistics: Num rows: 10 Data size: 96 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: key (type: string), value (type: string)
+              outputColumnNames: key, value
+              Statistics: Num rows: 10 Data size: 96 Basic stats: COMPLETE Column stats: NONE
+              HashTable Sink Operator
+                keys:
+                  0 
+                  1 
+
+  Stage: Stage-3
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: key (type: string), value (type: string)
+              outputColumnNames: key, value
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Map Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 
+                  1 
+                outputColumnNames: key, value, key0, value0
+                Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: key (type: string), value (type: string), key0 (type: string), value0 (type: string)
+                  outputColumnNames: key, value, key0, value0
+                  Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Local Work:
+        Map Reduce Local Work
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+Warning: Map Join MAPJOIN[17][bigTable=?] in task 'Stage-5:MAPRED' is a cross product
+PREHOOK: query: explain select * from B d1 join B d2 on d1.key = d2.key join A
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from B d1 join B d2 on d1.key = d2.key join A
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-7 is a root stage
+  Stage-5 depends on stages: Stage-7
+  Stage-0 depends on stages: Stage-5
+
+STAGE PLANS:
+  Stage: Stage-7
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        a 
+          Fetch Operator
+            limit: -1
+        d1 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        a 
+          TableScan
+            alias: a
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            HashTable Sink Operator
+              keys:
+                0 
+                1 
+        d1 
+          TableScan
+            alias: d1
+            Statistics: Num rows: 10 Data size: 96 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 5 Data size: 48 Basic stats: COMPLETE Column stats: NONE
+              HashTable Sink Operator
+                keys:
+                  0 key (type: string)
+                  1 key (type: string)
+
+  Stage: Stage-5
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: d2
+            Statistics: Num rows: 10 Data size: 96 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 5 Data size: 48 Basic stats: COMPLETE Column stats: NONE
+              Map Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 key (type: string)
+                  1 key (type: string)
+                outputColumnNames: _col0, _col1, _col5, _col6
+                Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE
+                Map Join Operator
+                  condition map:
+                       Inner Join 0 to 1
+                  keys:
+                    0 
+                    1 
+                  outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11
+                  Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string)
+                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                    Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Local Work:
+        Map Reduce Local Work
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+Warning: Map Join MAPJOIN[25][bigTable=?] in task 'Stage-6:MAPRED' is a cross product
+PREHOOK: query: explain select * from A join 
+         (select d1.key 
+          from B d1 join B d2 on d1.key = d2.key 
+          where 1 = 1 group by d1.key) od1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from A join 
+         (select d1.key 
+          from B d1 join B d2 on d1.key = d2.key 
+          where 1 = 1 group by d1.key) od1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-9 is a root stage
+  Stage-2 depends on stages: Stage-9
+  Stage-8 depends on stages: Stage-2
+  Stage-6 depends on stages: Stage-8
+  Stage-0 depends on stages: Stage-6
+
+STAGE PLANS:
+  Stage: Stage-9
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        od1:d1 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        od1:d1 
+          TableScan
+            alias: d1
+            Statistics: Num rows: 10 Data size: 96 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 5 Data size: 48 Basic stats: COMPLETE Column stats: NONE
+              HashTable Sink Operator
+                keys:
+                  0 key (type: string)
+                  1 key (type: string)
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: d2
+            Statistics: Num rows: 10 Data size: 96 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 5 Data size: 48 Basic stats: COMPLETE Column stats: NONE
+              Map Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 key (type: string)
+                  1 key (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: string)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE
+                  Group By Operator
+                    keys: _col0 (type: string)
+                    mode: hash
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: string)
+                      Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE
+      Local Work:
+        Map Reduce Local Work
+      Reduce Operator Tree:
+        Group By Operator
+          keys: KEY._col0 (type: string)
+          mode: mergepartial
+          outputColumnNames: _col0
+          Statistics: Num rows: 2 Data size: 20 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: _col0 (type: string)
+            outputColumnNames: _col0
+            Statistics: Num rows: 2 Data size: 20 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-8
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        a 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        a 
+          TableScan
+            alias: a
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            HashTable Sink Operator
+              keys:
+                0 
+                1 
+
+  Stage: Stage-6
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Map Join Operator
+              condition map:
+                   Inner Join 0 to 1
+              keys:
+                0 
+                1 
+              outputColumnNames: _col0, _col1, _col5
+              Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string)
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Local Work:
+        Map Reduce Local Work
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+Warning: Map Join MAPJOIN[21][bigTable=?] in task 'Stage-6:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[22][bigTable=d2] in task 'Stage-2:MAPRED' is a cross product
+PREHOOK: query: explain select * from A join (select d1.key from B d1 join B d2 where 1 = 1 group by d1.key) od1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from A join (select d1.key from B d1 join B d2 where 1 = 1 group by d1.key) od1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-9 is a root stage
+  Stage-2 depends on stages: Stage-9
+  Stage-8 depends on stages: Stage-2
+  Stage-6 depends on stages: Stage-8
+  Stage-0 depends on stages: Stage-6
+
+STAGE PLANS:
+  Stage: Stage-9
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        od1:d1 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        od1:d1 
+          TableScan
+            alias: d1
+            Statistics: Num rows: 10 Data size: 96 Basic stats: COMPLETE Column stats: NONE
+            HashTable Sink Operator
+              keys:
+                0 
+                1 
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: d2
+            Statistics: Num rows: 10 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
+            Map Join Operator
+              condition map:
+                   Inner Join 0 to 1
+              keys:
+                0 
+                1 
+              outputColumnNames: _col0
+              Statistics: Num rows: 11 Data size: 105 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: _col0 (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 11 Data size: 105 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  keys: _col0 (type: string)
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 11 Data size: 105 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: string)
+                    sort order: +
+                    Map-reduce partition columns: _col0 (type: string)
+                    Statistics: Num rows: 11 Data size: 105 Basic stats: COMPLETE Column stats: NONE
+      Local Work:
+        Map Reduce Local Work
+      Reduce Operator Tree:
+        Group By Operator
+          keys: KEY._col0 (type: string)
+          mode: mergepartial
+          outputColumnNames: _col0
+          Statistics: Num rows: 5 Data size: 47 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: _col0 (type: string)
+            outputColumnNames: _col0
+            Statistics: Num rows: 5 Data size: 47 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-8
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        a 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        a 
+          TableScan
+            alias: a
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            HashTable Sink Operator
+              keys:
+                0 
+                1 
+
+  Stage: Stage-6
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Map Join Operator
+              condition map:
+                   Inner Join 0 to 1
+              keys:
+                0 
+                1 
+              outputColumnNames: _col0, _col1, _col5
+              Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string)
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Local Work:
+        Map Reduce Local Work
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+Warning: Map Join MAPJOIN[47][bigTable=?] in task 'Stage-7:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[39][bigTable=?] in task 'Stage-6:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[21][tables = [ss, od1]] in Stage 'Stage-2:MAPRED' is a cross product
+PREHOOK: query: explain select * from 
+(select A.key from A group by key) ss join 
+(select d1.key from B d1 join B d2 on d1.key = d2.key where 1 = 1 group by d1.key) od1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from 
+(select A.key from A group by key) ss join 
+(select d1.key from B d1 join B d2 on d1.key = d2.key where 1 = 1 group by d1.key) od1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-8 depends on stages: Stage-1, Stage-4 , consists of Stage-10, Stage-11, Stage-2
+  Stage-10 has a backup stage: Stage-2
+  Stage-6 depends on stages: Stage-10
+  Stage-11 has a backup stage: Stage-2
+  Stage-7 depends on stages: Stage-11
+  Stage-2
+  Stage-12 is a root stage
+  Stage-4 depends on stages: Stage-12
+  Stage-0 depends on stages: Stage-6, Stage-7, Stage-2
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: key (type: string)
+              outputColumnNames: key
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                keys: key (type: string)
+                mode: hash
+                outputColumnNames: _col0
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+      Reduce Operator Tree:
+        Group By Operator
+          keys: KEY._col0 (type: string)
+          mode: mergepartial
+          outputColumnNames: _col0
+          Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: _col0 (type: string)
+            outputColumnNames: _col0
+            Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-8
+    Conditional Operator
+
+  Stage: Stage-10
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        $INTNAME1 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        $INTNAME1 
+          TableScan
+            HashTable Sink Operator
+              keys:
+                0 
+                1 
+
+  Stage: Stage-6
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Map Join Operator
+              condition map:
+                   Inner Join 0 to 1
+              keys:
+                0 
+                1 
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: _col0 (type: string), _col1 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Local Work:
+        Map Reduce Local Work
+
+  Stage: Stage-11
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        $INTNAME 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        $INTNAME 
+          TableScan
+            HashTable Sink Operator
+              keys:
+                0 
+                1 
+
+  Stage: Stage-7
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Map Join Operator
+              condition map:
+                   Inner Join 0 to 1
+              keys:
+                0 
+                1 
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: _col0 (type: string), _col1 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Local Work:
+        Map Reduce Local Work
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              sort order: 
+              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+              value expressions: _col0 (type: string)
+          TableScan
+            Reduce Output Operator
+              sort order: 
+              Statistics: Num rows: 2 Data size: 20 Basic stats: COMPLETE Column stats: NONE
+              value expressions: _col0 (type: string)
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Inner Join 0 to 1
+          keys:
+            0 
+            1 
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: _col0 (type: string), _col1 (type: string)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-12
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        od1:d1 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        od1:d1 
+          TableScan
+            alias: d1
+            Statistics: Num rows: 10 Data size: 96 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 5 Data size: 48 Basic stats: COMPLETE Column stats: NONE
+              HashTable Sink Operator
+                keys:
+                  0 key (type: string)
+                  1 key (type: string)
+
+  Stage: Stage-4
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: d2
+            Statistics: Num rows: 10 Data size: 96 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 5 Data size: 48 Basic stats: COMPLETE Column stats: NONE
+              Map Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 key (type: string)
+                  1 key (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: string)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE
+                  Group By Operator
+                    keys: _col0 (type: string)
+                    mode: hash
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: string)
+                      Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE
+      Local Work:
+        Map Reduce Local Work
+      Reduce Operator Tree:
+        Group By Operator
+          keys: KEY._col0 (type: string)
+          mode: mergepartial
+          outputColumnNames: _col0
+          Statistics: Num rows: 2 Data size: 20 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: _col0 (type: string)
+            outputColumnNames: _col0
+            Statistics: Num rows: 2 Data size: 20 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+


[09/24] hive git commit: HIVE-11606: Bucket map joins fail at hash table construction time (Vikram Dixit K, reviewed by Sergey Shelukhin)

Posted by pr...@apache.org.
HIVE-11606: Bucket map joins fail at hash table construction time (Vikram Dixit K, reviewed by Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/9fe8802c
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/9fe8802c
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/9fe8802c

Branch: refs/heads/llap
Commit: 9fe8802cb83e05c0392b11b8dcfe354fecfda786
Parents: 4ea8e29
Author: vikram <vi...@hortonworks.com>
Authored: Thu Sep 10 13:29:34 2015 -0700
Committer: vikram <vi...@hortonworks.com>
Committed: Thu Sep 10 13:30:24 2015 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hive/ql/exec/Operator.java    |   3 +-
 .../ql/optimizer/ReduceSinkMapJoinProc.java     |  11 +-
 .../clientpositive/bucket_map_join_tez1.q       |  22 ++
 .../spark/bucket_map_join_tez1.q.out            | 226 +++++++++++++++++++
 .../tez/bucket_map_join_tez1.q.out              | 210 +++++++++++++++++
 5 files changed, 469 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/9fe8802c/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java
index acbe504..92e5446 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java
@@ -319,6 +319,7 @@ public abstract class Operator<T extends OperatorDesc> implements Serializable,C
   @SuppressWarnings("unchecked")
   public final void initialize(Configuration hconf, ObjectInspector[] inputOIs)
       throws HiveException {
+    this.done = false;
     if (state == State.INIT) {
       return;
     }
@@ -369,7 +370,7 @@ public abstract class Operator<T extends OperatorDesc> implements Serializable,C
     }
 
     if (isLogInfoEnabled) {
-      LOG.info("Initialization Done " + id + " " + getName());
+      LOG.info("Initialization Done " + id + " " + getName() + " done is reset.");
     }
 
     initializeChildren(hconf);
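
For context, the one-line change above clears the done flag every time initialize() runs, so an operator instance that finished a previous task (for example under container or object reuse) is not left permanently refusing rows. A minimal sketch of the pattern, with illustrative names rather than the real Hive Operator API:

    // Sketch only: reset a completion flag on (re)initialization so a reused
    // instance behaves like a fresh one. Names here are hypothetical.
    public class ReusableOperator {
      private enum State { UNINIT, INIT }

      private State state = State.UNINIT;
      private boolean done;            // true once this instance has finished

      public final void initialize() {
        done = false;                  // the fix: clear before the early return,
        if (state == State.INIT) {     // so an already-initialized, reused
          return;                      // instance is not stuck in the done state
        }
        state = State.INIT;
      }

      public void process(Object row) {
        if (done) {
          return;                      // rows are ignored once done is set
        }
        // ... forward the row downstream ...
      }

      public void markDone() {
        done = true;
      }
    }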

http://git-wip-us.apache.org/repos/asf/hive/blob/9fe8802c/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ReduceSinkMapJoinProc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ReduceSinkMapJoinProc.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ReduceSinkMapJoinProc.java
index 71c766f..757ff5e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ReduceSinkMapJoinProc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ReduceSinkMapJoinProc.java
@@ -215,8 +215,15 @@ public class ReduceSinkMapJoinProc implements NodeProcessor {
         tableSize /= bucketCount;
       }
     }
-    LOG.info("Mapjoin " + mapJoinOp + ", pos: " + pos + " --> " + parentWork.getName() + " ("
-      + keyCount + " keys estimated from " + rowCount + " rows, " + bucketCount + " buckets)");
+    if (keyCount == 0) {
+      keyCount = 1;
+    }
+    if (tableSize == 0) {
+      tableSize = 1;
+    }
+    LOG.info("Mapjoin " + mapJoinOp + " (bucket map join = " + joinConf.isBucketMapJoin()
+        + "), pos: " + pos + " --> " + parentWork.getName() + " (" + keyCount
+        + " keys estimated from " + rowCount + " rows, " + bucketCount + " buckets)");
     joinConf.getParentToInput().put(pos, parentWork.getName());
     if (keyCount != Long.MAX_VALUE) {
       joinConf.getParentKeyCounts().put(pos, keyCount);
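
The functional part of this hunk is the clamping: statistics can legitimately report an empty bucket, and a keyCount or tableSize of 0 previously flowed into hash table construction and caused the failure named in the JIRA title. A stand-alone sketch of the same guard, with hypothetical names:

    // Sketch only: never size a hash table from a zero estimate, mirroring
    // the guard added to ReduceSinkMapJoinProc above.
    public final class HashTableSizingSketch {
      private HashTableSizingSketch() {}

      public static long clampEstimate(long estimate) {
        return estimate == 0 ? 1 : estimate;   // 0 -> 1, anything else unchanged
      }

      public static void main(String[] args) {
        System.out.println(clampEstimate(0));      // 1: empty-bucket estimate
        System.out.println(clampEstimate(4096));   // 4096: unchanged
      }
    }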

http://git-wip-us.apache.org/repos/asf/hive/blob/9fe8802c/ql/src/test/queries/clientpositive/bucket_map_join_tez1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucket_map_join_tez1.q b/ql/src/test/queries/clientpositive/bucket_map_join_tez1.q
index 0f9dd6d..8546e78 100644
--- a/ql/src/test/queries/clientpositive/bucket_map_join_tez1.q
+++ b/ql/src/test/queries/clientpositive/bucket_map_join_tez1.q
@@ -95,3 +95,25 @@ explain select a.key, b.key from tab_part a join tab_part c on a.key = c.key joi
 explain
 select a.key, a.value, b.value
 from tab a join tab_part b on a.key = b.key and a.ds = b.ds;
+
+set hive.mapjoin.hybridgrace.hashtable = false;
+insert overwrite table tab partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin where key = 411;
+
+explain
+select count(*)
+from tab_part a join tab b on a.key = b.key;
+
+select count(*)
+from tab_part a join tab b on a.key = b.key;
+
+set hive.mapjoin.hybridgrace.hashtable = false;
+insert overwrite table tab partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin where key = 411;
+
+explain
+select count(*)
+from tab_part a join tab b on a.key = b.key;
+
+select count(*)
+from tab_part a join tab b on a.key = b.key;
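
For context on what the new test exercises: inserting only the rows with key 411 leaves the small side of the bucket map join with data in at most one bucket, so the per-bucket statistics for the rest are zero; under the pre-patch sizing logic those zeros reached hash table construction, which is the failure this change guards against. (This reading is inferred from the patch; the .q file itself does not state it.)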

http://git-wip-us.apache.org/repos/asf/hive/blob/9fe8802c/ql/src/test/results/clientpositive/spark/bucket_map_join_tez1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/bucket_map_join_tez1.q.out b/ql/src/test/results/clientpositive/spark/bucket_map_join_tez1.q.out
index 34ddc90..2c14065 100644
--- a/ql/src/test/results/clientpositive/spark/bucket_map_join_tez1.q.out
+++ b/ql/src/test/results/clientpositive/spark/bucket_map_join_tez1.q.out
@@ -1424,3 +1424,229 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
+PREHOOK: query: insert overwrite table tab partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin where key = 411
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcbucket_mapjoin
+PREHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08
+PREHOOK: Output: default@tab@ds=2008-04-08
+POSTHOOK: query: insert overwrite table tab partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin where key = 411
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcbucket_mapjoin
+POSTHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08
+POSTHOOK: Output: default@tab@ds=2008-04-08
+POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: explain
+select count(*)
+from tab_part a join tab b on a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(*)
+from tab_part a join tab b on a.key = b.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-2 is a root stage
+  Stage-1 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-2
+    Spark
+#### A masked pattern was here ####
+      Vertices:
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+                    Spark HashTable Sink Operator
+                      keys:
+                        0 key (type: int)
+                        1 key (type: int)
+            Local Work:
+              Map Reduce Local Work
+
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP, 1)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Inner Join 0 to 1
+                      keys:
+                        0 key (type: int)
+                        1 key (type: int)
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        sort order: 
+                        Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+            Local Work:
+              Map Reduce Local Work
+        Reducer 2 
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count()
+                mode: complete
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select count(*)
+from tab_part a join tab b on a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tab
+PREHOOK: Input: default@tab@ds=2008-04-08
+PREHOOK: Input: default@tab_part
+PREHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*)
+from tab_part a join tab b on a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tab
+POSTHOOK: Input: default@tab@ds=2008-04-08
+POSTHOOK: Input: default@tab_part
+POSTHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+1
+PREHOOK: query: insert overwrite table tab partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin where key = 411
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcbucket_mapjoin
+PREHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08
+PREHOOK: Output: default@tab@ds=2008-04-08
+POSTHOOK: query: insert overwrite table tab partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin where key = 411
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcbucket_mapjoin
+POSTHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08
+POSTHOOK: Output: default@tab@ds=2008-04-08
+POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: explain
+select count(*)
+from tab_part a join tab b on a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(*)
+from tab_part a join tab b on a.key = b.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-2 is a root stage
+  Stage-1 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-2
+    Spark
+#### A masked pattern was here ####
+      Vertices:
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+                    Spark HashTable Sink Operator
+                      keys:
+                        0 key (type: int)
+                        1 key (type: int)
+            Local Work:
+              Map Reduce Local Work
+
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP, 1)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Inner Join 0 to 1
+                      keys:
+                        0 key (type: int)
+                        1 key (type: int)
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        sort order: 
+                        Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+            Local Work:
+              Map Reduce Local Work
+        Reducer 2 
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count()
+                mode: complete
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select count(*)
+from tab_part a join tab b on a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tab
+PREHOOK: Input: default@tab@ds=2008-04-08
+PREHOOK: Input: default@tab_part
+PREHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*)
+from tab_part a join tab b on a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tab
+POSTHOOK: Input: default@tab@ds=2008-04-08
+POSTHOOK: Input: default@tab_part
+POSTHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+1

http://git-wip-us.apache.org/repos/asf/hive/blob/9fe8802c/ql/src/test/results/clientpositive/tez/bucket_map_join_tez1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/bucket_map_join_tez1.q.out b/ql/src/test/results/clientpositive/tez/bucket_map_join_tez1.q.out
index 8338672..af5e6e6 100644
--- a/ql/src/test/results/clientpositive/tez/bucket_map_join_tez1.q.out
+++ b/ql/src/test/results/clientpositive/tez/bucket_map_join_tez1.q.out
@@ -1350,3 +1350,213 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
+PREHOOK: query: insert overwrite table tab partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin where key = 411
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcbucket_mapjoin
+PREHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08
+PREHOOK: Output: default@tab@ds=2008-04-08
+POSTHOOK: query: insert overwrite table tab partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin where key = 411
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcbucket_mapjoin
+POSTHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08
+POSTHOOK: Output: default@tab@ds=2008-04-08
+POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: explain
+select count(*)
+from tab_part a join tab b on a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(*)
+from tab_part a join tab b on a.key = b.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Map 1 <- Map 3 (CUSTOM_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Inner Join 0 to 1
+                      keys:
+                        0 key (type: int)
+                        1 key (type: int)
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        sort order: 
+                        Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+        Reducer 2 
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count()
+                mode: complete
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select count(*)
+from tab_part a join tab b on a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tab
+PREHOOK: Input: default@tab@ds=2008-04-08
+PREHOOK: Input: default@tab_part
+PREHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*)
+from tab_part a join tab b on a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tab
+POSTHOOK: Input: default@tab@ds=2008-04-08
+POSTHOOK: Input: default@tab_part
+POSTHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+1
+PREHOOK: query: insert overwrite table tab partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin where key = 411
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcbucket_mapjoin
+PREHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08
+PREHOOK: Output: default@tab@ds=2008-04-08
+POSTHOOK: query: insert overwrite table tab partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin where key = 411
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcbucket_mapjoin
+POSTHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08
+POSTHOOK: Output: default@tab@ds=2008-04-08
+POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: explain
+select count(*)
+from tab_part a join tab b on a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(*)
+from tab_part a join tab b on a.key = b.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Map 1 <- Map 3 (CUSTOM_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Inner Join 0 to 1
+                      keys:
+                        0 key (type: int)
+                        1 key (type: int)
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        sort order: 
+                        Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+        Reducer 2 
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count()
+                mode: complete
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select count(*)
+from tab_part a join tab b on a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tab
+PREHOOK: Input: default@tab@ds=2008-04-08
+PREHOOK: Input: default@tab_part
+PREHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*)
+from tab_part a join tab b on a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tab
+POSTHOOK: Input: default@tab@ds=2008-04-08
+POSTHOOK: Input: default@tab_part
+POSTHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+1


[19/24] hive git commit: HIVE-11780: Add "set role none" support (Sun Dapeng, reviewed by Ferdinand Xu)

Posted by pr...@apache.org.
HIVE-11780: Add "set role none" support (Sun Dapeng, reviewed by Ferdinand Xu)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/8bcd07d1
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/8bcd07d1
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/8bcd07d1

Branch: refs/heads/llap
Commit: 8bcd07d18d7791745b54b1775bbf54e26c23aeea
Parents: 66fb960
Author: Sun Dapeng <da...@intel.com>
Authored: Mon Sep 14 02:54:52 2015 -0400
Committer: Ferdinand Xu <ch...@intel.com>
Committed: Mon Sep 14 02:54:52 2015 -0400

----------------------------------------------------------------------
 ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g     | 2 ++
 .../plugin/sqlstd/SQLStdHiveAccessController.java            | 5 +++++
 .../clientpositive/authorization_set_show_current_role.q     | 3 +++
 .../clientpositive/authorization_set_show_current_role.q.out | 8 ++++++++
 4 files changed, 18 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/8bcd07d1/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
index cf7ab3a..3969a54 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
@@ -1495,6 +1495,8 @@ setRole
     (
     (KW_ALL) => (all=KW_ALL) -> ^(TOK_SHOW_SET_ROLE Identifier[$all.text])
     |
+    (KW_NONE) => (none=KW_NONE) -> ^(TOK_SHOW_SET_ROLE Identifier[$none.text])
+    |
     identifier -> ^(TOK_SHOW_SET_ROLE identifier)
     )
     ;

http://git-wip-us.apache.org/repos/asf/hive/blob/8bcd07d1/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAccessController.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAccessController.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAccessController.java
index 9c78876..2f6e26b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAccessController.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAccessController.java
@@ -520,6 +520,11 @@ public class SQLStdHiveAccessController implements HiveAccessController {
     HiveAuthzPluginException {
 
     initUserRoles();
+    if (NONE.equalsIgnoreCase(roleName)) {
+      // for set role NONE, clear all roles for current session.
+      currentRoles.clear();
+      return;
+    }
     if (ALL.equalsIgnoreCase(roleName)) {
       // for set role ALL, reset roles to default roles.
       currentRoles.clear();
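
Combined with the ALL branch already present at the insertion point, setCurrentRole now distinguishes three cases: NONE clears the session's roles, ALL resets them to the defaults, and anything else selects the named role. A simplified sketch of that dispatch, with hypothetical types standing in for the real access controller:

    import java.util.HashSet;
    import java.util.Set;

    // Sketch only: the three-way SET ROLE dispatch after this patch.
    // Types and the single-role branch are simplified, not the real logic.
    public class RoleDispatchSketch {
      private final Set<String> currentRoles = new HashSet<>();
      private final Set<String> defaultRoles = new HashSet<>();

      public void setCurrentRole(String roleName) {
        if ("NONE".equalsIgnoreCase(roleName)) {
          currentRoles.clear();               // new: drop every role for the session
          return;
        }
        if ("ALL".equalsIgnoreCase(roleName)) {
          currentRoles.clear();               // existing: back to the default roles
          currentRoles.addAll(defaultRoles);
          return;
        }
        currentRoles.clear();                 // existing: switch to the named role
        currentRoles.add(roleName);
      }
    }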

http://git-wip-us.apache.org/repos/asf/hive/blob/8bcd07d1/ql/src/test/queries/clientpositive/authorization_set_show_current_role.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/authorization_set_show_current_role.q b/ql/src/test/queries/clientpositive/authorization_set_show_current_role.q
index f10b649..50a5862 100644
--- a/ql/src/test/queries/clientpositive/authorization_set_show_current_role.q
+++ b/ql/src/test/queries/clientpositive/authorization_set_show_current_role.q
@@ -13,6 +13,9 @@ show current roles;
 set role PUBLIC;
 show current roles;
 
+set role NONE;
+show current roles;
+
 set role ALL;
 show current roles;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/8bcd07d1/ql/src/test/results/clientpositive/authorization_set_show_current_role.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/authorization_set_show_current_role.q.out b/ql/src/test/results/clientpositive/authorization_set_show_current_role.q.out
index 4ac4320..a2b273d 100644
--- a/ql/src/test/results/clientpositive/authorization_set_show_current_role.q.out
+++ b/ql/src/test/results/clientpositive/authorization_set_show_current_role.q.out
@@ -33,6 +33,14 @@ PREHOOK: type: SHOW_ROLES
 POSTHOOK: query: show current roles
 POSTHOOK: type: SHOW_ROLES
 public
+PREHOOK: query: set role NONE
+PREHOOK: type: SHOW_ROLES
+POSTHOOK: query: set role NONE
+POSTHOOK: type: SHOW_ROLES
+PREHOOK: query: show current roles
+PREHOOK: type: SHOW_ROLES
+POSTHOOK: query: show current roles
+POSTHOOK: type: SHOW_ROLES
 PREHOOK: query: set role ALL
 PREHOOK: type: SHOW_ROLES
 POSTHOOK: query: set role ALL


[02/24] hive git commit: HIVE-11482: Adds retrying thrift client for HiveServer2 (Akshay Goyal, reviewed by Amareshwari)

Posted by pr...@apache.org.
HIVE-11482: Adds retrying thrift client for HiveServer2 (Akshay Goyal, reviewed by Amareshwari)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/9b11caff
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/9b11caff
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/9b11caff

Branch: refs/heads/llap
Commit: 9b11caff8b61697c88caa1ed5606c665624f3290
Parents: d94c0f6
Author: Akshay Goyal <ak...@gmail.com>
Authored: Thu Sep 10 10:22:31 2015 +0530
Committer: Amareshwari Sriramadasu <am...@apache.org>
Committed: Thu Sep 10 10:22:31 2015 +0530

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/conf/HiveConf.java   |  11 +
 .../thrift/RetryingThriftCLIServiceClient.java  | 331 +++++++++++++++++++
 .../cli/TestRetryingThriftCLIServiceClient.java | 133 ++++++++
 3 files changed, 475 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/9b11caff/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 8a00079..d2c5885 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -2022,6 +2022,17 @@ public class HiveConf extends Configuration {
         "Session will be considered to be idle only if there is no activity, and there is no pending operation.\n" +
         " This setting takes effect only if session idle timeout (hive.server2.idle.session.timeout) and checking\n" +
         "(hive.server2.session.check.interval) are enabled."),
+    HIVE_SERVER2_THRIFT_CLIENT_RETRY_LIMIT("hive.server2.thrift.client.retry.limit", 1,"Number of retries upon " +
+      "failure of Thrift HiveServer2 calls"),
+    HIVE_SERVER2_THRIFT_CLIENT_CONNECTION_RETRY_LIMIT("hive.server2.thrift.client.connect.retry.limit", 1,"Number of " +
+      "retries while opening a connection to HiveServer2"),
+    HIVE_SERVER2_THRIFT_CLIENT_RETRY_DELAY_SECONDS("hive.server2.thrift.client.retry.delay.seconds", "1s",
+      new TimeValidator(TimeUnit.SECONDS), "Number of seconds for the HiveServer2 thrift client to wait between " +
+      "consecutive connection attempts. Also specifies the time to wait between retrying thrift calls upon failures"),
+    HIVE_SERVER2_THRIFT_CLIENT_USER("hive.server2.thrift.client.user", "anonymous","Username to use against thrift" +
+      " client"),
+    HIVE_SERVER2_THRIFT_CLIENT_PASSWORD("hive.server2.thrift.client.password", "anonymous","Password to use against " +
+      "thrift client"),
 
     HIVE_SECURITY_COMMAND_WHITELIST("hive.security.command.whitelist", "set,reset,dfs,add,list,delete,reload,compile",
         "Comma separated list of non-SQL Hive commands users are authorized to execute"),

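The new settings are ordinary HiveConf variables, read with the same accessors the client code later in this patch uses. Assuming HiveConf on the classpath, retrieval looks like:

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hive.conf.HiveConf;

    // Mirrors the reads done in RetryingThriftCLIServiceClient further down.
    public class RetryConfExample {
      public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        int retryLimit = conf.getIntVar(
            HiveConf.ConfVars.HIVE_SERVER2_THRIFT_CLIENT_RETRY_LIMIT);
        int connectRetries = conf.getIntVar(
            HiveConf.ConfVars.HIVE_SERVER2_THRIFT_CLIENT_CONNECTION_RETRY_LIMIT);
        long retryDelaySeconds = conf.getTimeVar(
            HiveConf.ConfVars.HIVE_SERVER2_THRIFT_CLIENT_RETRY_DELAY_SECONDS,
            TimeUnit.SECONDS);
        System.out.println(retryLimit + " call retries, " + connectRetries
            + " connect retries, " + retryDelaySeconds + "s delay");
      }
    }
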
http://git-wip-us.apache.org/repos/asf/hive/blob/9b11caff/service/src/java/org/apache/hive/service/cli/thrift/RetryingThriftCLIServiceClient.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/thrift/RetryingThriftCLIServiceClient.java b/service/src/java/org/apache/hive/service/cli/thrift/RetryingThriftCLIServiceClient.java
new file mode 100644
index 0000000..4bd7336
--- /dev/null
+++ b/service/src/java/org/apache/hive/service/cli/thrift/RetryingThriftCLIServiceClient.java
@@ -0,0 +1,331 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.service.cli.thrift;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hive.service.auth.HiveAuthFactory;
+import org.apache.hive.service.auth.KerberosSaslHelper;
+import org.apache.hive.service.auth.PlainSaslHelper;
+import org.apache.hive.service.cli.*;
+import org.apache.thrift.TApplicationException;
+import org.apache.thrift.TException;
+
+import org.apache.thrift.protocol.TBinaryProtocol;
+import org.apache.thrift.protocol.TProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.transport.TSocket;
+import org.apache.thrift.transport.TTransport;
+import org.apache.thrift.transport.TTransportException;
+
+import javax.security.sasl.SaslException;
+import java.lang.reflect.*;
+import java.net.SocketException;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * RetryingThriftCLIServiceClient. Creates a proxy for a CLIServiceClient
+ * implementation and retries calls to it on failure.
+ */
+public class RetryingThriftCLIServiceClient implements InvocationHandler {
+  public static final Log LOG = LogFactory.getLog(RetryingThriftCLIServiceClient.class);
+  private ThriftCLIServiceClient base;
+  private final int retryLimit;
+  private final int retryDelaySeconds;
+  private HiveConf conf;
+  private TTransport transport;
+
+  public static class CLIServiceClientWrapper extends CLIServiceClient {
+    private final ICLIService cliService;
+
+    public CLIServiceClientWrapper(ICLIService icliService) {
+      cliService = icliService;
+    }
+
+    @Override
+    public SessionHandle openSession(String username, String password) throws HiveSQLException {
+      return cliService.openSession(username, password, Collections.<String, String>emptyMap());
+    }
+
+    @Override
+    public String getDelegationToken(SessionHandle sessionHandle, HiveAuthFactory authFactory, String owner,
+                                     String renewer) throws HiveSQLException {
+      return cliService.getDelegationToken(sessionHandle, authFactory, owner, renewer);
+    }
+
+    @Override
+    public void cancelDelegationToken(SessionHandle sessionHandle, HiveAuthFactory authFactory,
+                                      String tokenStr) throws HiveSQLException {
+      cliService.cancelDelegationToken(sessionHandle, authFactory, tokenStr);
+    }
+
+    @Override
+    public void renewDelegationToken(SessionHandle sessionHandle, HiveAuthFactory authFactory,
+                                     String tokenStr) throws HiveSQLException {
+      cliService.renewDelegationToken(sessionHandle, authFactory, tokenStr);
+    }
+
+    @Override
+    public SessionHandle openSession(String username, String password, Map<String, String> configuration)
+      throws HiveSQLException {
+      return cliService.openSession(username, password, configuration);
+    }
+
+    @Override
+    public SessionHandle openSessionWithImpersonation(String username,
+                                                      String password,
+                                                      Map<String, String> configuration,
+                                                      String delegationToken) throws HiveSQLException {
+      return cliService.openSessionWithImpersonation(username, password, configuration, delegationToken);
+    }
+
+    @Override
+    public void closeSession(SessionHandle sessionHandle) throws HiveSQLException {
+      cliService.closeSession(sessionHandle);
+    }
+
+    @Override
+    public GetInfoValue getInfo(SessionHandle sessionHandle, GetInfoType getInfoType) throws HiveSQLException {
+      return cliService.getInfo(sessionHandle, getInfoType);
+    }
+
+    @Override
+    public OperationHandle executeStatement(SessionHandle sessionHandle,
+                                            String statement,
+                                            Map<String, String> confOverlay) throws HiveSQLException {
+      return cliService.executeStatement(sessionHandle, statement, confOverlay);
+    }
+
+    @Override
+    public OperationHandle executeStatementAsync(SessionHandle sessionHandle,
+                                                 String statement,
+                                                 Map<String, String> confOverlay) throws HiveSQLException {
+      return cliService.executeStatementAsync(sessionHandle, statement, confOverlay);
+    }
+
+    @Override
+    public OperationHandle getTypeInfo(SessionHandle sessionHandle) throws HiveSQLException {
+      return cliService.getTypeInfo(sessionHandle);
+    }
+
+    @Override
+    public OperationHandle getCatalogs(SessionHandle sessionHandle) throws HiveSQLException {
+      return cliService.getCatalogs(sessionHandle);
+    }
+
+    @Override
+    public OperationHandle getSchemas(SessionHandle sessionHandle, String catalogName, String schemaName)
+      throws HiveSQLException {
+      return cliService.getSchemas(sessionHandle, catalogName, schemaName);
+    }
+
+    @Override
+    public OperationHandle getTables(SessionHandle sessionHandle, String catalogName, String schemaName,
+                                     String tableName, List<String> tableTypes) throws HiveSQLException {
+      return cliService.getTables(sessionHandle, catalogName, schemaName, tableName, tableTypes);
+    }
+
+    @Override
+    public OperationHandle getTableTypes(SessionHandle sessionHandle) throws HiveSQLException {
+      return null;
+    }
+
+    @Override
+    public OperationHandle getColumns(SessionHandle sessionHandle, String catalogName, String schemaName,
+                                      String tableName, String columnName) throws HiveSQLException {
+      return cliService.getColumns(sessionHandle, catalogName, schemaName, tableName, columnName);
+    }
+
+    @Override
+    public OperationHandle getFunctions(SessionHandle sessionHandle, String catalogName, String schemaName,
+                                        String functionName) throws HiveSQLException {
+      return cliService.getFunctions(sessionHandle, catalogName, schemaName, functionName);
+    }
+
+    @Override
+    public OperationStatus getOperationStatus(OperationHandle opHandle) throws HiveSQLException {
+      return cliService.getOperationStatus(opHandle);
+    }
+
+    @Override
+    public void cancelOperation(OperationHandle opHandle) throws HiveSQLException {
+      cliService.cancelOperation(opHandle);
+    }
+
+    @Override
+    public void closeOperation(OperationHandle opHandle) throws HiveSQLException {
+      cliService.closeOperation(opHandle);
+    }
+
+    @Override
+    public TableSchema getResultSetMetadata(OperationHandle opHandle) throws HiveSQLException {
+      return cliService.getResultSetMetadata(opHandle);
+    }
+
+    @Override
+    public RowSet fetchResults(OperationHandle opHandle, FetchOrientation orientation, long maxRows,
+                               FetchType fetchType) throws HiveSQLException {
+      return cliService.fetchResults(opHandle, orientation, maxRows, fetchType);
+    }
+  }
+
+  protected RetryingThriftCLIServiceClient(HiveConf conf) {
+    this.conf = conf;
+    retryLimit = conf.getIntVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_CLIENT_RETRY_LIMIT);
+    retryDelaySeconds = (int) conf.getTimeVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_CLIENT_RETRY_DELAY_SECONDS,
+      TimeUnit.SECONDS);
+  }
+
+  public static CLIServiceClient newRetryingCLIServiceClient(HiveConf conf) throws HiveSQLException {
+    RetryingThriftCLIServiceClient retryClient = new RetryingThriftCLIServiceClient(conf);
+    retryClient.connectWithRetry(conf.getIntVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_CLIENT_CONNECTION_RETRY_LIMIT));
+    ICLIService cliService =
+      (ICLIService) Proxy.newProxyInstance(RetryingThriftCLIServiceClient.class.getClassLoader(),
+        CLIServiceClient.class.getInterfaces(), retryClient);
+    return new CLIServiceClientWrapper(cliService);
+  }
+
+  protected void connectWithRetry(int retries) throws HiveSQLException {
+    for (int i = 0 ; i < retries; i++) {
+      try {
+        connect(conf);
+        break;
+      } catch (TTransportException e) {
+        if (i + 1 == retries) {
+          throw new HiveSQLException("Unable to connect after " + retries + " retries", e);
+        }
+        LOG.warn("Connection attempt " + i, e);
+      }
+      try {
+        Thread.sleep(retryDelaySeconds * 1000);
+      } catch (InterruptedException e) {
+        LOG.warn("Interrupted", e);
+      }
+    }
+  }
+
+  protected synchronized TTransport connect(HiveConf conf) throws HiveSQLException, TTransportException {
+    if (transport != null && transport.isOpen()) {
+      transport.close();
+    }
+
+    String host = conf.getVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_BIND_HOST);
+    int port = conf.getIntVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_PORT);
+    LOG.info("Connecting to " + host + ":" + port);
+
+    transport = new TSocket(host, port);
+    ((TSocket) transport).setTimeout((int) conf.getTimeVar(HiveConf.ConfVars.SERVER_READ_SOCKET_TIMEOUT,
+      TimeUnit.SECONDS) * 1000);
+    try {
+      ((TSocket) transport).getSocket().setKeepAlive(conf.getBoolVar(HiveConf.ConfVars.SERVER_TCP_KEEP_ALIVE));
+    } catch (SocketException e) {
+      LOG.error("Error setting keep alive to " + conf.getBoolVar(HiveConf.ConfVars.SERVER_TCP_KEEP_ALIVE), e);
+    }
+
+    String userName = conf.getVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_CLIENT_USER);
+    String passwd = conf.getVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_CLIENT_PASSWORD);
+
+    try {
+      transport = PlainSaslHelper.getPlainTransport(userName, passwd, transport);
+    } catch (SaslException e) {
+      LOG.error("Error creating plain SASL transport", e);
+    }
+
+    TProtocol protocol = new TBinaryProtocol(transport);
+    transport.open();
+    base = new ThriftCLIServiceClient(new TCLIService.Client(protocol));
+    LOG.info("Connected!");
+    return transport;
+  }
+
+  protected class InvocationResult {
+    final boolean success;
+    final Object result;
+    final Throwable exception;
+
+    InvocationResult(boolean success, Object result, Throwable exception) {
+      this.success = success;
+      this.result = result;
+      this.exception = exception;
+    }
+  }
+
+  protected InvocationResult invokeInternal(Method method, Object[] args) throws Throwable {
+    InvocationResult result;
+    try {
+      Object methodResult = method.invoke(base, args);
+      result = new InvocationResult(true, methodResult, null);
+    } catch (UndeclaredThrowableException e) {
+      throw e.getCause();
+    } catch (InvocationTargetException e) {
+      if (e.getCause() instanceof HiveSQLException) {
+        HiveSQLException hiveExc = (HiveSQLException) e.getCause();
+        Throwable cause = hiveExc.getCause();
+        if ((cause instanceof TApplicationException) ||
+          (cause instanceof TProtocolException) ||
+          (cause instanceof TTransportException)) {
+          result =  new InvocationResult(false, null, hiveExc);
+        } else {
+          throw hiveExc;
+        }
+      } else {
+        throw e.getCause();
+      }
+    }
+    return result;
+  }
+
+  @Override
+  public Object invoke(Object o, Method method, Object[] args) throws Throwable {
+    int attempts = 0;
+
+    while (true) {
+      attempts++;
+      InvocationResult invokeResult = invokeInternal(method, args);
+      if (invokeResult.success) {
+        return invokeResult.result;
+      }
+
+      // Error because of thrift client, we have to recreate base object
+      connectWithRetry(conf.getIntVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_CLIENT_CONNECTION_RETRY_LIMIT));
+
+      if (attempts >=  retryLimit) {
+        LOG.error(method.getName() + " failed after " + attempts + " retries.",  invokeResult.exception);
+        throw invokeResult.exception;
+      }
+
+      LOG.warn("Last call ThriftCLIServiceClient." + method.getName() + " failed, attempts = " + attempts,
+        invokeResult.exception);
+      Thread.sleep(retryDelaySeconds * 1000);
+    }
+  }
+
+  public int getRetryLimit() {
+    return retryLimit;
+  }
+
+  public int getRetryDelaySeconds() {
+    return retryDelaySeconds;
+  }
+}
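
For orientation, the factory method above hands back a plain CLIServiceClient, so callers never touch the proxy directly; every call below goes through the retry/reconnect handler. A hedged usage sketch, assuming a reachable HiveServer2 configured via the host/port settings read in connect(), and omitting error handling:

    import java.util.Collections;
    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hive.service.cli.CLIServiceClient;
    import org.apache.hive.service.cli.OperationHandle;
    import org.apache.hive.service.cli.SessionHandle;
    import org.apache.hive.service.cli.thrift.RetryingThriftCLIServiceClient;

    // Sketch only: drive the retrying client introduced in this patch.
    public class RetryingClientExample {
      public static void main(String[] args) throws Exception {
        HiveConf conf = new HiveConf();  // retry limit/delay come from the new settings
        CLIServiceClient client =
            RetryingThriftCLIServiceClient.newRetryingCLIServiceClient(conf);
        SessionHandle session = client.openSession("anonymous", "anonymous");
        OperationHandle op = client.executeStatement(
            session, "show databases", Collections.<String, String>emptyMap());
        client.closeOperation(op);
        client.closeSession(session);
      }
    }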

http://git-wip-us.apache.org/repos/asf/hive/blob/9b11caff/service/src/test/org/apache/hive/service/cli/TestRetryingThriftCLIServiceClient.java
----------------------------------------------------------------------
diff --git a/service/src/test/org/apache/hive/service/cli/TestRetryingThriftCLIServiceClient.java b/service/src/test/org/apache/hive/service/cli/TestRetryingThriftCLIServiceClient.java
new file mode 100644
index 0000000..3798053
--- /dev/null
+++ b/service/src/test/org/apache/hive/service/cli/TestRetryingThriftCLIServiceClient.java
@@ -0,0 +1,133 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.service.cli;
+
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hive.service.auth.HiveAuthFactory;
+import org.apache.hive.service.cli.thrift.RetryingThriftCLIServiceClient;
+import org.apache.hive.service.cli.thrift.ThriftCLIService;
+import org.apache.hive.service.server.HiveServer2;
+import org.apache.thrift.TException;
+import org.apache.thrift.transport.TTransport;
+import org.apache.thrift.transport.TTransportException;
+import org.junit.Test;
+
+import java.lang.reflect.Method;
+import java.lang.reflect.Proxy;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.junit.Assert.*;
+
+/**
+ * Tests the CLI service through a retrying client. All tests should pass, validating that
+ * calls are forwarded successfully.
+ */
+public class TestRetryingThriftCLIServiceClient {
+  protected static ThriftCLIService service;
+
+  static class RetryingThriftCLIServiceClientTest extends RetryingThriftCLIServiceClient {
+    int callCount = 0;
+    int connectCount = 0;
+    static RetryingThriftCLIServiceClientTest handlerInst;
+
+    protected RetryingThriftCLIServiceClientTest(HiveConf conf) {
+      super(conf);
+    }
+
+    public static CLIServiceClient newRetryingCLIServiceClient(HiveConf conf) throws HiveSQLException {
+      handlerInst = new RetryingThriftCLIServiceClientTest(conf);
+      handlerInst.connectWithRetry(conf.getIntVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_CLIENT_RETRY_LIMIT));
+
+      ICLIService cliService =
+        (ICLIService) Proxy.newProxyInstance(RetryingThriftCLIServiceClientTest.class.getClassLoader(),
+          CLIServiceClient.class.getInterfaces(), handlerInst);
+      return new CLIServiceClientWrapper(cliService);
+    }
+
+    @Override
+    protected InvocationResult invokeInternal(Method method, Object[] args) throws Throwable {
+      System.out.println("## Calling: " + method.getName() + ", " + callCount + "/" + getRetryLimit());
+      callCount++;
+      return super.invokeInternal(method, args);
+    }
+
+    @Override
+    protected synchronized TTransport connect(HiveConf conf) throws HiveSQLException, TTransportException {
+      connectCount++;
+      return super.connect(conf);
+    }
+  }
+  @Test
+  public void testRetryBehaviour() throws Exception {
+    // Start hive server2
+    HiveConf hiveConf = new HiveConf();
+    hiveConf.setVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_BIND_HOST, "localhost");
+    hiveConf.setIntVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_PORT, 15000);
+    hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS, false);
+    hiveConf.setVar(HiveConf.ConfVars.HIVE_SERVER2_AUTHENTICATION, HiveAuthFactory.AuthTypes.NONE.toString());
+    hiveConf.setVar(HiveConf.ConfVars.HIVE_SERVER2_TRANSPORT_MODE, "binary");
+    hiveConf.setIntVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_CLIENT_RETRY_LIMIT, 3);
+    hiveConf.setIntVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_CLIENT_CONNECTION_RETRY_LIMIT, 3);
+    hiveConf.setIntVar(HiveConf.ConfVars.HIVE_SERVER2_ASYNC_EXEC_THREADS, 10);
+    hiveConf.setVar(HiveConf.ConfVars.HIVE_SERVER2_ASYNC_EXEC_SHUTDOWN_TIMEOUT, "1s");
+
+    final HiveServer2 server = new HiveServer2();
+    server.init(hiveConf);
+    server.start();
+    Thread.sleep(5000);
+    System.out.println("## HiveServer started");
+
+    // Check that an invalid address causes the connection attempt to be retried
+    hiveConf.setIntVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_PORT, 17000);
+    try {
+      CLIServiceClient cliServiceClient =
+        RetryingThriftCLIServiceClientTest.newRetryingCLIServiceClient(hiveConf);
+      fail("Expected to throw exception for invalid port");
+    } catch (HiveSQLException sqlExc) {
+      assertTrue(sqlExc.getCause() instanceof TTransportException);
+      assertTrue(sqlExc.getMessage().contains("3"));
+    }
+
+    // Reset port setting
+    hiveConf.setIntVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_PORT, 15000);
+    // Create client
+    CLIServiceClient cliServiceClient =
+      RetryingThriftCLIServiceClientTest.newRetryingCLIServiceClient(hiveConf);
+    System.out.println("## Created client");
+
+    // kill server
+    server.stop();
+    Thread.sleep(5000);
+
+    // submit a few queries
+    try {
+      Map<String, String> confOverlay = new HashMap<String, String>();
+      RetryingThriftCLIServiceClientTest.handlerInst.callCount = 0;
+      RetryingThriftCLIServiceClientTest.handlerInst.connectCount = 0;
+      SessionHandle session = cliServiceClient.openSession("anonymous", "anonymous");
+    } catch (HiveSQLException exc) {
+      exc.printStackTrace();
+      assertTrue(exc.getCause() instanceof TException);
+      assertEquals(1, RetryingThriftCLIServiceClientTest.handlerInst.callCount);
+      assertEquals(3, RetryingThriftCLIServiceClientTest.handlerInst.connectCount);
+    }
+
+  }
+}


[14/24] hive git commit: HIVE-10980 : Merge of dynamic partitions loads all data to default partition (Illya Yalovyy via Ashutosh Chauhan)

Posted by pr...@apache.org.
HIVE-10980 : Merge of dynamic partitions loads all data to default partition (Illya Yalovyy via Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan <ha...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ff1f5b1a
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ff1f5b1a
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ff1f5b1a

Branch: refs/heads/llap
Commit: ff1f5b1a7afc4c934bad2a39da217513760d0ba2
Parents: 1e97b16
Author: Illya Yalovyy <ya...@amazon.com>
Authored: Thu Sep 10 00:17:00 2015 -0800
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Sat Sep 12 19:50:08 2015 -0700

----------------------------------------------------------------------
 data/files/dynpartdata1.txt                     |  5 +
 data/files/dynpartdata2.txt                     |  6 ++
 .../hive/ql/optimizer/GenMapRedUtils.java       | 57 +++++++++--
 ...nMapRedUtilsUsePartitionColumnsNegative.java | 73 +++++++++++++++
 ...nMapRedUtilsUsePartitionColumnsPositive.java | 61 ++++++++++++
 .../test/queries/clientpositive/dynpart_merge.q | 28 ++++++
 .../results/clientpositive/dynpart_merge.q.out  | 99 ++++++++++++++++++++
 .../list_bucket_dml_6.q.java1.7.out             | 12 +--
 .../list_bucket_dml_6.q.java1.8.out             | 12 +--
 .../clientpositive/list_bucket_dml_7.q.out      | 12 +--
 10 files changed, 341 insertions(+), 24 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/ff1f5b1a/data/files/dynpartdata1.txt
----------------------------------------------------------------------
diff --git a/data/files/dynpartdata1.txt b/data/files/dynpartdata1.txt
new file mode 100644
index 0000000..aefb87f
--- /dev/null
+++ b/data/files/dynpartdata1.txt
@@ -0,0 +1,5 @@
+20150316,16,reqA,clusterIdA,cacheId1
+20150316,16,reqB,clusterIdB,cacheId2
+20150316,16,reqA,clusterIdC,cacheId3
+20150316,16,reqD,clusterIdD,cacheId4
+20150316,16,reqA,clusterIdA,cacheId5

http://git-wip-us.apache.org/repos/asf/hive/blob/ff1f5b1a/data/files/dynpartdata2.txt
----------------------------------------------------------------------
diff --git a/data/files/dynpartdata2.txt b/data/files/dynpartdata2.txt
new file mode 100644
index 0000000..4afdb7f
--- /dev/null
+++ b/data/files/dynpartdata2.txt
@@ -0,0 +1,6 @@
+20150317,16,reqB,clusterIdB,cacheId6
+20150318,16,reqA,clusterIdC,cacheId6
+20150317,15,reqD,clusterIdD,cacheId7
+20150316,16,reqA,clusterIdD,cacheId8
+20150316,16,reqD,clusterIdB,cacheId9
+20150316,16,reqB,clusterIdA,cacheId1

http://git-wip-us.apache.org/repos/asf/hive/blob/ff1f5b1a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
index 4a325fb..02fbdfe 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hive.ql.optimizer;
 import java.io.IOException;
 import java.io.Serializable;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -112,6 +113,7 @@ import org.apache.hadoop.hive.ql.stats.StatsFactory;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
 import org.apache.hadoop.mapred.InputFormat;
 
+import com.google.common.base.Preconditions;
 import com.google.common.collect.Interner;
 
 /**
@@ -1234,16 +1236,13 @@ public final class GenMapRedUtils {
       ArrayList<ColumnInfo> signature = inputRS.getSignature();
       String tblAlias = fsInputDesc.getTableInfo().getTableName();
       LinkedHashMap<String, String> colMap = new LinkedHashMap<String, String>();
-      StringBuilder partCols = new StringBuilder();
       for (String dpCol : dpCtx.getDPColNames()) {
         ColumnInfo colInfo = new ColumnInfo(dpCol,
             TypeInfoFactory.stringTypeInfo, // all partition column type should be string
             tblAlias, true); // partition column is virtual column
         signature.add(colInfo);
         colMap.put(dpCol, dpCol); // input and output have the same column name
-        partCols.append(dpCol).append('/');
       }
-      partCols.setLength(partCols.length() - 1); // remove the last '/'
       inputRS.setSignature(signature);
 
       // create another DynamicPartitionCtx, which has a different input-to-DP column mapping
@@ -1252,9 +1251,7 @@ public final class GenMapRedUtils {
       fsOutputDesc.setDynPartCtx(dpCtx2);
 
       // update the FileSinkOperator to include partition columns
-      fsInputDesc.getTableInfo().getProperties().setProperty(
-        org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_PARTITION_COLUMNS,
-        partCols.toString()); // list of dynamic partition column names
+      usePartitionColumns(fsInputDesc.getTableInfo().getProperties(), dpCtx.getDPColNames());
     } else {
       // non-partitioned table
       fsInputDesc.getTableInfo().getProperties().remove(
@@ -1877,7 +1874,55 @@ public final class GenMapRedUtils {
     }
     return null;
   }
+  /**
+   * Retains only the specified partition columns.
+   * The provided properties should be pre-populated with partition column names and types;
+   * this function keeps only the information related to the columns in the list.
+   * @param properties properties to update
+   * @param partColNames list of columns to use
+   */
+  static void usePartitionColumns(Properties properties, List<String> partColNames) {
+    Preconditions.checkArgument(!partColNames.isEmpty(), "No partition columns provided to use");
+    Preconditions.checkArgument(new HashSet<String>(partColNames).size() == partColNames.size(),
+        "Partition columns should be unique: " + partColNames);
+
+    String[] partNames = properties.getProperty(
+        org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_PARTITION_COLUMNS)
+        .split("/");
+    String[] partTypes = properties.getProperty(
+        org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_PARTITION_COLUMN_TYPES)
+        .split(":");
+    Preconditions.checkArgument(partNames.length == partTypes.length,
+        "Partition Names, " + Arrays.toString(partNames) + " don't match partition Types, "
+        + Arrays.toString(partTypes));
+
+    Map<String, String> typeMap = new HashMap<String, String>();
+    for (int i = 0; i < partNames.length; i++) {
+      String previousValue = typeMap.put(partNames[i], partTypes[i]);
+      Preconditions.checkArgument(previousValue == null, "Partition columns configuration is inconsistent. "
+          + "There are duplicates in partition column names: " + partNames);
+    }
 
+    StringBuilder partNamesBuf = new StringBuilder();
+    StringBuilder partTypesBuf = new StringBuilder();
+    for (String partName : partColNames) {
+      partNamesBuf.append(partName).append('/');
+      String partType = typeMap.get(partName);
+      if (partType == null) {
+        throw new RuntimeException("Type information for partition column " + partName + " is missing.");
+      }
+      partTypesBuf.append(partType).append(':');
+    }
+    partNamesBuf.setLength(partNamesBuf.length() - 1);
+    partTypesBuf.setLength(partTypesBuf.length() - 1);
+
+    properties.setProperty(
+        org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_PARTITION_COLUMNS,
+        partNamesBuf.toString());
+    properties.setProperty(
+        org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_PARTITION_COLUMN_TYPES,
+        partTypesBuf.toString());
+  }
   private GenMapRedUtils() {
     // prevent instantiation
   }
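
A standalone sketch of the name/type filtering usePartitionColumns performs; the
property keys are written out literally here instead of via hive_metastoreConstants,
and the values mirror the parameterized test cases added below:

    import java.util.Arrays;
    import java.util.List;
    import java.util.Properties;

    public class UsePartitionColumnsSketch {
      public static void main(String[] args) {
        Properties p = new Properties();
        p.setProperty("partition_columns", "p1/p2/p3");
        p.setProperty("partition_columns.types", "t1:t2:t3");

        List<String> retain = Arrays.asList("p2", "p3");
        String[] names = p.getProperty("partition_columns").split("/");
        String[] types = p.getProperty("partition_columns.types").split(":");

        StringBuilder outNames = new StringBuilder();
        StringBuilder outTypes = new StringBuilder();
        for (String col : retain) {           // output order follows the retained list
          for (int i = 0; i < names.length; i++) {
            if (names[i].equals(col)) {
              if (outNames.length() > 0) {
                outNames.append('/');
                outTypes.append(':');
              }
              outNames.append(names[i]);      // keep the name and its matching type
              outTypes.append(types[i]);
            }
          }
        }
        p.setProperty("partition_columns", outNames.toString());
        p.setProperty("partition_columns.types", outTypes.toString());

        // prints: partition_columns=p2/p3 partition_columns.types=t2:t3
        System.out.println("partition_columns=" + p.getProperty("partition_columns")
            + " partition_columns.types=" + p.getProperty("partition_columns.types"));
      }
    }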

http://git-wip-us.apache.org/repos/asf/hive/blob/ff1f5b1a/ql/src/test/org/apache/hadoop/hive/ql/optimizer/TestGenMapRedUtilsUsePartitionColumnsNegative.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/optimizer/TestGenMapRedUtilsUsePartitionColumnsNegative.java b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/TestGenMapRedUtilsUsePartitionColumnsNegative.java
new file mode 100644
index 0000000..153061f
--- /dev/null
+++ b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/TestGenMapRedUtilsUsePartitionColumnsNegative.java
@@ -0,0 +1,73 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.optimizer;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Properties;
+import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+import org.junit.Test;
+
+public class TestGenMapRedUtilsUsePartitionColumnsNegative {
+
+  @Test(expected = NullPointerException.class)
+  public void testUsePartitionColumnsNoPartColNames() {
+    Properties p = new Properties();
+    GenMapRedUtils.usePartitionColumns(p, Arrays.asList("p1"));
+  }
+
+  @Test(expected = IllegalArgumentException.class)
+  public void testUsePartitionColumnsNamesTypesMismatch() {
+    Properties p = new Properties();
+    p.setProperty(hive_metastoreConstants.META_TABLE_PARTITION_COLUMNS, "p1/p2");
+    p.setProperty(hive_metastoreConstants.META_TABLE_PARTITION_COLUMN_TYPES, "t1");
+    GenMapRedUtils.usePartitionColumns(p, Arrays.asList("p1"));
+  }
+
+  @Test(expected = IllegalArgumentException.class)
+  public void testUsePartitionColumnsNoPartitionsToRetain() {
+    Properties p = new Properties();
+    p.setProperty(hive_metastoreConstants.META_TABLE_PARTITION_COLUMNS, "p1");
+    p.setProperty(hive_metastoreConstants.META_TABLE_PARTITION_COLUMN_TYPES, "t1");
+    GenMapRedUtils.usePartitionColumns(p, Collections.<String>emptyList());
+  }
+
+  @Test(expected = RuntimeException.class)
+  public void testUsePartitionColumnsWrongPartColName() {
+    Properties p = new Properties();
+    p.setProperty(hive_metastoreConstants.META_TABLE_PARTITION_COLUMNS, "p1");
+    p.setProperty(hive_metastoreConstants.META_TABLE_PARTITION_COLUMN_TYPES, "t1");
+    GenMapRedUtils.usePartitionColumns(p, Arrays.asList("p2"));
+  }
+
+  @Test(expected = IllegalArgumentException.class)
+  public void testUsePartitionColumnsDuplicatePartColNameInArgument() {
+    Properties p = new Properties();
+    p.setProperty(hive_metastoreConstants.META_TABLE_PARTITION_COLUMNS, "p1/p2");
+    p.setProperty(hive_metastoreConstants.META_TABLE_PARTITION_COLUMN_TYPES, "t1:t2");
+    GenMapRedUtils.usePartitionColumns(p, Arrays.asList("p1","p2","p1"));
+  }
+
+  @Test(expected = IllegalArgumentException.class)
+  public void testUsePartitionColumnsDuplicatePartColNameInConfiguration() {
+    Properties p = new Properties();
+    p.setProperty(hive_metastoreConstants.META_TABLE_PARTITION_COLUMNS, "p1/p2/p1");
+    p.setProperty(hive_metastoreConstants.META_TABLE_PARTITION_COLUMN_TYPES, "t1:t2:t3");
+    GenMapRedUtils.usePartitionColumns(p, Arrays.asList("p1"));
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/ff1f5b1a/ql/src/test/org/apache/hadoop/hive/ql/optimizer/TestGenMapRedUtilsUsePartitionColumnsPositive.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/optimizer/TestGenMapRedUtilsUsePartitionColumnsPositive.java b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/TestGenMapRedUtilsUsePartitionColumnsPositive.java
new file mode 100644
index 0000000..9bcca66
--- /dev/null
+++ b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/TestGenMapRedUtilsUsePartitionColumnsPositive.java
@@ -0,0 +1,61 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.optimizer;
+
+import java.util.Arrays;
+import java.util.Properties;
+import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import static org.junit.Assert.*;
+
+@RunWith(Parameterized.class)
+public class TestGenMapRedUtilsUsePartitionColumnsPositive {
+
+  @Parameterized.Parameters(name = "{index}: updatePartitions({2})")
+  public static Iterable<Object[]> testCases() {
+    return Arrays.asList(new Object[][]{
+      {"p1/p2/p3","t1:t2:t3","p2","p2","t2"},
+      {"p1/p2/p3","t1:t2:t3","p2,p3","p2/p3","t2:t3"},
+      {"p1/p2/p3","t1:t2:t3","p1,p2,p3","p1/p2/p3","t1:t2:t3"},
+      {"p1/p2/p3","t1:t2:t3","p1,p3","p1/p3","t1:t3"},
+      {"p1","t1","p1","p1","t1"},
+      {"p1/p2/p3","t1:t2:t3","p3,p2,p1","p3/p2/p1","t3:t2:t1"}
+    });
+  }
+
+  @Parameterized.Parameter(0) public String inPartColNames;
+  @Parameterized.Parameter(1) public String inPartColTypes;
+  @Parameterized.Parameter(2) public String partNamesToRetain;
+  @Parameterized.Parameter(3) public String expectedPartColNames;
+  @Parameterized.Parameter(4) public String expectedPartColTypes;
+
+  @Test
+  public void testUsePartitionColumns() {
+    Properties p = new Properties();
+    p.setProperty(hive_metastoreConstants.META_TABLE_PARTITION_COLUMNS, inPartColNames);
+    p.setProperty(hive_metastoreConstants.META_TABLE_PARTITION_COLUMN_TYPES, inPartColTypes);
+    GenMapRedUtils.usePartitionColumns(p, Arrays.asList(partNamesToRetain.split(",")));
+    String actualNames = p.getProperty(hive_metastoreConstants.META_TABLE_PARTITION_COLUMNS);
+    String actualTypes = p.getProperty(hive_metastoreConstants.META_TABLE_PARTITION_COLUMN_TYPES);
+    assertEquals(expectedPartColNames, actualNames);
+    assertEquals(expectedPartColTypes, actualTypes);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/ff1f5b1a/ql/src/test/queries/clientpositive/dynpart_merge.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/dynpart_merge.q b/ql/src/test/queries/clientpositive/dynpart_merge.q
new file mode 100644
index 0000000..26f4de7
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/dynpart_merge.q
@@ -0,0 +1,28 @@
+set hive.exec.dynamic.partition=true;
+set hive.exec.dynamic.partition.mode=strict;
+set hive.optimize.sort.dynamic.partition=false;
+set hive.merge.mapfiles=true;
+set hive.merge.mapredfiles=true;
+set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
+
+create external table sdp (
+  dataint bigint,
+  hour int,
+  req string,
+  cid string,
+  caid string
+)
+row format delimited
+fields terminated by ',';
+
+load data local inpath '../../data/files/dynpartdata1.txt' into table sdp;
+load data local inpath '../../data/files/dynpartdata2.txt' into table sdp;
+
+create table tdp (cid string, caid string)
+partitioned by (dataint bigint, hour int, req string);
+
+insert overwrite table tdp partition (dataint=20150316, hour=16, req)
+select cid, caid, req from sdp where dataint=20150316 and hour=16;
+
+select * from tdp order by caid;
+show partitions tdp;

http://git-wip-us.apache.org/repos/asf/hive/blob/ff1f5b1a/ql/src/test/results/clientpositive/dynpart_merge.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/dynpart_merge.q.out b/ql/src/test/results/clientpositive/dynpart_merge.q.out
new file mode 100644
index 0000000..1c6f556
--- /dev/null
+++ b/ql/src/test/results/clientpositive/dynpart_merge.q.out
@@ -0,0 +1,99 @@
+PREHOOK: query: create external table sdp (
+  dataint bigint,
+  hour int,
+  req string,
+  cid string,
+  caid string
+)
+row format delimited
+fields terminated by ','
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@sdp
+POSTHOOK: query: create external table sdp (
+  dataint bigint,
+  hour int,
+  req string,
+  cid string,
+  caid string
+)
+row format delimited
+fields terminated by ','
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@sdp
+PREHOOK: query: load data local inpath '../../data/files/dynpartdata1.txt' into table sdp
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@sdp
+POSTHOOK: query: load data local inpath '../../data/files/dynpartdata1.txt' into table sdp
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@sdp
+PREHOOK: query: load data local inpath '../../data/files/dynpartdata2.txt' into table sdp
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@sdp
+POSTHOOK: query: load data local inpath '../../data/files/dynpartdata2.txt' into table sdp
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@sdp
+PREHOOK: query: create table tdp (cid string, caid string)
+partitioned by (dataint bigint, hour int, req string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tdp
+POSTHOOK: query: create table tdp (cid string, caid string)
+partitioned by (dataint bigint, hour int, req string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tdp
+PREHOOK: query: insert overwrite table tdp partition (dataint=20150316, hour=16, req)
+select cid, caid, req from sdp where dataint=20150316 and hour=16
+PREHOOK: type: QUERY
+PREHOOK: Input: default@sdp
+PREHOOK: Output: default@tdp@dataint=20150316/hour=16
+POSTHOOK: query: insert overwrite table tdp partition (dataint=20150316, hour=16, req)
+select cid, caid, req from sdp where dataint=20150316 and hour=16
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@sdp
+POSTHOOK: Output: default@tdp@dataint=20150316/hour=16/req=reqA
+POSTHOOK: Output: default@tdp@dataint=20150316/hour=16/req=reqB
+POSTHOOK: Output: default@tdp@dataint=20150316/hour=16/req=reqD
+POSTHOOK: Lineage: tdp PARTITION(dataint=20150316,hour=16,req=reqA).caid SIMPLE [(sdp)sdp.FieldSchema(name:caid, type:string, comment:null), ]
+POSTHOOK: Lineage: tdp PARTITION(dataint=20150316,hour=16,req=reqA).cid SIMPLE [(sdp)sdp.FieldSchema(name:cid, type:string, comment:null), ]
+POSTHOOK: Lineage: tdp PARTITION(dataint=20150316,hour=16,req=reqB).caid SIMPLE [(sdp)sdp.FieldSchema(name:caid, type:string, comment:null), ]
+POSTHOOK: Lineage: tdp PARTITION(dataint=20150316,hour=16,req=reqB).cid SIMPLE [(sdp)sdp.FieldSchema(name:cid, type:string, comment:null), ]
+POSTHOOK: Lineage: tdp PARTITION(dataint=20150316,hour=16,req=reqD).caid SIMPLE [(sdp)sdp.FieldSchema(name:caid, type:string, comment:null), ]
+POSTHOOK: Lineage: tdp PARTITION(dataint=20150316,hour=16,req=reqD).cid SIMPLE [(sdp)sdp.FieldSchema(name:cid, type:string, comment:null), ]
+PREHOOK: query: select * from tdp order by caid
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tdp
+PREHOOK: Input: default@tdp@dataint=20150316/hour=16/req=reqA
+PREHOOK: Input: default@tdp@dataint=20150316/hour=16/req=reqB
+PREHOOK: Input: default@tdp@dataint=20150316/hour=16/req=reqD
+#### A masked pattern was here ####
+POSTHOOK: query: select * from tdp order by caid
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tdp
+POSTHOOK: Input: default@tdp@dataint=20150316/hour=16/req=reqA
+POSTHOOK: Input: default@tdp@dataint=20150316/hour=16/req=reqB
+POSTHOOK: Input: default@tdp@dataint=20150316/hour=16/req=reqD
+#### A masked pattern was here ####
+clusterIdA	cacheId1	20150316	16	reqB
+clusterIdA	cacheId1	20150316	16	reqA
+clusterIdB	cacheId2	20150316	16	reqB
+clusterIdC	cacheId3	20150316	16	reqA
+clusterIdD	cacheId4	20150316	16	reqD
+clusterIdA	cacheId5	20150316	16	reqA
+clusterIdD	cacheId8	20150316	16	reqA
+clusterIdB	cacheId9	20150316	16	reqD
+PREHOOK: query: show partitions tdp
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@tdp
+POSTHOOK: query: show partitions tdp
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@tdp
+dataint=20150316/hour=16/req=reqA
+dataint=20150316/hour=16/req=reqB
+dataint=20150316/hour=16/req=reqD

http://git-wip-us.apache.org/repos/asf/hive/blob/ff1f5b1a/ql/src/test/results/clientpositive/list_bucket_dml_6.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_6.q.java1.7.out b/ql/src/test/results/clientpositive/list_bucket_dml_6.q.java1.7.out
index d223234..c3ede05 100644
--- a/ql/src/test/results/clientpositive/list_bucket_dml_6.q.java1.7.out
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_6.q.java1.7.out
@@ -540,7 +540,7 @@ STAGE PLANS:
 #### A masked pattern was here ####
                       name default.list_bucketing_dynamic_part
                       partition_columns hr
-                      partition_columns.types string:string
+                      partition_columns.types string
                       serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
                       serialization.format 1
                       serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
@@ -677,7 +677,7 @@ STAGE PLANS:
 #### A masked pattern was here ####
                 name default.list_bucketing_dynamic_part
                 partition_columns hr
-                partition_columns.types string:string
+                partition_columns.types string
                 serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
@@ -709,7 +709,7 @@ STAGE PLANS:
 #### A masked pattern was here ####
               name default.list_bucketing_dynamic_part
               partition_columns hr
-              partition_columns.types string:string
+              partition_columns.types string
               serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
@@ -726,7 +726,7 @@ STAGE PLANS:
 #### A masked pattern was here ####
                 name default.list_bucketing_dynamic_part
                 partition_columns hr
-                partition_columns.types string:string
+                partition_columns.types string
                 serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
@@ -758,7 +758,7 @@ STAGE PLANS:
 #### A masked pattern was here ####
               name default.list_bucketing_dynamic_part
               partition_columns hr
-              partition_columns.types string:string
+              partition_columns.types string
               serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
@@ -775,7 +775,7 @@ STAGE PLANS:
 #### A masked pattern was here ####
                 name default.list_bucketing_dynamic_part
                 partition_columns hr
-                partition_columns.types string:string
+                partition_columns.types string
                 serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe

http://git-wip-us.apache.org/repos/asf/hive/blob/ff1f5b1a/ql/src/test/results/clientpositive/list_bucket_dml_6.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_6.q.java1.8.out b/ql/src/test/results/clientpositive/list_bucket_dml_6.q.java1.8.out
index f884ace..16a6e72 100644
--- a/ql/src/test/results/clientpositive/list_bucket_dml_6.q.java1.8.out
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_6.q.java1.8.out
@@ -540,7 +540,7 @@ STAGE PLANS:
 #### A masked pattern was here ####
                       name default.list_bucketing_dynamic_part
                       partition_columns hr
-                      partition_columns.types string:string
+                      partition_columns.types string
                       serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
                       serialization.format 1
                       serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
@@ -677,7 +677,7 @@ STAGE PLANS:
 #### A masked pattern was here ####
                 name default.list_bucketing_dynamic_part
                 partition_columns hr
-                partition_columns.types string:string
+                partition_columns.types string
                 serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
@@ -709,7 +709,7 @@ STAGE PLANS:
 #### A masked pattern was here ####
               name default.list_bucketing_dynamic_part
               partition_columns hr
-              partition_columns.types string:string
+              partition_columns.types string
               serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
@@ -726,7 +726,7 @@ STAGE PLANS:
 #### A masked pattern was here ####
                 name default.list_bucketing_dynamic_part
                 partition_columns hr
-                partition_columns.types string:string
+                partition_columns.types string
                 serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
@@ -758,7 +758,7 @@ STAGE PLANS:
 #### A masked pattern was here ####
               name default.list_bucketing_dynamic_part
               partition_columns hr
-              partition_columns.types string:string
+              partition_columns.types string
               serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
@@ -775,7 +775,7 @@ STAGE PLANS:
 #### A masked pattern was here ####
                 name default.list_bucketing_dynamic_part
                 partition_columns hr
-                partition_columns.types string:string
+                partition_columns.types string
                 serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe

http://git-wip-us.apache.org/repos/asf/hive/blob/ff1f5b1a/ql/src/test/results/clientpositive/list_bucket_dml_7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_7.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_7.q.out
index 541944d..7bf4a21 100644
--- a/ql/src/test/results/clientpositive/list_bucket_dml_7.q.out
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_7.q.out
@@ -486,7 +486,7 @@ STAGE PLANS:
 #### A masked pattern was here ####
                       name default.list_bucketing_dynamic_part
                       partition_columns hr
-                      partition_columns.types string:string
+                      partition_columns.types string
                       serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
                       serialization.format 1
                       serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
@@ -623,7 +623,7 @@ STAGE PLANS:
 #### A masked pattern was here ####
                 name default.list_bucketing_dynamic_part
                 partition_columns hr
-                partition_columns.types string:string
+                partition_columns.types string
                 serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
@@ -655,7 +655,7 @@ STAGE PLANS:
 #### A masked pattern was here ####
               name default.list_bucketing_dynamic_part
               partition_columns hr
-              partition_columns.types string:string
+              partition_columns.types string
               serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
@@ -672,7 +672,7 @@ STAGE PLANS:
 #### A masked pattern was here ####
                 name default.list_bucketing_dynamic_part
                 partition_columns hr
-                partition_columns.types string:string
+                partition_columns.types string
                 serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
@@ -704,7 +704,7 @@ STAGE PLANS:
 #### A masked pattern was here ####
               name default.list_bucketing_dynamic_part
               partition_columns hr
-              partition_columns.types string:string
+              partition_columns.types string
               serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
@@ -721,7 +721,7 @@ STAGE PLANS:
 #### A masked pattern was here ####
                 name default.list_bucketing_dynamic_part
                 partition_columns hr
-                partition_columns.types string:string
+                partition_columns.types string
                 serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe


[24/24] hive git commit: HIVE-11811: LLAP: Merge master into branch (Prasanth Jayachandran)

Posted by pr...@apache.org.
HIVE-11811: LLAP: Merge master into branch (Prasanth Jayachandran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b0154f4c
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b0154f4c
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b0154f4c

Branch: refs/heads/llap
Commit: b0154f4c40650e2d74d65e5fb41bcd66df3932da
Parents: cb9fab7 da0be3d
Author: Prasanth Jayachandran <j....@gmail.com>
Authored: Mon Sep 14 03:21:51 2015 -0500
Committer: Prasanth Jayachandran <j....@gmail.com>
Committed: Mon Sep 14 03:21:51 2015 -0500

----------------------------------------------------------------------
 beeline/src/main/resources/beeline-log4j2.xml   |    5 +-
 .../hadoop/hive/common/jsonexplain/tez/Op.java  |    8 +-
 .../hive/common/jsonexplain/tez/Stage.java      |   14 +-
 .../common/jsonexplain/tez/TezJsonParser.java   |   17 +-
 .../org/apache/hadoop/hive/conf/HiveConf.java   |   13 +
 common/src/main/resources/hive-log4j2.xml       |    5 +-
 .../test/resources/hive-exec-log4j2-test.xml    |    5 +-
 common/src/test/resources/hive-log4j2-test.xml  |    5 +-
 data/conf/hive-log4j2.xml                       |    5 +-
 data/files/dynpartdata1.txt                     |    5 +
 data/files/dynpartdata2.txt                     |    6 +
 .../deployers/config/hive/hive-log4j2.xml       |    5 +-
 .../svr/src/main/config/webhcat-log4j2.xml      |    5 +-
 .../hadoop/hive/metastore/ObjectStore.java      |   19 +-
 .../hive/metastore/tools/HiveMetaTool.java      |    5 +
 .../apache/hadoop/hive/ql/exec/ExplainTask.java |   24 +-
 .../hadoop/hive/ql/exec/KeyWrapperFactory.java  |    4 +
 .../hadoop/hive/ql/exec/MapJoinOperator.java    |    5 +
 .../apache/hadoop/hive/ql/exec/MoveTask.java    |   12 +-
 .../apache/hadoop/hive/ql/exec/Operator.java    |    7 +
 .../apache/hadoop/hive/ql/exec/StatsTask.java   |   13 +-
 .../persistence/BytesBytesMultiHashMap.java     |   11 +-
 .../persistence/HybridHashTableContainer.java   |   68 +-
 .../hadoop/hive/ql/exec/tez/DagUtils.java       |    3 +
 .../hive/ql/exec/tez/HashTableLoader.java       |    7 +-
 .../hadoop/hive/ql/exec/tez/InPlaceUpdates.java |   65 +
 .../hadoop/hive/ql/exec/tez/TezJobMonitor.java  |   70 +-
 .../apache/hadoop/hive/ql/exec/tez/TezTask.java |    4 +-
 .../apache/hadoop/hive/ql/hooks/ATSHook.java    |    9 +-
 .../serde/ParquetHiveArrayInspector.java        |   12 +
 .../ql/io/parquet/timestamp/NanoTimeUtils.java  |   23 +-
 .../apache/hadoop/hive/ql/metadata/Hive.java    |   25 +-
 .../hive/ql/optimizer/ConvertJoinMapJoin.java   |   18 +-
 .../hive/ql/optimizer/GenMapRedUtils.java       |   57 +-
 .../ql/optimizer/ReduceSinkMapJoinProc.java     |   19 +-
 .../calcite/reloperators/HiveLimit.java         |   57 -
 .../calcite/reloperators/HiveSort.java          |  110 -
 .../calcite/reloperators/HiveSortLimit.java     |  110 +
 .../calcite/stats/HiveRelMdMemory.java          |    9 +-
 .../calcite/stats/HiveRelMdParallelism.java     |    4 +-
 .../calcite/translator/ASTConverter.java        |   24 +-
 .../calcite/translator/HiveOpConverter.java     |    8 +-
 .../translator/PlanModifierForASTConv.java      |   10 +-
 .../translator/PlanModifierForReturnPath.java   |    4 -
 .../calcite/translator/PlanModifierUtil.java    |    4 +-
 .../hadoop/hive/ql/parse/CalcitePlanner.java    |   17 +-
 .../apache/hadoop/hive/ql/parse/HiveParser.g    |    2 +
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |    2 +-
 .../sqlstd/SQLStdHiveAccessController.java      |    5 +
 .../apache/hadoop/hive/ql/stats/StatsUtils.java |   84 +-
 ql/src/main/resources/hive-exec-log4j2.xml      |    5 +-
 ql/src/main/resources/tez-container-log4j2.xml  |    5 +-
 .../serde/TestParquetTimestampUtils.java        |   38 +-
 ...nMapRedUtilsUsePartitionColumnsNegative.java |   73 +
 ...nMapRedUtilsUsePartitionColumnsPositive.java |   61 +
 .../authorization_set_show_current_role.q       |    3 +
 .../clientpositive/bucket_map_join_tez1.q       |   31 +
 .../queries/clientpositive/cbo_rp_auto_join17.q |   14 +
 .../cbo_rp_cross_product_check_2.q              |   31 +
 .../test/queries/clientpositive/dynpart_merge.q |   28 +
 .../parquet_mixed_partition_formats.q           |   42 +
 .../clientpositive/parquet_ppd_boolean.q        |   42 +-
 .../queries/clientpositive/parquet_ppd_char.q   |   46 +-
 .../queries/clientpositive/parquet_ppd_date.q   |   64 +-
 .../clientpositive/parquet_ppd_decimal.q        |  106 +-
 .../clientpositive/parquet_ppd_timestamp.q      |   62 +-
 .../clientpositive/parquet_ppd_varchar.q        |   46 +-
 .../clientpositive/parquet_predicate_pushdown.q |   20 +-
 .../authorization_explain.q.java1.7.out         |    2 +-
 .../authorization_explain.q.java1.8.out         |    2 +-
 .../authorization_set_show_current_role.q.out   |    8 +
 .../clientpositive/cbo_rp_auto_join17.q.out     |  118 +
 .../cbo_rp_cross_product_check_2.q.out          |  699 ++++
 .../results/clientpositive/dynpart_merge.q.out  |   99 +
 .../clientpositive/explain_dependency.q.out     |   18 +-
 .../clientpositive/explain_dependency2.q.out    |   16 +-
 .../results/clientpositive/input4.q.java1.7.out |    2 +-
 .../results/clientpositive/input4.q.java1.8.out |    2 +-
 .../results/clientpositive/join0.q.java1.7.out  |    2 +-
 .../results/clientpositive/join0.q.java1.8.out  |    4 +-
 .../list_bucket_dml_6.q.java1.7.out             |   12 +-
 .../list_bucket_dml_6.q.java1.8.out             |   12 +-
 .../clientpositive/list_bucket_dml_7.q.out      |   12 +-
 .../results/clientpositive/parallel_join0.q.out |    2 +-
 .../parquet_mixed_partition_formats.q.out       |  303 ++
 .../clientpositive/parquet_ppd_boolean.q.out    |  194 +-
 .../clientpositive/parquet_ppd_char.q.out       |  224 +-
 .../clientpositive/parquet_ppd_date.q.out       |  324 +-
 .../clientpositive/parquet_ppd_decimal.q.out    |  594 +++-
 .../clientpositive/parquet_ppd_timestamp.q.out  |  314 +-
 .../clientpositive/parquet_ppd_varchar.q.out    |  224 +-
 .../parquet_predicate_pushdown.q.out            |  118 +-
 .../clientpositive/plan_json.q.java1.7.out      |    2 +-
 .../clientpositive/plan_json.q.java1.8.out      |    2 +-
 .../spark/bucket_map_join_tez1.q.out            |  357 ++
 .../tez/bucket_map_join_tez1.q.out              |  333 ++
 .../clientpositive/tez/constprog_dpp.q.out      |    4 +-
 .../clientpositive/tez/explainuser_1.q.out      |  496 +--
 .../clientpositive/tez/explainuser_2.q.out      | 3280 +++++++++---------
 .../clientpositive/tez/explainuser_3.q.out      |   10 +-
 .../apache/hadoop/hive/serde2/WriteBuffers.java |   10 +-
 .../thrift/RetryingThriftCLIServiceClient.java  |  331 ++
 .../cli/TestRetryingThriftCLIServiceClient.java |  133 +
 testutils/ptest2/src/main/resources/log4j2.xml  |    5 +-
 104 files changed, 6795 insertions(+), 3218 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/b0154f4c/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/b0154f4c/common/src/main/resources/hive-log4j2.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/b0154f4c/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/b0154f4c/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java
----------------------------------------------------------------------
diff --cc ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java
index 540085a,92e5446..12da0f8
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java
@@@ -364,27 -360,21 +365,33 @@@ public abstract class Operator<T extend
      // derived classes can set this to different object if needed
      outputObjInspector = inputObjInspectors[0];
  
 -    Collection<Future<?>> asyncInitOperations = initializeOp(hconf);
 +    boolean isInitOk = false;
 +    try {
 +      initializeOp(hconf);
 +      // sanity checks
 +      if (!rootInitializeCalled
 +          || childOperatorsArray.length != childOperators.size()) {
 +        throw new AssertionError("Internal error during operator initialization");
 +      }
 +      if (isLogInfoEnabled) {
 +        LOG.info("Initialization Done " + id + " " + getName());
 +      }
  
 -    // sanity checks
 -    if (!rootInitializeCalled
 -	|| asyncInitOperations == null
 -	|| childOperatorsArray.length != childOperators.size()) {
 -      throw new AssertionError("Internal error during operator initialization");
 +      initializeChildren(hconf);
 +      isInitOk = true;
 +    } finally {
 +      // TODO: ugly hack because Java doesn't have dtors and Tez input hangs on shutdown.
 +      if (!isInitOk) {
 +        cancelAsyncInitOps();
 +      }
      }
  
+     if (isLogInfoEnabled) {
+       LOG.info("Initialization Done " + id + " " + getName() + " done is reset.");
+     }
+ 
+     initializeChildren(hconf);
+ 
      // let's wait on the async ops before continuing
      completeInitialization(asyncInitOperations);
    }

http://git-wip-us.apache.org/repos/asf/hive/blob/b0154f4c/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/b0154f4c/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/InPlaceUpdates.java
----------------------------------------------------------------------
diff --cc ql/src/java/org/apache/hadoop/hive/ql/exec/tez/InPlaceUpdates.java
index 0000000,6ecfe71..1c06692
mode 000000,100644..100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/InPlaceUpdates.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/InPlaceUpdates.java
@@@ -1,0 -1,65 +1,65 @@@
+ package org.apache.hadoop.hive.ql.exec.tez;
+ 
+ import static org.fusesource.jansi.Ansi.ansi;
+ import static org.fusesource.jansi.internal.CLibrary.STDERR_FILENO;
+ import static org.fusesource.jansi.internal.CLibrary.STDOUT_FILENO;
+ import static org.fusesource.jansi.internal.CLibrary.isatty;
+ 
+ import java.io.PrintStream;
+ 
 -import jline.TerminalFactory;
 -
+ import org.apache.hadoop.hive.conf.HiveConf;
+ import org.apache.hadoop.hive.ql.session.SessionState;
+ import org.fusesource.jansi.Ansi;
+ 
++import jline.TerminalFactory;
++
+ public class InPlaceUpdates {
+ 
 -  private static final int MIN_TERMINAL_WIDTH = 80;
++  public static final int MIN_TERMINAL_WIDTH = 80;
+ 
+   static boolean isUnixTerminal() {
+ 
+     String os = System.getProperty("os.name");
+     if (os.startsWith("Windows")) {
+       // We do not support Windows; we will revisit this if we really need it on Windows.
+       return false;
+     }
+ 
+     // We must be on some Unix variant.
+     // Check whether standard out is a terminal.
+     try {
+       // isatty system call will return 1 if the file descriptor is terminal else 0
+       if (isatty(STDOUT_FILENO) == 0) {
+         return false;
+       }
+       if (isatty(STDERR_FILENO) == 0) {
+         return false;
+       }
+     } catch (NoClassDefFoundError ignore) {
+       // These errors happen if the JNI lib is not available for your platform.
+       return false;
+     } catch (UnsatisfiedLinkError ignore) {
+       // These errors happen if the JNI lib is not available for your platform.
+       return false;
+     }
+     return true;
+   }
+ 
+   public static boolean inPlaceEligible(HiveConf conf) {
+     boolean inPlaceUpdates = HiveConf.getBoolVar(conf, HiveConf.ConfVars.TEZ_EXEC_INPLACE_PROGRESS);
+ 
+     // we need a terminal at least 80 chars wide to display in-place updates properly
+     return inPlaceUpdates && !SessionState.getConsole().getIsSilent() && isUnixTerminal()
+       && TerminalFactory.get().getWidth() >= MIN_TERMINAL_WIDTH;
+   }
+ 
+   public static void reprintLine(PrintStream out, String line) {
+     out.print(ansi().eraseLine(Ansi.Erase.ALL).a(line).a('\n').toString());
+     out.flush();
+   }
+ 
+   public static void rePositionCursor(PrintStream ps) {
+     ps.print(ansi().cursorUp(0).toString());
+     ps.flush();
+   }
+ }
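
A minimal sketch of the same stdout tty probe; it assumes the jansi library is on the
classpath, and everything beyond the two jansi imports shown above is illustrative:

    import static org.fusesource.jansi.internal.CLibrary.STDOUT_FILENO;
    import static org.fusesource.jansi.internal.CLibrary.isatty;

    public class TtySketch {
      public static void main(String[] args) {
        boolean tty;
        try {
          tty = isatty(STDOUT_FILENO) != 0; // isatty returns 1 when stdout is a terminal
        } catch (NoClassDefFoundError e) {  // JNI lib unavailable on this platform
          tty = false;
        } catch (UnsatisfiedLinkError e) {
          tty = false;
        }
        System.out.println(tty ? "stdout is a terminal" : "stdout is redirected");
      }
    }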

http://git-wip-us.apache.org/repos/asf/hive/blob/b0154f4c/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezJobMonitor.java
----------------------------------------------------------------------
diff --cc ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezJobMonitor.java
index 28c7d20,1e1603b..1dfa092
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezJobMonitor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezJobMonitor.java
@@@ -20,25 -20,7 +20,22 @@@ package org.apache.hadoop.hive.ql.exec.
  
  import static org.apache.tez.dag.api.client.DAGStatus.State.RUNNING;
  import static org.fusesource.jansi.Ansi.ansi;
- import static org.fusesource.jansi.internal.CLibrary.STDERR_FILENO;
- import static org.fusesource.jansi.internal.CLibrary.STDOUT_FILENO;
- import static org.fusesource.jansi.internal.CLibrary.isatty;
  
 +import java.io.IOException;
 +import java.io.PrintStream;
 +import java.text.DecimalFormat;
 +import java.text.NumberFormat;
 +import java.util.Collections;
 +import java.util.EnumSet;
 +import java.util.HashSet;
 +import java.util.LinkedList;
 +import java.util.List;
 +import java.util.Locale;
 +import java.util.Map;
 +import java.util.Set;
 +import java.util.SortedSet;
 +import java.util.TreeSet;
 +
  import org.apache.hadoop.hive.conf.HiveConf;
  import org.apache.hadoop.hive.ql.exec.FileSinkOperator;
  import org.apache.hadoop.hive.ql.exec.Heartbeater;
@@@ -65,8 -46,23 +62,6 @@@ import org.fusesource.jansi.Ansi
  
  import com.google.common.base.Preconditions;
  
 -import java.io.IOException;
 -import java.io.PrintStream;
 -import java.text.DecimalFormat;
 -import java.text.NumberFormat;
 -import java.util.Collections;
 -import java.util.EnumSet;
 -import java.util.HashSet;
 -import java.util.LinkedList;
 -import java.util.List;
 -import java.util.Locale;
 -import java.util.Map;
 -import java.util.Set;
 -import java.util.SortedSet;
 -import java.util.TreeSet;
 -
--import jline.TerminalFactory;
--
  /**
   * TezJobMonitor keeps track of a tez job while it's being executed. It will
   * print status to the console and retrieve final status of the job after
@@@ -75,9 -71,9 +70,9 @@@
  public class TezJobMonitor {
  
    private static final String CLASS_NAME = TezJobMonitor.class.getName();
-   private static final int MIN_TERMINAL_WIDTH = 94;
+ 
    private static final int COLUMN_1_WIDTH = 16;
-   private static final int SEPARATOR_WIDTH = MIN_TERMINAL_WIDTH;
 -  private static final int SEPARATOR_WIDTH = 80;
++  private static final int SEPARATOR_WIDTH = InPlaceUpdates.MIN_TERMINAL_WIDTH;
  
  // keep this within 80 chars width. If more columns need to be added then update the min terminal
  // width requirement and separator width accordingly

http://git-wip-us.apache.org/repos/asf/hive/blob/b0154f4c/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/b0154f4c/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/b0154f4c/ql/src/main/resources/hive-exec-log4j2.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/b0154f4c/ql/src/test/results/clientpositive/list_bucket_dml_6.q.java1.8.out
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/b0154f4c/serde/src/java/org/apache/hadoop/hive/serde2/WriteBuffers.java
----------------------------------------------------------------------


[04/24] hive git commit: HIVE-11761: DoubleWritable hashcode for GroupBy is not properly generated (Aihua Xu, reviewed by Chao Sun)

Posted by pr...@apache.org.
HIVE-11761: DoubleWritable hashcode for GroupBy is not properly generated (Aihua Xu, reviewed by Chao Sun)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/70144073
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/70144073
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/70144073

Branch: refs/heads/llap
Commit: 70144073466fb72b5903ede37d65b159d44a367a
Parents: 7a71e50
Author: Aihua Xu <ai...@gmail.com>
Authored: Thu Sep 10 10:18:24 2015 -0700
Committer: Chao Sun <su...@apache.org>
Committed: Thu Sep 10 10:18:33 2015 -0700

----------------------------------------------------------------------
 .../java/org/apache/hadoop/hive/ql/exec/KeyWrapperFactory.java   | 4 ++++
 1 file changed, 4 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/70144073/ql/src/java/org/apache/hadoop/hive/ql/exec/KeyWrapperFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/KeyWrapperFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/KeyWrapperFactory.java
index 22bd951..1c409a2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/KeyWrapperFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/KeyWrapperFactory.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils.Object
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.StringObjectInspector;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
+import org.apache.hadoop.io.DoubleWritable;
 import org.apache.hadoop.io.Text;
 
 public class KeyWrapperFactory {
@@ -114,6 +115,9 @@ public class KeyWrapperFactory {
             if(element instanceof LazyDouble) {
               long v = Double.doubleToLongBits(((LazyDouble)element).getWritableObject().get());
               hashcode = hashcode + (int) (v ^ (v >>> 32));
+            } else if (element instanceof DoubleWritable){
+              long v = Double.doubleToLongBits(((DoubleWritable)element).get());
+              hashcode = hashcode + (int) (v ^ (v >>> 32));
             } else {
               hashcode = hashcode + element.hashCode();
             }
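
A quick standalone check of the long-folding hash used above; it is the same
computation as Double.hashCode(), so grouping keys hash consistently for doubles:

    public class DoubleHashSketch {
      public static void main(String[] args) {
        double d = 42.5;
        long v = Double.doubleToLongBits(d); // stable 64-bit pattern for the double
        int hash = (int) (v ^ (v >>> 32));   // fold the high and low 32 bits together
        System.out.println(hash == Double.valueOf(d).hashCode()); // prints true
      }
    }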


[06/24] hive git commit: HIVE-11510 : Metatool updateLocation warning on views (Wei Zheng via Sushanth Sowmyan)

Posted by pr...@apache.org.
HIVE-11510 : Metatool updateLocation warning on views (Wei Zheng via Sushanth Sowmyan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/27bf8f0f
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/27bf8f0f
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/27bf8f0f

Branch: refs/heads/llap
Commit: 27bf8f0f70af198ce2c5d939046ca61ab7414585
Parents: b4be31f
Author: Sushanth Sowmyan <kh...@gmail.com>
Authored: Thu Sep 10 12:01:38 2015 -0700
Committer: Sushanth Sowmyan <kh...@gmail.com>
Committed: Thu Sep 10 12:14:57 2015 -0700

----------------------------------------------------------------------
 .../hadoop/hive/metastore/ObjectStore.java       | 19 +++++++++++++++++--
 .../hive/metastore/tools/HiveMetaTool.java       |  5 +++++
 2 files changed, 22 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/27bf8f0f/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index d165fc8..4d6bfcc 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -5930,11 +5930,13 @@ public class ObjectStore implements RawStore, Configurable {
   public class UpdateMStorageDescriptorTblURIRetVal {
     private List<String> badRecords;
     private Map<String, String> updateLocations;
+    private int numNullRecords;
 
     UpdateMStorageDescriptorTblURIRetVal(List<String> badRecords,
-      Map<String, String> updateLocations) {
+      Map<String, String> updateLocations, int numNullRecords) {
       this.badRecords = badRecords;
       this.updateLocations = updateLocations;
+      this.numNullRecords = numNullRecords;
     }
 
     public List<String> getBadRecords() {
@@ -5952,6 +5954,14 @@ public class ObjectStore implements RawStore, Configurable {
     public void setUpdateLocations(Map<String, String> updateLocations) {
       this.updateLocations = updateLocations;
     }
+
+    public int getNumNullRecords() {
+      return numNullRecords;
+    }
+
+    public void setNumNullRecords(int numNullRecords) {
+      this.numNullRecords = numNullRecords;
+    }
   }
 
   /** The following APIs
@@ -5967,6 +5977,7 @@ public class ObjectStore implements RawStore, Configurable {
     Query query = null;
     Map<String, String> updateLocations = new HashMap<String, String>();
     List<String> badRecords = new ArrayList<String>();
+    int numNullRecords = 0;
     UpdateMStorageDescriptorTblURIRetVal retVal = null;
     try {
       openTransaction();
@@ -5976,6 +5987,10 @@ public class ObjectStore implements RawStore, Configurable {
       for (MStorageDescriptor mSDS : mSDSs) {
         URI locationURI = null;
         String location = mSDS.getLocation();
+        if (location == null) { // This can happen for View or Index
+          numNullRecords++;
+          continue;
+        }
         try {
           locationURI = new Path(location).toUri();
         } catch (IllegalArgumentException e) {
@@ -5995,7 +6010,7 @@ public class ObjectStore implements RawStore, Configurable {
       }
       committed = commitTransaction();
       if (committed) {
-        retVal = new UpdateMStorageDescriptorTblURIRetVal(badRecords, updateLocations);
+        retVal = new UpdateMStorageDescriptorTblURIRetVal(badRecords, updateLocations, numNullRecords);
       }
       return retVal;
     } finally {

http://git-wip-us.apache.org/repos/asf/hive/blob/27bf8f0f/metastore/src/java/org/apache/hadoop/hive/metastore/tools/HiveMetaTool.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/tools/HiveMetaTool.java b/metastore/src/java/org/apache/hadoop/hive/metastore/tools/HiveMetaTool.java
index 411ac21..e4e9e3a 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/tools/HiveMetaTool.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/tools/HiveMetaTool.java
@@ -222,6 +222,11 @@ public class HiveMetaTool {
           System.err.println("bad location URI: " + badRecord);
         }
       }
+      int numNullRecords = retVal.getNumNullRecords();
+      if (numNullRecords != 0) {
+        LOG.debug("Number of NULL location URI: " + numNullRecords +
+            ". This can happen for View or Index.");
+      }
     }
   }
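
The pattern above is count-and-skip: storage descriptors with a NULL location (views and indexes have no physical location) are tallied and logged at debug level instead of being reported as bad URIs. A condensed, runnable sketch of that pattern, with illustrative names rather than Hive's actual classes:

    import java.util.ArrayList;
    import java.util.List;

    public class NullLocationScanSketch {
      public static void main(String[] args) {
        List<String> locations = new ArrayList<>();
        locations.add("hdfs://old-nn:8020/warehouse/t1");
        locations.add(null); // a view or index: no physical location, not an error
        locations.add("hdfs://old-nn:8020/warehouse/t2");

        int numNullRecords = 0;
        List<String> updatable = new ArrayList<>();
        for (String location : locations) {
          if (location == null) { // expected for views/indexes; count and move on
            numNullRecords++;
            continue;
          }
          updatable.add(location);
        }
        System.out.println("updatable=" + updatable.size()
            + ", nullLocations=" + numNullRecords); // updatable=2, nullLocations=1
      }
    }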
 


[21/24] hive git commit: HIVE-11792: User explain in tez does not preserve ordering (Prasanth Jayachandran reviewed by Pengcheng Xiong)
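
This part updates Tez golden files after the user-level explain began emitting operator attribute maps in a deterministic, sorted key order: join keys now list "0" before "1", and table descriptors list their attributes alphabetically, as the diff below shows. A minimal sketch of the idea, using a sorted map for stable iteration (illustrative names and output format only; the actual formatter lives in Hive's explain code, and the golden files use JSON-style quoting):

    import java.util.Map;
    import java.util.TreeMap;

    public class StableExplainOrderSketch {
      public static void main(String[] args) {
        Map<String, String> keys = new TreeMap<>(); // sorted keys => deterministic iteration
        keys.put("1", "_col0 (type: string)");
        keys.put("0", "_col3 (type: string)");
        System.out.println("keys:" + keys);
        // keys:{0=_col3 (type: string), 1=_col0 (type: string)}
      }
    }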

http://git-wip-us.apache.org/repos/asf/hive/blob/da0be3db/ql/src/test/results/clientpositive/tez/explainuser_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/explainuser_2.q.out b/ql/src/test/results/clientpositive/tez/explainuser_2.q.out
index 566b451..8156789 100644
--- a/ql/src/test/results/clientpositive/tez/explainuser_2.q.out
+++ b/ql/src/test/results/clientpositive/tez/explainuser_2.q.out
@@ -192,13 +192,13 @@ Stage-0
          File Output Operator [FS_18]
             compressed:false
             Statistics:Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Select Operator [SEL_17]
                outputColumnNames:["_col0","_col1","_col2"]
                Statistics:Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
                Merge Join Operator [MERGEJOIN_28]
                |  condition map:[{"":"Inner Join 0 to 1"}]
-               |  keys:{"1":"_col0 (type: string)","0":"_col3 (type: string)"}
+               |  keys:{"0":"_col3 (type: string)","1":"_col0 (type: string)"}
                |  outputColumnNames:["_col0","_col3","_col6"]
                |  Statistics:Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
                |<-Map 5 [SIMPLE_EDGE]
@@ -226,7 +226,7 @@ Stage-0
                      value expressions:_col0 (type: string)
                      Merge Join Operator [MERGEJOIN_27]
                      |  condition map:[{"":"Inner Join 0 to 1"}]
-                     |  keys:{"1":"_col1 (type: string)","0":"_col0 (type: string)"}
+                     |  keys:{"0":"_col0 (type: string)","1":"_col1 (type: string)"}
                      |  outputColumnNames:["_col0","_col3"]
                      |  Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                      |<-Map 1 [SIMPLE_EDGE]
@@ -338,7 +338,7 @@ Stage-0
          File Output Operator [FS_69]
             compressed:false
             Statistics:Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Limit [LIM_68]
                Number of rows:100
                Statistics:Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
@@ -373,7 +373,7 @@ Stage-0
                                  Statistics:Num rows: 804 Data size: 8552 Basic stats: COMPLETE Column stats: NONE
                                  Merge Join Operator [MERGEJOIN_111]
                                  |  condition map:[{"":"Inner Join 0 to 1"}]
-                                 |  keys:{"1":"_col15 (type: string), _col17 (type: string)","0":"_col1 (type: string), _col3 (type: string)"}
+                                 |  keys:{"0":"_col1 (type: string), _col3 (type: string)","1":"_col15 (type: string), _col17 (type: string)"}
                                  |  outputColumnNames:["_col2","_col3","_col12","_col13","_col20","_col21"]
                                  |  Statistics:Num rows: 804 Data size: 8552 Basic stats: COMPLETE Column stats: NONE
                                  |<-Reducer 11 [SIMPLE_EDGE]
@@ -388,7 +388,7 @@ Stage-0
                                  |        Statistics:Num rows: 731 Data size: 7775 Basic stats: COMPLETE Column stats: NONE
                                  |        Merge Join Operator [MERGEJOIN_110]
                                  |        |  condition map:[{"":"Inner Join 0 to 1"}]
-                                 |        |  keys:{"1":"_col2 (type: string), _col4 (type: string)","0":"_col4 (type: string), _col6 (type: string)"}
+                                 |        |  keys:{"0":"_col4 (type: string), _col6 (type: string)","1":"_col2 (type: string), _col4 (type: string)"}
                                  |        |  outputColumnNames:["_col2","_col3","_col14","_col15","_col17"]
                                  |        |  Statistics:Num rows: 731 Data size: 7775 Basic stats: COMPLETE Column stats: NONE
                                  |        |<-Reducer 10 [SIMPLE_EDGE]
@@ -400,7 +400,7 @@ Stage-0
                                  |        |     value expressions:_col2 (type: string), _col3 (type: string)
                                  |        |     Merge Join Operator [MERGEJOIN_108]
                                  |        |     |  condition map:[{"":"Inner Join 0 to 1"}]
-                                 |        |     |  keys:{"1":"_col1 (type: string)","0":"_col3 (type: string)"}
+                                 |        |     |  keys:{"0":"_col3 (type: string)","1":"_col1 (type: string)"}
                                  |        |     |  outputColumnNames:["_col2","_col3","_col4","_col6"]
                                  |        |     |  Statistics:Num rows: 665 Data size: 7069 Basic stats: COMPLETE Column stats: NONE
                                  |        |     |<-Map 14 [SIMPLE_EDGE]
@@ -427,7 +427,7 @@ Stage-0
                                  |        |           value expressions:_col2 (type: string), _col4 (type: string), _col6 (type: string)
                                  |        |           Merge Join Operator [MERGEJOIN_107]
                                  |        |           |  condition map:[{"":"Inner Join 0 to 1"}]
-                                 |        |           |  keys:{"1":"_col0 (type: string)","0":"_col2 (type: string)"}
+                                 |        |           |  keys:{"0":"_col2 (type: string)","1":"_col0 (type: string)"}
                                  |        |           |  outputColumnNames:["_col2","_col3","_col4","_col6"]
                                  |        |           |  Statistics:Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE
                                  |        |           |<-Map 13 [SIMPLE_EDGE]
@@ -454,7 +454,7 @@ Stage-0
                                  |        |                 value expressions:_col3 (type: string), _col4 (type: string), _col6 (type: string)
                                  |        |                 Merge Join Operator [MERGEJOIN_106]
                                  |        |                 |  condition map:[{"":"Inner Join 0 to 1"}]
-                                 |        |                 |  keys:{"1":"_col3 (type: string)","0":"_col1 (type: string)"}
+                                 |        |                 |  keys:{"0":"_col1 (type: string)","1":"_col3 (type: string)"}
                                  |        |                 |  outputColumnNames:["_col2","_col3","_col4","_col6"]
                                  |        |                 |  Statistics:Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
                                  |        |                 |<-Map 12 [SIMPLE_EDGE]
@@ -497,7 +497,7 @@ Stage-0
                                  |              value expressions:_col3 (type: string), _col5 (type: string)
                                  |              Merge Join Operator [MERGEJOIN_109]
                                  |              |  condition map:[{"":"Inner Join 0 to 1"}]
-                                 |              |  keys:{"1":"_col0 (type: string)","0":"_col0 (type: string)"}
+                                 |              |  keys:{"0":"_col0 (type: string)","1":"_col0 (type: string)"}
                                  |              |  outputColumnNames:["_col2","_col3","_col4","_col5"]
                                  |              |  Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
                                  |              |<-Map 15 [SIMPLE_EDGE]
@@ -540,7 +540,7 @@ Stage-0
                                        value expressions:_col2 (type: string)
                                        Merge Join Operator [MERGEJOIN_105]
                                        |  condition map:[{"":"Inner Join 0 to 1"}]
-                                       |  keys:{"1":"_col0 (type: string)","0":"_col0 (type: string)"}
+                                       |  keys:{"0":"_col0 (type: string)","1":"_col0 (type: string)"}
                                        |  outputColumnNames:["_col1","_col2","_col3"]
                                        |  Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
                                        |<-Map 1 [SIMPLE_EDGE]
@@ -616,13 +616,13 @@ Stage-0
          File Output Operator [FS_59]
             compressed:false
             Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Group By Operator [GBY_57]
             |  keys:KEY._col0 (type: string), KEY._col1 (type: string)
             |  outputColumnNames:["_col0","_col1"]
             |  Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
             |<-Union 6 [SIMPLE_EDGE]
-               |<-Reducer 5 [CONTAINS]
+               |<-Reducer 15 [CONTAINS]
                |  Reduce Output Operator [RS_56]
                |     key expressions:_col0 (type: string), _col1 (type: string)
                |     Map-reduce partition columns:_col0 (type: string), _col1 (type: string)
@@ -632,110 +632,110 @@ Stage-0
                |        keys:_col0 (type: string), _col1 (type: string)
                |        outputColumnNames:["_col0","_col1"]
                |        Statistics:Num rows: 550 Data size: 5842 Basic stats: COMPLETE Column stats: NONE
-               |        Select Operator [SEL_25]
+               |        Select Operator [SEL_51]
                |           outputColumnNames:["_col0","_col1"]
                |           Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
-               |           Merge Join Operator [MERGEJOIN_83]
+               |           Merge Join Operator [MERGEJOIN_85]
                |           |  condition map:[{"":"Inner Join 0 to 1"}]
-               |           |  keys:{"1":"_col0 (type: string)","0":"_col2 (type: string)"}
+               |           |  keys:{"0":"_col2 (type: string)","1":"_col0 (type: string)"}
                |           |  outputColumnNames:["_col1","_col2"]
                |           |  Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
-               |           |<-Map 10 [SIMPLE_EDGE]
-               |           |  Reduce Output Operator [RS_23]
+               |           |<-Map 18 [SIMPLE_EDGE]
+               |           |  Reduce Output Operator [RS_49]
                |           |     key expressions:_col0 (type: string)
                |           |     Map-reduce partition columns:_col0 (type: string)
                |           |     sort order:+
                |           |     Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-               |           |     Select Operator [SEL_14]
+               |           |     Select Operator [SEL_40]
                |           |        outputColumnNames:["_col0"]
                |           |        Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-               |           |        Filter Operator [FIL_77]
+               |           |        Filter Operator [FIL_81]
                |           |           predicate:key is not null (type: boolean)
                |           |           Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-               |           |           TableScan [TS_13]
+               |           |           TableScan [TS_39]
                |           |              alias:y
                |           |              Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-               |           |<-Reducer 4 [SIMPLE_EDGE]
-               |              Reduce Output Operator [RS_21]
+               |           |<-Reducer 14 [SIMPLE_EDGE]
+               |              Reduce Output Operator [RS_47]
                |                 key expressions:_col2 (type: string)
                |                 Map-reduce partition columns:_col2 (type: string)
                |                 sort order:+
                |                 Statistics:Num rows: 144 Data size: 1509 Basic stats: COMPLETE Column stats: NONE
                |                 value expressions:_col1 (type: string)
-               |                 Merge Join Operator [MERGEJOIN_82]
+               |                 Merge Join Operator [MERGEJOIN_84]
                |                 |  condition map:[{"":"Inner Join 0 to 1"}]
-               |                 |  keys:{"1":"_col1 (type: string)","0":"_col1 (type: string)"}
+               |                 |  keys:{"0":"_col1 (type: string)","1":"_col1 (type: string)"}
                |                 |  outputColumnNames:["_col1","_col2"]
                |                 |  Statistics:Num rows: 144 Data size: 1509 Basic stats: COMPLETE Column stats: NONE
-               |                 |<-Map 9 [SIMPLE_EDGE]
-               |                 |  Reduce Output Operator [RS_18]
+               |                 |<-Map 17 [SIMPLE_EDGE]
+               |                 |  Reduce Output Operator [RS_44]
                |                 |     key expressions:_col1 (type: string)
                |                 |     Map-reduce partition columns:_col1 (type: string)
                |                 |     sort order:+
                |                 |     Statistics:Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
                |                 |     value expressions:_col0 (type: string)
-               |                 |     Select Operator [SEL_12]
+               |                 |     Select Operator [SEL_38]
                |                 |        outputColumnNames:["_col0","_col1"]
                |                 |        Statistics:Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
-               |                 |        Filter Operator [FIL_76]
+               |                 |        Filter Operator [FIL_80]
                |                 |           predicate:(value is not null and key is not null) (type: boolean)
                |                 |           Statistics:Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
-               |                 |           TableScan [TS_11]
+               |                 |           TableScan [TS_37]
                |                 |              alias:x
                |                 |              Statistics:Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
-               |                 |<-Reducer 3 [SIMPLE_EDGE]
-               |                    Reduce Output Operator [RS_16]
+               |                 |<-Reducer 13 [SIMPLE_EDGE]
+               |                    Reduce Output Operator [RS_42]
                |                       key expressions:_col1 (type: string)
                |                       Map-reduce partition columns:_col1 (type: string)
                |                       sort order:+
                |                       Statistics:Num rows: 131 Data size: 1372 Basic stats: COMPLETE Column stats: NONE
-               |                       Select Operator [SEL_10]
+               |                       Select Operator [SEL_36]
                |                          outputColumnNames:["_col1"]
                |                          Statistics:Num rows: 131 Data size: 1372 Basic stats: COMPLETE Column stats: NONE
-               |                          Group By Operator [GBY_9]
+               |                          Group By Operator [GBY_35]
                |                          |  keys:KEY._col0 (type: string), KEY._col1 (type: string)
                |                          |  outputColumnNames:["_col0","_col1"]
                |                          |  Statistics:Num rows: 131 Data size: 1372 Basic stats: COMPLETE Column stats: NONE
-               |                          |<-Union 2 [SIMPLE_EDGE]
-               |                             |<-Map 1 [CONTAINS]
-               |                             |  Reduce Output Operator [RS_8]
+               |                          |<-Union 12 [SIMPLE_EDGE]
+               |                             |<-Map 11 [CONTAINS]
+               |                             |  Reduce Output Operator [RS_34]
                |                             |     key expressions:_col0 (type: string), _col1 (type: string)
                |                             |     Map-reduce partition columns:_col0 (type: string), _col1 (type: string)
                |                             |     sort order:++
                |                             |     Statistics:Num rows: 263 Data size: 2755 Basic stats: COMPLETE Column stats: NONE
-               |                             |     Group By Operator [GBY_7]
+               |                             |     Group By Operator [GBY_33]
                |                             |        keys:_col0 (type: string), _col1 (type: string)
                |                             |        outputColumnNames:["_col0","_col1"]
                |                             |        Statistics:Num rows: 263 Data size: 2755 Basic stats: COMPLETE Column stats: NONE
-               |                             |        Select Operator [SEL_1]
+               |                             |        Select Operator [SEL_27]
                |                             |           outputColumnNames:["_col0","_col1"]
                |                             |           Statistics:Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
-               |                             |           Filter Operator [FIL_74]
+               |                             |           Filter Operator [FIL_78]
                |                             |              predicate:value is not null (type: boolean)
                |                             |              Statistics:Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
-               |                             |              TableScan [TS_0]
+               |                             |              TableScan [TS_26]
                |                             |                 alias:x
                |                             |                 Statistics:Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
-               |                             |<-Map 8 [CONTAINS]
-               |                                Reduce Output Operator [RS_8]
+               |                             |<-Map 16 [CONTAINS]
+               |                                Reduce Output Operator [RS_34]
                |                                   key expressions:_col0 (type: string), _col1 (type: string)
                |                                   Map-reduce partition columns:_col0 (type: string), _col1 (type: string)
                |                                   sort order:++
                |                                   Statistics:Num rows: 263 Data size: 2755 Basic stats: COMPLETE Column stats: NONE
-               |                                   Group By Operator [GBY_7]
+               |                                   Group By Operator [GBY_33]
                |                                      keys:_col0 (type: string), _col1 (type: string)
                |                                      outputColumnNames:["_col0","_col1"]
                |                                      Statistics:Num rows: 263 Data size: 2755 Basic stats: COMPLETE Column stats: NONE
-               |                                      Select Operator [SEL_3]
+               |                                      Select Operator [SEL_29]
                |                                         outputColumnNames:["_col0","_col1"]
                |                                         Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-               |                                         Filter Operator [FIL_75]
+               |                                         Filter Operator [FIL_79]
                |                                            predicate:value is not null (type: boolean)
                |                                            Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-               |                                            TableScan [TS_2]
+               |                                            TableScan [TS_28]
                |                                               alias:y
                |                                               Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-               |<-Reducer 15 [CONTAINS]
+               |<-Reducer 5 [CONTAINS]
                   Reduce Output Operator [RS_56]
                      key expressions:_col0 (type: string), _col1 (type: string)
                      Map-reduce partition columns:_col0 (type: string), _col1 (type: string)
@@ -745,107 +745,107 @@ Stage-0
                         keys:_col0 (type: string), _col1 (type: string)
                         outputColumnNames:["_col0","_col1"]
                         Statistics:Num rows: 550 Data size: 5842 Basic stats: COMPLETE Column stats: NONE
-                        Select Operator [SEL_51]
+                        Select Operator [SEL_25]
                            outputColumnNames:["_col0","_col1"]
                            Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
-                           Merge Join Operator [MERGEJOIN_85]
+                           Merge Join Operator [MERGEJOIN_83]
                            |  condition map:[{"":"Inner Join 0 to 1"}]
-                           |  keys:{"1":"_col0 (type: string)","0":"_col2 (type: string)"}
+                           |  keys:{"0":"_col2 (type: string)","1":"_col0 (type: string)"}
                            |  outputColumnNames:["_col1","_col2"]
                            |  Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
-                           |<-Map 18 [SIMPLE_EDGE]
-                           |  Reduce Output Operator [RS_49]
+                           |<-Map 10 [SIMPLE_EDGE]
+                           |  Reduce Output Operator [RS_23]
                            |     key expressions:_col0 (type: string)
                            |     Map-reduce partition columns:_col0 (type: string)
                            |     sort order:+
                            |     Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                           |     Select Operator [SEL_40]
+                           |     Select Operator [SEL_14]
                            |        outputColumnNames:["_col0"]
                            |        Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                           |        Filter Operator [FIL_81]
+                           |        Filter Operator [FIL_77]
                            |           predicate:key is not null (type: boolean)
                            |           Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                           |           TableScan [TS_39]
+                           |           TableScan [TS_13]
                            |              alias:y
                            |              Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                           |<-Reducer 14 [SIMPLE_EDGE]
-                              Reduce Output Operator [RS_47]
+                           |<-Reducer 4 [SIMPLE_EDGE]
+                              Reduce Output Operator [RS_21]
                                  key expressions:_col2 (type: string)
                                  Map-reduce partition columns:_col2 (type: string)
                                  sort order:+
                                  Statistics:Num rows: 144 Data size: 1509 Basic stats: COMPLETE Column stats: NONE
                                  value expressions:_col1 (type: string)
-                                 Merge Join Operator [MERGEJOIN_84]
+                                 Merge Join Operator [MERGEJOIN_82]
                                  |  condition map:[{"":"Inner Join 0 to 1"}]
-                                 |  keys:{"1":"_col1 (type: string)","0":"_col1 (type: string)"}
+                                 |  keys:{"0":"_col1 (type: string)","1":"_col1 (type: string)"}
                                  |  outputColumnNames:["_col1","_col2"]
                                  |  Statistics:Num rows: 144 Data size: 1509 Basic stats: COMPLETE Column stats: NONE
-                                 |<-Map 17 [SIMPLE_EDGE]
-                                 |  Reduce Output Operator [RS_44]
+                                 |<-Map 9 [SIMPLE_EDGE]
+                                 |  Reduce Output Operator [RS_18]
                                  |     key expressions:_col1 (type: string)
                                  |     Map-reduce partition columns:_col1 (type: string)
                                  |     sort order:+
                                  |     Statistics:Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
                                  |     value expressions:_col0 (type: string)
-                                 |     Select Operator [SEL_38]
+                                 |     Select Operator [SEL_12]
                                  |        outputColumnNames:["_col0","_col1"]
                                  |        Statistics:Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
-                                 |        Filter Operator [FIL_80]
+                                 |        Filter Operator [FIL_76]
                                  |           predicate:(value is not null and key is not null) (type: boolean)
                                  |           Statistics:Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
-                                 |           TableScan [TS_37]
+                                 |           TableScan [TS_11]
                                  |              alias:x
                                  |              Statistics:Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
-                                 |<-Reducer 13 [SIMPLE_EDGE]
-                                    Reduce Output Operator [RS_42]
+                                 |<-Reducer 3 [SIMPLE_EDGE]
+                                    Reduce Output Operator [RS_16]
                                        key expressions:_col1 (type: string)
                                        Map-reduce partition columns:_col1 (type: string)
                                        sort order:+
                                        Statistics:Num rows: 131 Data size: 1372 Basic stats: COMPLETE Column stats: NONE
-                                       Select Operator [SEL_36]
+                                       Select Operator [SEL_10]
                                           outputColumnNames:["_col1"]
                                           Statistics:Num rows: 131 Data size: 1372 Basic stats: COMPLETE Column stats: NONE
-                                          Group By Operator [GBY_35]
+                                          Group By Operator [GBY_9]
                                           |  keys:KEY._col0 (type: string), KEY._col1 (type: string)
                                           |  outputColumnNames:["_col0","_col1"]
                                           |  Statistics:Num rows: 131 Data size: 1372 Basic stats: COMPLETE Column stats: NONE
-                                          |<-Union 12 [SIMPLE_EDGE]
-                                             |<-Map 11 [CONTAINS]
-                                             |  Reduce Output Operator [RS_34]
+                                          |<-Union 2 [SIMPLE_EDGE]
+                                             |<-Map 1 [CONTAINS]
+                                             |  Reduce Output Operator [RS_8]
                                              |     key expressions:_col0 (type: string), _col1 (type: string)
                                              |     Map-reduce partition columns:_col0 (type: string), _col1 (type: string)
                                              |     sort order:++
                                              |     Statistics:Num rows: 263 Data size: 2755 Basic stats: COMPLETE Column stats: NONE
-                                             |     Group By Operator [GBY_33]
+                                             |     Group By Operator [GBY_7]
                                              |        keys:_col0 (type: string), _col1 (type: string)
                                              |        outputColumnNames:["_col0","_col1"]
                                              |        Statistics:Num rows: 263 Data size: 2755 Basic stats: COMPLETE Column stats: NONE
-                                             |        Select Operator [SEL_27]
+                                             |        Select Operator [SEL_1]
                                              |           outputColumnNames:["_col0","_col1"]
                                              |           Statistics:Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
-                                             |           Filter Operator [FIL_78]
+                                             |           Filter Operator [FIL_74]
                                              |              predicate:value is not null (type: boolean)
                                              |              Statistics:Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
-                                             |              TableScan [TS_26]
+                                             |              TableScan [TS_0]
                                              |                 alias:x
                                              |                 Statistics:Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
-                                             |<-Map 16 [CONTAINS]
-                                                Reduce Output Operator [RS_34]
+                                             |<-Map 8 [CONTAINS]
+                                                Reduce Output Operator [RS_8]
                                                    key expressions:_col0 (type: string), _col1 (type: string)
                                                    Map-reduce partition columns:_col0 (type: string), _col1 (type: string)
                                                    sort order:++
                                                    Statistics:Num rows: 263 Data size: 2755 Basic stats: COMPLETE Column stats: NONE
-                                                   Group By Operator [GBY_33]
+                                                   Group By Operator [GBY_7]
                                                       keys:_col0 (type: string), _col1 (type: string)
                                                       outputColumnNames:["_col0","_col1"]
                                                       Statistics:Num rows: 263 Data size: 2755 Basic stats: COMPLETE Column stats: NONE
-                                                      Select Operator [SEL_29]
+                                                      Select Operator [SEL_3]
                                                          outputColumnNames:["_col0","_col1"]
                                                          Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                                                         Filter Operator [FIL_79]
+                                                         Filter Operator [FIL_75]
                                                             predicate:value is not null (type: boolean)
                                                             Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                                                            TableScan [TS_28]
+                                                            TableScan [TS_2]
                                                                alias:y
                                                                Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 
@@ -910,7 +910,7 @@ Stage-0
          File Output Operator [FS_119]
             compressed:false
             Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Group By Operator [GBY_117]
             |  keys:KEY._col0 (type: string), KEY._col1 (type: string)
             |  outputColumnNames:["_col0","_col1"]
@@ -931,7 +931,7 @@ Stage-0
                |           Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                |           Merge Join Operator [MERGEJOIN_167]
                |           |  condition map:[{"":"Inner Join 0 to 1"}]
-               |           |  keys:{"1":"_col0 (type: string)","0":"_col2 (type: string)"}
+               |           |  keys:{"0":"_col2 (type: string)","1":"_col0 (type: string)"}
                |           |  outputColumnNames:["_col2","_col5"]
                |           |  Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                |           |<-Map 37 [SIMPLE_EDGE]
@@ -958,7 +958,7 @@ Stage-0
                |                 Statistics:Num rows: 242 Data size: 2565 Basic stats: COMPLETE Column stats: NONE
                |                 Merge Join Operator [MERGEJOIN_166]
                |                 |  condition map:[{"":"Inner Join 0 to 1"}]
-               |                 |  keys:{"1":"_col1 (type: string)","0":"_col1 (type: string)"}
+               |                 |  keys:{"0":"_col1 (type: string)","1":"_col1 (type: string)"}
                |                 |  outputColumnNames:["_col2"]
                |                 |  Statistics:Num rows: 242 Data size: 2565 Basic stats: COMPLETE Column stats: NONE
                |                 |<-Map 36 [SIMPLE_EDGE]
@@ -1112,7 +1112,7 @@ Stage-0
                         |  outputColumnNames:["_col0","_col1"]
                         |  Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                         |<-Union 6 [SIMPLE_EDGE]
-                           |<-Reducer 5 [CONTAINS]
+                           |<-Reducer 19 [CONTAINS]
                            |  Reduce Output Operator [RS_65]
                            |     key expressions:_col0 (type: string), _col1 (type: string)
                            |     Map-reduce partition columns:_col0 (type: string), _col1 (type: string)
@@ -1122,110 +1122,144 @@ Stage-0
                            |        keys:_col0 (type: string), _col1 (type: string)
                            |        outputColumnNames:["_col0","_col1"]
                            |        Statistics:Num rows: 550 Data size: 5842 Basic stats: COMPLETE Column stats: NONE
-                           |        Select Operator [SEL_25]
+                           |        Select Operator [SEL_60]
                            |           outputColumnNames:["_col0","_col1"]
                            |           Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
-                           |           Merge Join Operator [MERGEJOIN_163]
+                           |           Merge Join Operator [MERGEJOIN_165]
                            |           |  condition map:[{"":"Inner Join 0 to 1"}]
-                           |           |  keys:{"1":"_col0 (type: string)","0":"_col2 (type: string)"}
+                           |           |  keys:{"0":"_col2 (type: string)","1":"_col0 (type: string)"}
                            |           |  outputColumnNames:["_col2","_col5"]
                            |           |  Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
-                           |           |<-Map 12 [SIMPLE_EDGE]
-                           |           |  Reduce Output Operator [RS_23]
+                           |           |<-Map 23 [SIMPLE_EDGE]
+                           |           |  Reduce Output Operator [RS_58]
                            |           |     key expressions:_col0 (type: string)
                            |           |     Map-reduce partition columns:_col0 (type: string)
                            |           |     sort order:+
                            |           |     Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                            |           |     value expressions:_col1 (type: string)
-                           |           |     Select Operator [SEL_14]
+                           |           |     Select Operator [SEL_49]
                            |           |        outputColumnNames:["_col0","_col1"]
                            |           |        Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                           |           |        Filter Operator [FIL_150]
+                           |           |        Filter Operator [FIL_155]
                            |           |           predicate:key is not null (type: boolean)
                            |           |           Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                           |           |           TableScan [TS_13]
+                           |           |           TableScan [TS_48]
                            |           |              alias:y
                            |           |              Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                           |           |<-Reducer 4 [SIMPLE_EDGE]
-                           |              Reduce Output Operator [RS_21]
+                           |           |<-Reducer 18 [SIMPLE_EDGE]
+                           |              Reduce Output Operator [RS_56]
                            |                 key expressions:_col2 (type: string)
                            |                 Map-reduce partition columns:_col2 (type: string)
                            |                 sort order:+
-                           |                 Statistics:Num rows: 144 Data size: 1509 Basic stats: COMPLETE Column stats: NONE
-                           |                 Merge Join Operator [MERGEJOIN_162]
+                           |                 Statistics:Num rows: 209 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
+                           |                 Merge Join Operator [MERGEJOIN_164]
                            |                 |  condition map:[{"":"Inner Join 0 to 1"}]
-                           |                 |  keys:{"1":"_col1 (type: string)","0":"_col1 (type: string)"}
+                           |                 |  keys:{"0":"_col1 (type: string)","1":"_col1 (type: string)"}
                            |                 |  outputColumnNames:["_col2"]
-                           |                 |  Statistics:Num rows: 144 Data size: 1509 Basic stats: COMPLETE Column stats: NONE
-                           |                 |<-Map 11 [SIMPLE_EDGE]
-                           |                 |  Reduce Output Operator [RS_18]
+                           |                 |  Statistics:Num rows: 209 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
+                           |                 |<-Map 22 [SIMPLE_EDGE]
+                           |                 |  Reduce Output Operator [RS_53]
                            |                 |     key expressions:_col1 (type: string)
                            |                 |     Map-reduce partition columns:_col1 (type: string)
                            |                 |     sort order:+
                            |                 |     Statistics:Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
                            |                 |     value expressions:_col0 (type: string)
-                           |                 |     Select Operator [SEL_12]
+                           |                 |     Select Operator [SEL_47]
                            |                 |        outputColumnNames:["_col0","_col1"]
                            |                 |        Statistics:Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
-                           |                 |        Filter Operator [FIL_149]
+                           |                 |        Filter Operator [FIL_154]
                            |                 |           predicate:(value is not null and key is not null) (type: boolean)
                            |                 |           Statistics:Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
-                           |                 |           TableScan [TS_11]
+                           |                 |           TableScan [TS_46]
                            |                 |              alias:x
                            |                 |              Statistics:Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
-                           |                 |<-Reducer 3 [SIMPLE_EDGE]
-                           |                    Reduce Output Operator [RS_16]
+                           |                 |<-Reducer 17 [SIMPLE_EDGE]
+                           |                    Reduce Output Operator [RS_51]
                            |                       key expressions:_col1 (type: string)
                            |                       Map-reduce partition columns:_col1 (type: string)
                            |                       sort order:+
-                           |                       Statistics:Num rows: 131 Data size: 1372 Basic stats: COMPLETE Column stats: NONE
-                           |                       Select Operator [SEL_10]
+                           |                       Statistics:Num rows: 190 Data size: 2008 Basic stats: COMPLETE Column stats: NONE
+                           |                       Select Operator [SEL_45]
                            |                          outputColumnNames:["_col1"]
-                           |                          Statistics:Num rows: 131 Data size: 1372 Basic stats: COMPLETE Column stats: NONE
-                           |                          Group By Operator [GBY_9]
+                           |                          Statistics:Num rows: 190 Data size: 2008 Basic stats: COMPLETE Column stats: NONE
+                           |                          Group By Operator [GBY_44]
                            |                          |  keys:KEY._col0 (type: string), KEY._col1 (type: string)
                            |                          |  outputColumnNames:["_col0","_col1"]
-                           |                          |  Statistics:Num rows: 131 Data size: 1372 Basic stats: COMPLETE Column stats: NONE
-                           |                          |<-Union 2 [SIMPLE_EDGE]
-                           |                             |<-Map 1 [CONTAINS]
-                           |                             |  Reduce Output Operator [RS_8]
+                           |                          |  Statistics:Num rows: 190 Data size: 2008 Basic stats: COMPLETE Column stats: NONE
+                           |                          |<-Union 16 [SIMPLE_EDGE]
+                           |                             |<-Map 21 [CONTAINS]
+                           |                             |  Reduce Output Operator [RS_43]
                            |                             |     key expressions:_col0 (type: string), _col1 (type: string)
                            |                             |     Map-reduce partition columns:_col0 (type: string), _col1 (type: string)
                            |                             |     sort order:++
-                           |                             |     Statistics:Num rows: 263 Data size: 2755 Basic stats: COMPLETE Column stats: NONE
-                           |                             |     Group By Operator [GBY_7]
+                           |                             |     Statistics:Num rows: 381 Data size: 4028 Basic stats: COMPLETE Column stats: NONE
+                           |                             |     Group By Operator [GBY_42]
                            |                             |        keys:_col0 (type: string), _col1 (type: string)
                            |                             |        outputColumnNames:["_col0","_col1"]
-                           |                             |        Statistics:Num rows: 263 Data size: 2755 Basic stats: COMPLETE Column stats: NONE
-                           |                             |        Select Operator [SEL_1]
+                           |                             |        Statistics:Num rows: 381 Data size: 4028 Basic stats: COMPLETE Column stats: NONE
+                           |                             |        Select Operator [SEL_38]
                            |                             |           outputColumnNames:["_col0","_col1"]
-                           |                             |           Statistics:Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
-                           |                             |           Filter Operator [FIL_147]
+                           |                             |           Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                           |                             |           Filter Operator [FIL_153]
                            |                             |              predicate:value is not null (type: boolean)
-                           |                             |              Statistics:Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
-                           |                             |              TableScan [TS_0]
-                           |                             |                 alias:x
-                           |                             |                 Statistics:Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
-                           |                             |<-Map 10 [CONTAINS]
-                           |                                Reduce Output Operator [RS_8]
+                           |                             |              Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                           |                             |              TableScan [TS_37]
+                           |                             |                 alias:y
+                           |                             |                 Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                           |                             |<-Reducer 15 [CONTAINS]
+                           |                                Reduce Output Operator [RS_43]
                            |                                   key expressions:_col0 (type: string), _col1 (type: string)
                            |                                   Map-reduce partition columns:_col0 (type: string), _col1 (type: string)
                            |                                   sort order:++
-                           |                                   Statistics:Num rows: 263 Data size: 2755 Basic stats: COMPLETE Column stats: NONE
-                           |                                   Group By Operator [GBY_7]
+                           |                                   Statistics:Num rows: 381 Data size: 4028 Basic stats: COMPLETE Column stats: NONE
+                           |                                   Group By Operator [GBY_42]
                            |                                      keys:_col0 (type: string), _col1 (type: string)
                            |                                      outputColumnNames:["_col0","_col1"]
-                           |                                      Statistics:Num rows: 263 Data size: 2755 Basic stats: COMPLETE Column stats: NONE
-                           |                                      Select Operator [SEL_3]
-                           |                                         outputColumnNames:["_col0","_col1"]
-                           |                                         Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                           |                                         Filter Operator [FIL_148]
-                           |                                            predicate:value is not null (type: boolean)
-                           |                                            Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                           |                                            TableScan [TS_2]
-                           |                                               alias:y
-                           |                                               Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                           |<-Reducer 19 [CONTAINS]
+                           |                                      Statistics:Num rows: 381 Data size: 4028 Basic stats: COMPLETE Column stats: NONE
+                           |                                      Group By Operator [GBY_35]
+                           |                                      |  keys:KEY._col0 (type: string), KEY._col1 (type: string)
+                           |                                      |  outputColumnNames:["_col0","_col1"]
+                           |                                      |  Statistics:Num rows: 131 Data size: 1372 Basic stats: COMPLETE Column stats: NONE
+                           |                                      |<-Union 14 [SIMPLE_EDGE]
+                           |                                         |<-Map 13 [CONTAINS]
+                           |                                         |  Reduce Output Operator [RS_34]
+                           |                                         |     key expressions:_col0 (type: string), _col1 (type: string)
+                           |                                         |     Map-reduce partition columns:_col0 (type: string), _col1 (type: string)
+                           |                                         |     sort order:++
+                           |                                         |     Statistics:Num rows: 263 Data size: 2755 Basic stats: COMPLETE Column stats: NONE
+                           |                                         |     Group By Operator [GBY_33]
+                           |                                         |        keys:_col0 (type: string), _col1 (type: string)
+                           |                                         |        outputColumnNames:["_col0","_col1"]
+                           |                                         |        Statistics:Num rows: 263 Data size: 2755 Basic stats: COMPLETE Column stats: NONE
+                           |                                         |        Select Operator [SEL_27]
+                           |                                         |           outputColumnNames:["_col0","_col1"]
+                           |                                         |           Statistics:Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
+                           |                                         |           Filter Operator [FIL_151]
+                           |                                         |              predicate:value is not null (type: boolean)
+                           |                                         |              Statistics:Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
+                           |                                         |              TableScan [TS_26]
+                           |                                         |                 alias:x
+                           |                                         |                 Statistics:Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+                           |                                         |<-Map 20 [CONTAINS]
+                           |                                            Reduce Output Operator [RS_34]
+                           |                                               key expressions:_col0 (type: string), _col1 (type: string)
+                           |                                               Map-reduce partition columns:_col0 (type: string), _col1 (type: string)
+                           |                                               sort order:++
+                           |                                               Statistics:Num rows: 263 Data size: 2755 Basic stats: COMPLETE Column stats: NONE
+                           |                                               Group By Operator [GBY_33]
+                           |                                                  keys:_col0 (type: string), _col1 (type: string)
+                           |                                                  outputColumnNames:["_col0","_col1"]
+                           |                                                  Statistics:Num rows: 263 Data size: 2755 Basic stats: COMPLETE Column stats: NONE
+                           |                                                  Select Operator [SEL_29]
+                           |                                                     outputColumnNames:["_col0","_col1"]
+                           |                                                     Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                           |                                                     Filter Operator [FIL_152]
+                           |                                                        predicate:value is not null (type: boolean)
+                           |                                                        Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                           |                                                        TableScan [TS_28]
+                           |                                                           alias:y
+                           |                                                           Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                           |<-Reducer 5 [CONTAINS]
                               Reduce Output Operator [RS_65]
                                  key expressions:_col0 (type: string), _col1 (type: string)
                                  Map-reduce partition columns:_col0 (type: string), _col1 (type: string)
@@ -1235,143 +1269,109 @@ Stage-0
                                     keys:_col0 (type: string), _col1 (type: string)
                                     outputColumnNames:["_col0","_col1"]
                                     Statistics:Num rows: 550 Data size: 5842 Basic stats: COMPLETE Column stats: NONE
-                                    Select Operator [SEL_60]
+                                    Select Operator [SEL_25]
                                        outputColumnNames:["_col0","_col1"]
                                        Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
-                                       Merge Join Operator [MERGEJOIN_165]
+                                       Merge Join Operator [MERGEJOIN_163]
                                        |  condition map:[{"":"Inner Join 0 to 1"}]
-                                       |  keys:{"1":"_col0 (type: string)","0":"_col2 (type: string)"}
+                                       |  keys:{"0":"_col2 (type: string)","1":"_col0 (type: string)"}
                                        |  outputColumnNames:["_col2","_col5"]
                                        |  Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
-                                       |<-Map 23 [SIMPLE_EDGE]
-                                       |  Reduce Output Operator [RS_58]
+                                       |<-Map 12 [SIMPLE_EDGE]
+                                       |  Reduce Output Operator [RS_23]
                                        |     key expressions:_col0 (type: string)
                                        |     Map-reduce partition columns:_col0 (type: string)
                                        |     sort order:+
                                        |     Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                                        |     value expressions:_col1 (type: string)
-                                       |     Select Operator [SEL_49]
+                                       |     Select Operator [SEL_14]
                                        |        outputColumnNames:["_col0","_col1"]
                                        |        Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                                       |        Filter Operator [FIL_155]
+                                       |        Filter Operator [FIL_150]
                                        |           predicate:key is not null (type: boolean)
                                        |           Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                                       |           TableScan [TS_48]
+                                       |           TableScan [TS_13]
                                        |              alias:y
                                        |              Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                                       |<-Reducer 18 [SIMPLE_EDGE]
-                                          Reduce Output Operator [RS_56]
+                                       |<-Reducer 4 [SIMPLE_EDGE]
+                                          Reduce Output Operator [RS_21]
                                              key expressions:_col2 (type: string)
                                              Map-reduce partition columns:_col2 (type: string)
                                              sort order:+
-                                             Statistics:Num rows: 209 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
-                                             Merge Join Operator [MERGEJOIN_164]
+                                             Statistics:Num rows: 144 Data size: 1509 Basic stats: COMPLETE Column stats: NONE
+                                             Merge Join Operator [MERGEJOIN_162]
                                              |  condition map:[{"":"Inner Join 0 to 1"}]
-                                             |  keys:{"1":"_col1 (type: string)","0":"_col1 (type: string)"}
+                                             |  keys:{"0":"_col1 (type: string)","1":"_col1 (type: string)"}
                                              |  outputColumnNames:["_col2"]
-                                             |  Statistics:Num rows: 209 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
-                                             |<-Map 22 [SIMPLE_EDGE]
-                                             |  Reduce Output Operator [RS_53]
+                                             |  Statistics:Num rows: 144 Data size: 1509 Basic stats: COMPLETE Column stats: NONE
+                                             |<-Map 11 [SIMPLE_EDGE]
+                                             |  Reduce Output Operator [RS_18]
                                              |     key expressions:_col1 (type: string)
                                              |     Map-reduce partition columns:_col1 (type: string)
                                              |     sort order:+
                                              |     Statistics:Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
                                              |     value expressions:_col0 (type: string)
-                                             |     Select Operator [SEL_47]
+                                             |     Select Operator [SEL_12]
                                              |        outputColumnNames:["_col0","_col1"]
                                              |        Statistics:Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
-                                             |        Filter Operator [FIL_154]
+                                             |        Filter Operator [FIL_149]
                                              |           predicate:(value is not null and key is not null) (type: boolean)
                                              |           Statistics:Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
-                                             |           TableScan [TS_46]
+                                             |           TableScan [TS_11]
                                              |              alias:x
                                              |              Statistics:Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
-                                             |<-Reducer 17 [SIMPLE_EDGE]
-                                                Reduce Output Operator [RS_51]
+                                             |<-Reducer 3 [SIMPLE_EDGE]
+                                                Reduce Output Operator [RS_16]
                                                    key expressions:_col1 (type: string)
                                                    Map-reduce partition columns:_col1 (type: string)
                                                    sort order:+
-                                                   Statistics:Num rows: 190 Data size: 2008 Basic stats: COMPLETE Column stats: NONE
-                                                   Select Operator [SEL_45]
+                                                   Statistics:Num rows: 131 Data size: 1372 Basic stats: COMPLETE Column stats: NONE
+                                                   Select Operator [SEL_10]
                                                       outputColumnNames:["_col1"]
-                                                      Statistics:Num rows: 190 Data size: 2008 Basic stats: COMPLETE Column stats: NONE
-                                                      Group By Operator [GBY_44]
+                                                      Statistics:Num rows: 131 Data size: 1372 Basic stats: COMPLETE Column stats: NONE
+                                                      Group By Operator [GBY_9]
                                                       |  keys:KEY._col0 (type: string), KEY._col1 (type: string)
                                                       |  outputColumnNames:["_col0","_col1"]
-                                                      |  Statistics:Num rows: 190 Data size: 2008 Basic stats: COMPLETE Column stats: NONE
-                                                      |<-Union 16 [SIMPLE_EDGE]
-                                                         |<-Map 21 [CONTAINS]
-                                                         |  Reduce Output Operator [RS_43]
+                                                      |  Statistics:Num rows: 131 Data size: 1372 Basic stats: COMPLETE Column stats: NONE
+                                                      |<-Union 2 [SIMPLE_EDGE]
+                                                         |<-Map 1 [CONTAINS]
+                                                         |  Reduce Output Operator [RS_8]
                                                          |     key expressions:_col0 (type: string), _col1 (type: string)
                                                          |     Map-reduce partition columns:_col0 (type: string), _col1 (type: string)
                                                          |     sort order:++
-                                                         |     Statistics:Num rows: 381 Data size: 4028 Basic stats: COMPLETE Column stats: NONE
-                                                         |     Group By Operator [GBY_42]
+                                                         |     Statistics:Num rows: 263 Data size: 2755 Basic stats: COMPLETE Column stats: NONE
+                                                         |     Group By Operator [GBY_7]
                                                          |        keys:_col0 (type: string), _col1 (type: string)
                                                          |        outputColumnNames:["_col0","_col1"]
-                                                         |        Statistics:Num rows: 381 Data size: 4028 Basic stats: COMPLETE Column stats: NONE
-                                                         |        Select Operator [SEL_38]
+                                                         |        Statistics:Num rows: 263 Data size: 2755 Basic stats: COMPLETE Column stats: NONE
+                                                         |        Select Operator [SEL_1]
                                                          |           outputColumnNames:["_col0","_col1"]
-                                                         |           Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                                                         |           Filter Operator [FIL_153]
+                                                         |           Statistics:Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
+                                                         |           Filter Operator [FIL_147]
                                                          |              predicate:value is not null (type: boolean)
-                                                         |              Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                                                         |              TableScan [TS_37]
-                                                         |                 alias:y
-                                                         |                 Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                                                         |<-Reducer 15 [CONTAINS]
-                                                            Reduce Output Operator [RS_43]
+                                                         |              Statistics:Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
+                                                         |              TableScan [TS_0]
+                                                         |                 alias:x
+                                                         |                 Statistics:Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+                                                         |<-Map 10 [CONTAINS]
+                                                            Reduce Output Operator [RS_8]
                                                                key expressions:_col0 (type: string), _col1 (type: string)
                                                                Map-reduce partition columns:_col0 (type: string), _col1 (type: string)
                                                                sort order:++
-                                                               Statistics:Num rows: 381 Data size: 4028 Basic stats: COMPLETE Column stats: NONE
-                                                               Group By Operator [GBY_42]
+                                                               Statistics:Num rows: 263 Data size: 2755 Basic stats: COMPLETE Column stats: NONE
+                                                               Group By Operator [GBY_7]
                                                                   keys:_col0 (type: string), _col1 (type: string)
                                                                   outputColumnNames:["_col0","_col1"]
-                                                                  Statistics:Num rows: 381 Data size: 4028 Basic stats: COMPLETE Column stats: NONE
-                                                                  Group By Operator [GBY_35]
-                                                                  |  keys:KEY._col0 (type: string), KEY._col1 (type: string)
-                                                                  |  outputColumnNames:["_col0","_col1"]
-                                                                  |  Statistics:Num rows: 131 Data size: 1372 Basic stats: COMPLETE Column stats: NONE
-                                                                  |<-Union 14 [SIMPLE_EDGE]
-                                                                     |<-Map 13 [CONTAINS]
-                                                                     |  Reduce Output Operator [RS_34]
-                                                                     |     key expressions:_col0 (type: string), _col1 (type: string)
-                                                                     |     Map-reduce partition columns:_col0 (type: string), _col1 (type: string)
-                                                                     |     sort order:++
-                                                                     |     Statistics:Num rows: 263 Data size: 2755 Basic stats: COMPLETE Column stats: NONE
-                                                                     |     Group By Operator [GBY_33]
-                                                                     |        keys:_col0 (type: string), _col1 (type: string)
-                                                                     |        outputColumnNames:["_col0","_col1"]
-                                                                     |        Statistics:Num rows: 263 Data size: 2755 Basic stats: COMPLETE Column stats: NONE
-                                                                     |        Select Operator [SEL_27]
-                                                                     |           outputColumnNames:["_col0","_col1"]
-                                                                     |           Statistics:Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
-                                                                     |           Filter Operator [FIL_151]
-                                                                     |              predicate:value is not null (type: boolean)
-                                                                     |              Statistics:Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
-                                                                     |              TableScan [TS_26]
-                                                                     |                 alias:x
-                                                                     |                 Statistics:Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
-                                                                     |<-Map 20 [CONTAINS]
-                                                                        Reduce Output Operator [RS_34]
-                                                                           key expressions:_col0 (type: string), _col1 (type: string)
-                                                                           Map-reduce partition columns:_col0 (type: string), _col1 (type: string)
-                                                                           sort order:++
-                                                                           Statistics:Num rows: 263 Data size: 2755 Basic stats: COMPLETE Column stats: NONE
-                                                                           Group By Operator [GBY_33]
-                                                                              keys:_col0 (type: string), _col1 (type: string)
-                                                                              outputColumnNames:["_col0","_col1"]
-                                                                              Statistics:Num rows: 263 Data size: 2755 Basic stats: COMPLETE Column stats: NONE
-                                                                              Select Operator [SEL_29]
-                                                                                 outputColumnNames:["_col0","_col1"]
-                                                                                 Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                                                                                 Filter Operator [FIL_152]
-                                                                                    predicate:value is not null (type: boolean)
-                                                                                    Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                                                                                    TableScan [TS_28]
-                                                                                       alias:y
-                                                                                       Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                                                                  Statistics:Num rows: 263 Data size: 2755 Basic stats: COMPLETE Column stats: NONE
+                                                                  Select Operator [SEL_3]
+                                                                     outputColumnNames:["_col0","_col1"]
+                                                                     Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                                                                     Filter Operator [FIL_148]
+                                                                        predicate:value is not null (type: boolean)
+                                                                        Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                                                                        TableScan [TS_2]
+                                                                           alias:y
+                                                                           Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 
 PREHOOK: query: EXPLAIN
 SELECT x.key, z.value, y.value
@@ -1396,7 +1396,7 @@ Stage-0
          File Output Operator [FS_18]
             compressed:false
             Statistics:Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Select Operator [SEL_17]
                outputColumnNames:["_col0","_col1","_col2"]
                Statistics:Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
@@ -1525,7 +1525,7 @@ Stage-0
          File Output Operator [FS_69]
             compressed:false
             Statistics:Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Limit [LIM_68]
                Number of rows:100
                Statistics:Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
@@ -1605,7 +1605,7 @@ Stage-0
                                        Statistics:Num rows: 731 Data size: 7775 Basic stats: COMPLETE Column stats: NONE
                                        Map Join Operator [MAPJOIN_110]
                                        |  condition map:[{"":"Inner Join 0 to 1"}]
-                                       |  keys:{"Map 10":"_col2 (type: string), _col4 (type: string)","Map 3":"_col4 (type: string), _col6 (type: string)"}
+                                       |  keys:{"Map 3":"_col4 (type: string), _col6 (type: string)","Map 10":"_col2 (type: string), _col4 (type: string)"}
                                        |  outputColumnNames:["_col2","_col3","_col14","_col15","_col17"]
                                        |  Statistics:Num rows: 731 Data size: 7775 Basic stats: COMPLETE Column stats: NONE
                                        |<-Map 10 [BROADCAST_EDGE]
@@ -1617,7 +1617,7 @@ Stage-0
                                        |     value expressions:_col3 (type: string), _col5 (type: string)
                                        |     Map Join Operator [MAPJOIN_109]
                                        |     |  condition map:[{"":"Inner Join 0 to 1"}]
-                                       |     |  keys:{"Map 10":"_col0 (type: string)","Map 9":"_col0 (type: string)"}
+                                       |     |  keys:{"Map 9":"_col0 (type: string)","Map 10":"_col0 (type: string)"}
                                        |     |  outputColumnNames:["_col2","_col3","_col4","_col5"]
                                        |     |  Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
                                        |     |<-Map 9 [BROADCAST_EDGE]
@@ -1755,7 +1755,7 @@ Stage-0
          File Output Operator [FS_59]
             compressed:false
             Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Group By Operator [GBY_57]
             |  keys:KEY._col0 (type: string), KEY._col1 (type: string)
             |  outputColumnNames:["_col0","_col1"]
@@ -2022,7 +2022,7 @@ Stage-0
          File Output Operator [FS_119]
             compressed:false
             Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Group By Operator [GBY_117]
             |  keys:KEY._col0 (type: string), KEY._col1 (type: string)
             |  outputColumnNames:["_col0","_col1"]
@@ -2043,7 +2043,7 @@ Stage-0
                |           Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                |           Map Join Operator [MAPJOIN_167]
                |           |  condition map:[{"":"Inner Join 0 to 1"}]
-               |           |  keys:{"Map 31":"_col0 (type: string)","Reducer 26":"_col2 (type: string)"}
+               |           |  keys:{"Reducer 26":"_col2 (type: string)","Map 31":"_col0 (type: string)"}
                |           |  outputColumnNames:["_col2","_col5"]
                |           |  Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                |           |<-Reducer 26 [BROADCAST_EDGE]
@@ -2054,7 +2054,7 @@ Stage-0
                |           |     Statistics:Num rows: 242 Data size: 2565 Basic stats: COMPLETE Column stats: NONE
                |           |     Map Join Operator [MAPJOIN_166]
                |           |     |  condition map:[{"":"Inner Join 0 to 1"}]
-               |           |     |  keys:{"Map 30":"_col1 (type: string)","Reducer 26":"_col1 (type: string)"}
+               |           |     |  keys:{"Reducer 26":"_col1 (type: string)","Map 30":"_col1 (type: string)"}
                |           |     |  outputColumnNames:["_col2"]
                |           |     |  Statistics:Num rows: 242 Data size: 2565 Basic stats: COMPLETE Column stats: NONE
                |           |     |<-Map 30 [BROADCAST_EDGE]
@@ -2081,7 +2081,7 @@ Stage-0
                |           |           |  outputColumnNames:["_col0","_col1"]
                |           |           |  Statistics:Num rows: 220 Data size: 2332 Basic stats: COMPLETE Column stats: NONE
                |           |           |<-Union 25 [SIMPLE_EDGE]
-               |           |              |<-Reducer 24 [CONTAINS]
+               |           |              |<-Map 29 [CONTAINS]
                |           |              |  Reduce Output Operator [RS_94]
                |           |              |     key expressions:_col0 (type: string), _col1 (type: string)
                |           |              |     Map-reduce partition columns:_col0 (type: string), _col1 (type: string)
@@ -2091,84 +2091,16 @@ Stage-0
                |           |              |        keys:_col0 (type: string), _col1 (type: string)
                |           |              |        outputColumnNames:["_col0","_col1"]
                |           |              |        Statistics:Num rows: 440 Data size: 4664 Basic stats: COMPLETE Column stats: NONE
-               |           |              |        Group By Operator [GBY_86]
-               |           |              |        |  keys:KEY._col0 (type: string), KEY._col1 (type: string)
-               |           |              |        |  outputColumnNames:["_col0","_col1"]
-               |           |              |        |  Statistics:Num rows: 190 Data size: 2008 Basic stats: COMPLETE Column stats: NONE
-               |           |              |        |<-Union 23 [SIMPLE_EDGE]
-               |           |              |           |<-Reducer 22 [CONTAINS]
-               |           |              |           |  Reduce Output Operator [RS_85]
-               |           |              |           |     key expressions:_col0 (type: string), _col1 (type: string)
-               |           |              |           |     Map-reduce partition columns:_col0 (type: string), _col1 (type: string)
-               |           |              |           |     sort order:++
-               |           |              |           |     Statistics:Num rows: 381 Data size: 4028 Basic stats: COMPLETE Column stats: NONE
-               |           |              |           |     Group By Operator [GBY_84]
-               |           |              |           |        keys:_col0 (type: string), _col1 (type: string)
-               |           |              |           |        outputColumnNames:["_col0","_col1"]
-               |           |              |           |        Statistics:Num rows: 381 Data size: 4028 Basic stats: COMPLETE Column stats: NONE
-               |           |              |           |        Group By Operator [GBY_77]
-               |           |              |           |        |  keys:KEY._col0 (type: string), KEY._col1 (type: string)
-               |           |              |           |        |  outputColumnNames:["_col0","_col1"]
-               |           |              |           |        |  Statistics:Num rows: 131 Data size: 1372 Basic stats: COMPLETE Column stats: NONE
-               |           |              |           |        |<-Union 21 [SIMPLE_EDGE]
-               |           |              |           |           |<-Map 20 [CONTAINS]
-               |           |              |           |           |  Reduce Output Operator [RS_76]
-               |           |              |           |           |     key expressions:_col0 (type: string), _col1 (type: string)
-               |           |              |           |           |     Map-reduce partition columns:_col0 (type: string), _col1 (type: string)
-               |           |              |           |           |     sort order:++
-               |           |              |           |           |     Statistics:Num rows: 263 Data size: 2755 Basic stats: COMPLETE Column stats: NONE
-               |           |              |           |           |     Group By Operator [GBY_75]
-               |           |              |           |           |        keys:_col0 (type: string), _col1 (type: string)
-               |           |              |           |           |        outputColumnNames:["_col0","_col1"]
-               |           |              |           |           |        Statistics:Num rows: 263 Data size: 2755 Basic stats: COMPLETE Column stats: NONE
-               |           |              |           |           |        Select Operator [SEL_69]
-               |           | 

<TRUNCATED>

[17/24] hive git commit: HIVE-11763: Use * instead of sum(hash(*)) on Parquet predicate (PPD) integration tests (Sergio Pena, reviewed by Ferdinand Xu)

Posted by pr...@apache.org.
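HIVE-11763 switches the Parquet predicate-pushdown (PPD) integration tests from comparing a single aggregated hash to comparing the selected rows themselves. As a minimal sketch of the query pattern being replaced (the table name, column, and predicate are taken from the parquet_ppd_date golden file in the diff below; the .q source files themselves are not part of this message):

  -- before: the whole result set is folded into one opaque hash value,
  -- so the golden file records only a single number per query
  select sum(hash(*)) from newtypestbl where da='1970-02-20';

  -- after: rows are listed verbatim in the golden file; the data-load
  -- statement also gains LIMIT 10 (visible in the insert overwrite diff
  -- below) so the listed result sets stay small
  select * from newtypestbl where da='1970-02-20';

Listing rows directly presumably makes a PPD regression show up in the .q.out diff as missing or extra rows rather than as an unexplained change in a hash value.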
http://git-wip-us.apache.org/repos/asf/hive/blob/66fb9601/ql/src/test/results/clientpositive/parquet_ppd_date.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/parquet_ppd_date.q.out b/ql/src/test/results/clientpositive/parquet_ppd_date.q.out
index aba302e..60c9a59 100644
--- a/ql/src/test/results/clientpositive/parquet_ppd_date.q.out
+++ b/ql/src/test/results/clientpositive/parquet_ppd_date.q.out
@@ -6,11 +6,11 @@ POSTHOOK: query: create table newtypestbl(c char(10), v varchar(10), d decimal(5
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@newtypestbl
-PREHOOK: query: insert overwrite table newtypestbl select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2) uniontbl
+PREHOOK: query: insert overwrite table newtypestbl select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2 limit 10) uniontbl
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@newtypestbl
-POSTHOOK: query: insert overwrite table newtypestbl select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2) uniontbl
+POSTHOOK: query: insert overwrite table newtypestbl select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2 limit 10) uniontbl
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 POSTHOOK: Output: default@newtypestbl
@@ -19,283 +19,417 @@ POSTHOOK: Lineage: newtypestbl.d EXPRESSION []
 POSTHOOK: Lineage: newtypestbl.da EXPRESSION []
 POSTHOOK: Lineage: newtypestbl.v EXPRESSION []
 PREHOOK: query: -- date data types (EQUAL, NOT_EQUAL, LESS_THAN, LESS_THAN_EQUALS, IN, BETWEEN tests)
-select sum(hash(*)) from newtypestbl where da='1970-02-20'
+select * from newtypestbl where da='1970-02-20'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
 POSTHOOK: query: -- date data types (EQUAL, NOT_EQUAL, LESS_THAN, LESS_THAN_EQUALS, IN, BETWEEN tests)
-select sum(hash(*)) from newtypestbl where da='1970-02-20'
+select * from newtypestbl where da='1970-02-20'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951929000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where da='1970-02-20'
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where da='1970-02-20'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where da='1970-02-20'
+POSTHOOK: query: select * from newtypestbl where da='1970-02-20'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951929000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where da= date '1970-02-20'
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where da= date '1970-02-20'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where da= date '1970-02-20'
+POSTHOOK: query: select * from newtypestbl where da= date '1970-02-20'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951929000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where da=cast('1970-02-20' as date)
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where da=cast('1970-02-20' as date)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where da=cast('1970-02-20' as date)
+POSTHOOK: query: select * from newtypestbl where da=cast('1970-02-20' as date)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951929000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where da=cast('1970-02-20' as date)
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where da=cast('1970-02-20' as date)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where da=cast('1970-02-20' as date)
+POSTHOOK: query: select * from newtypestbl where da=cast('1970-02-20' as date)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951929000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where da=cast('1970-02-20' as varchar(20))
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where da=cast('1970-02-20' as varchar(20))
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where da=cast('1970-02-20' as varchar(20))
+POSTHOOK: query: select * from newtypestbl where da=cast('1970-02-20' as varchar(20))
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951929000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where da=cast('1970-02-20' as varchar(20))
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where da=cast('1970-02-20' as varchar(20))
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where da=cast('1970-02-20' as varchar(20))
+POSTHOOK: query: select * from newtypestbl where da=cast('1970-02-20' as varchar(20))
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951929000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where da!='1970-02-20'
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where da!='1970-02-20'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where da!='1970-02-20'
+POSTHOOK: query: select * from newtypestbl where da!='1970-02-20'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-334427804500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where da!='1970-02-20'
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+PREHOOK: query: select * from newtypestbl where da!='1970-02-20'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where da!='1970-02-20'
+POSTHOOK: query: select * from newtypestbl where da!='1970-02-20'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-334427804500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where da<'1970-02-27'
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+PREHOOK: query: select * from newtypestbl where da<'1970-02-27'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where da<'1970-02-27'
+POSTHOOK: query: select * from newtypestbl where da<'1970-02-27'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951929000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where da<'1970-02-27'
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where da<'1970-02-27'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where da<'1970-02-27'
+POSTHOOK: query: select * from newtypestbl where da<'1970-02-27'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951929000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where da<'1970-02-29'
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where da<'1970-02-29'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where da<'1970-02-29'
+POSTHOOK: query: select * from newtypestbl where da<'1970-02-29'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-81475875500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where da<'1970-02-29'
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where da<'1970-02-29'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where da<'1970-02-29'
+POSTHOOK: query: select * from newtypestbl where da<'1970-02-29'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-81475875500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where da<'1970-02-15'
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where da<'1970-02-15'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where da<'1970-02-15'
+POSTHOOK: query: select * from newtypestbl where da<'1970-02-15'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-NULL
-PREHOOK: query: select sum(hash(*)) from newtypestbl where da<'1970-02-15'
+PREHOOK: query: select * from newtypestbl where da<'1970-02-15'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where da<'1970-02-15'
+POSTHOOK: query: select * from newtypestbl where da<'1970-02-15'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-NULL
-PREHOOK: query: select sum(hash(*)) from newtypestbl where da<='1970-02-20'
+PREHOOK: query: select * from newtypestbl where da<='1970-02-20'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where da<='1970-02-20'
+POSTHOOK: query: select * from newtypestbl where da<='1970-02-20'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951929000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where da<='1970-02-20'
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where da<='1970-02-20'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where da<='1970-02-20'
+POSTHOOK: query: select * from newtypestbl where da<='1970-02-20'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951929000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where da<='1970-02-27'
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where da<='1970-02-27'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where da<='1970-02-27'
+POSTHOOK: query: select * from newtypestbl where da<='1970-02-27'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-81475875500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where da<='1970-02-27'
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where da<='1970-02-27'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where da<='1970-02-27'
+POSTHOOK: query: select * from newtypestbl where da<='1970-02-27'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-81475875500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where da in (cast('1970-02-21' as date), cast('1970-02-27' as date))
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where da in (cast('1970-02-21' as date), cast('1970-02-27' as date))
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where da in (cast('1970-02-21' as date), cast('1970-02-27' as date))
+POSTHOOK: query: select * from newtypestbl where da in (cast('1970-02-21' as date), cast('1970-02-27' as date))
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-334427804500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where da in (cast('1970-02-21' as date), cast('1970-02-27' as date))
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+PREHOOK: query: select * from newtypestbl where da in (cast('1970-02-21' as date), cast('1970-02-27' as date))
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where da in (cast('1970-02-21' as date), cast('1970-02-27' as date))
+POSTHOOK: query: select * from newtypestbl where da in (cast('1970-02-21' as date), cast('1970-02-27' as date))
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-334427804500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where da in (cast('1970-02-20' as date), cast('1970-02-27' as date))
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+PREHOOK: query: select * from newtypestbl where da in (cast('1970-02-20' as date), cast('1970-02-27' as date))
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where da in (cast('1970-02-20' as date), cast('1970-02-27' as date))
+POSTHOOK: query: select * from newtypestbl where da in (cast('1970-02-20' as date), cast('1970-02-27' as date))
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-81475875500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where da in (cast('1970-02-20' as date), cast('1970-02-27' as date))
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where da in (cast('1970-02-20' as date), cast('1970-02-27' as date))
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where da in (cast('1970-02-20' as date), cast('1970-02-27' as date))
+POSTHOOK: query: select * from newtypestbl where da in (cast('1970-02-20' as date), cast('1970-02-27' as date))
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-81475875500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where da in (cast('1970-02-21' as date), cast('1970-02-22' as date))
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where da in (cast('1970-02-21' as date), cast('1970-02-22' as date))
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where da in (cast('1970-02-21' as date), cast('1970-02-22' as date))
+POSTHOOK: query: select * from newtypestbl where da in (cast('1970-02-21' as date), cast('1970-02-22' as date))
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-NULL
-PREHOOK: query: select sum(hash(*)) from newtypestbl where da in (cast('1970-02-21' as date), cast('1970-02-22' as date))
+PREHOOK: query: select * from newtypestbl where da in (cast('1970-02-21' as date), cast('1970-02-22' as date))
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where da in (cast('1970-02-21' as date), cast('1970-02-22' as date))
+POSTHOOK: query: select * from newtypestbl where da in (cast('1970-02-21' as date), cast('1970-02-22' as date))
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-NULL
-PREHOOK: query: select sum(hash(*)) from newtypestbl where da between '1970-02-19' and '1970-02-22'
+PREHOOK: query: select * from newtypestbl where da between '1970-02-19' and '1970-02-22'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where da between '1970-02-19' and '1970-02-22'
+POSTHOOK: query: select * from newtypestbl where da between '1970-02-19' and '1970-02-22'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951929000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where da between '1970-02-19' and '1970-02-22'
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where da between '1970-02-19' and '1970-02-22'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where da between '1970-02-19' and '1970-02-22'
+POSTHOOK: query: select * from newtypestbl where da between '1970-02-19' and '1970-02-22'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951929000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where da between '1970-02-19' and '1970-02-28'
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where da between '1970-02-19' and '1970-02-28'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where da between '1970-02-19' and '1970-02-28'
+POSTHOOK: query: select * from newtypestbl where da between '1970-02-19' and '1970-02-28'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-81475875500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where da between '1970-02-19' and '1970-02-28'
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where da between '1970-02-19' and '1970-02-28'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where da between '1970-02-19' and '1970-02-28'
+POSTHOOK: query: select * from newtypestbl where da between '1970-02-19' and '1970-02-28'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-81475875500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where da between '1970-02-18' and '1970-02-19'
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where da between '1970-02-18' and '1970-02-19'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where da between '1970-02-18' and '1970-02-19'
+POSTHOOK: query: select * from newtypestbl where da between '1970-02-18' and '1970-02-19'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-NULL
-PREHOOK: query: select sum(hash(*)) from newtypestbl where da between '1970-02-18' and '1970-02-19'
+PREHOOK: query: select * from newtypestbl where da between '1970-02-18' and '1970-02-19'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where da between '1970-02-18' and '1970-02-19'
+POSTHOOK: query: select * from newtypestbl where da between '1970-02-18' and '1970-02-19'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-NULL

http://git-wip-us.apache.org/repos/asf/hive/blob/66fb9601/ql/src/test/results/clientpositive/parquet_ppd_decimal.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/parquet_ppd_decimal.q.out b/ql/src/test/results/clientpositive/parquet_ppd_decimal.q.out
index 9e48df8..ec603eb 100644
--- a/ql/src/test/results/clientpositive/parquet_ppd_decimal.q.out
+++ b/ql/src/test/results/clientpositive/parquet_ppd_decimal.q.out
@@ -6,11 +6,11 @@ POSTHOOK: query: create table newtypestbl(c char(10), v varchar(10), d decimal(5
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@newtypestbl
-PREHOOK: query: insert overwrite table newtypestbl select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2) uniontbl
+PREHOOK: query: insert overwrite table newtypestbl select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2 limit 10) uniontbl
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@newtypestbl
-POSTHOOK: query: insert overwrite table newtypestbl select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2) uniontbl
+POSTHOOK: query: insert overwrite table newtypestbl select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2 limit 10) uniontbl
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 POSTHOOK: Output: default@newtypestbl
@@ -19,472 +19,750 @@ POSTHOOK: Lineage: newtypestbl.d EXPRESSION []
 POSTHOOK: Lineage: newtypestbl.da EXPRESSION []
 POSTHOOK: Lineage: newtypestbl.v EXPRESSION []
 PREHOOK: query: -- decimal data types (EQUAL, NOT_EQUAL, LESS_THAN, LESS_THAN_EQUALS, IN, BETWEEN tests)
-select sum(hash(*)) from newtypestbl where d=0.22
+select * from newtypestbl where d=0.22
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
 POSTHOOK: query: -- decimal data types (EQUAL, NOT_EQUAL, LESS_THAN, LESS_THAN_EQUALS, IN, BETWEEN tests)
-select sum(hash(*)) from newtypestbl where d=0.22
+select * from newtypestbl where d=0.22
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951929000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where d=0.22
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where d=0.22
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where d=0.22
+POSTHOOK: query: select * from newtypestbl where d=0.22
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951929000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where d='0.22'
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where d='0.22'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where d='0.22'
+POSTHOOK: query: select * from newtypestbl where d='0.22'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951929000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where d='0.22'
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where d='0.22'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where d='0.22'
+POSTHOOK: query: select * from newtypestbl where d='0.22'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951929000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where d=cast('0.22' as float)
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where d=cast('0.22' as float)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where d=cast('0.22' as float)
+POSTHOOK: query: select * from newtypestbl where d=cast('0.22' as float)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951929000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where d=cast('0.22' as float)
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where d=cast('0.22' as float)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where d=cast('0.22' as float)
+POSTHOOK: query: select * from newtypestbl where d=cast('0.22' as float)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951929000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where d!=0.22
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where d!=0.22
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where d!=0.22
+POSTHOOK: query: select * from newtypestbl where d!=0.22
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-334427804500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where d!=0.22
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+PREHOOK: query: select * from newtypestbl where d!=0.22
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where d!=0.22
+POSTHOOK: query: select * from newtypestbl where d!=0.22
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-334427804500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where d!='0.22'
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+PREHOOK: query: select * from newtypestbl where d!='0.22'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where d!='0.22'
+POSTHOOK: query: select * from newtypestbl where d!='0.22'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-334427804500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where d!='0.22'
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+PREHOOK: query: select * from newtypestbl where d!='0.22'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where d!='0.22'
+POSTHOOK: query: select * from newtypestbl where d!='0.22'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-334427804500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where d!=cast('0.22' as float)
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+PREHOOK: query: select * from newtypestbl where d!=cast('0.22' as float)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where d!=cast('0.22' as float)
+POSTHOOK: query: select * from newtypestbl where d!=cast('0.22' as float)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-334427804500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where d!=cast('0.22' as float)
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+PREHOOK: query: select * from newtypestbl where d!=cast('0.22' as float)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where d!=cast('0.22' as float)
+POSTHOOK: query: select * from newtypestbl where d!=cast('0.22' as float)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-334427804500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where d<11.22
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+PREHOOK: query: select * from newtypestbl where d<11.22
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where d<11.22
+POSTHOOK: query: select * from newtypestbl where d<11.22
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951929000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where d<11.22
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where d<11.22
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where d<11.22
+POSTHOOK: query: select * from newtypestbl where d<11.22
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951929000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where d<'11.22'
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where d<'11.22'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where d<'11.22'
+POSTHOOK: query: select * from newtypestbl where d<'11.22'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951929000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where d<'11.22'
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where d<'11.22'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where d<'11.22'
+POSTHOOK: query: select * from newtypestbl where d<'11.22'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951929000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where d<cast('11.22' as float)
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where d<cast('11.22' as float)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where d<cast('11.22' as float)
+POSTHOOK: query: select * from newtypestbl where d<cast('11.22' as float)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951929000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where d<cast('11.22' as float)
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where d<cast('11.22' as float)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where d<cast('11.22' as float)
+POSTHOOK: query: select * from newtypestbl where d<cast('11.22' as float)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951929000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where d<1
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where d<1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where d<1
+POSTHOOK: query: select * from newtypestbl where d<1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951929000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where d<1
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where d<1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where d<1
+POSTHOOK: query: select * from newtypestbl where d<1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951929000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where d<=11.22
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where d<=11.22
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where d<=11.22
+POSTHOOK: query: select * from newtypestbl where d<=11.22
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-81475875500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where d<=11.22
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where d<=11.22
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where d<=11.22
+POSTHOOK: query: select * from newtypestbl where d<=11.22
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-81475875500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where d<='11.22'
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where d<='11.22'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where d<='11.22'
+POSTHOOK: query: select * from newtypestbl where d<='11.22'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-81475875500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where d<='11.22'
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where d<='11.22'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where d<='11.22'
+POSTHOOK: query: select * from newtypestbl where d<='11.22'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-81475875500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where d<=cast('11.22' as float)
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where d<=cast('11.22' as float)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where d<=cast('11.22' as float)
+POSTHOOK: query: select * from newtypestbl where d<=cast('11.22' as float)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-81475875500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where d<=cast('11.22' as float)
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where d<=cast('11.22' as float)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where d<=cast('11.22' as float)
+POSTHOOK: query: select * from newtypestbl where d<=cast('11.22' as float)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-81475875500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where d<=cast('11.22' as decimal)
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where d<=cast('11.22' as decimal)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where d<=cast('11.22' as decimal)
+POSTHOOK: query: select * from newtypestbl where d<=cast('11.22' as decimal)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951929000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where d<=cast('11.22' as decimal)
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where d<=cast('11.22' as decimal)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where d<=cast('11.22' as decimal)
+POSTHOOK: query: select * from newtypestbl where d<=cast('11.22' as decimal)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951929000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where d<=11.22BD
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where d<=11.22BD
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where d<=11.22BD
+POSTHOOK: query: select * from newtypestbl where d<=11.22BD
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-81475875500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where d<=11.22BD
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where d<=11.22BD
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where d<=11.22BD
+POSTHOOK: query: select * from newtypestbl where d<=11.22BD
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-81475875500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where d<=12
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where d<=12
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where d<=12
+POSTHOOK: query: select * from newtypestbl where d<=12
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-81475875500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where d<=12
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where d<=12
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where d<=12
+POSTHOOK: query: select * from newtypestbl where d<=12
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-81475875500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where d in ('0.22', '1.0')
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where d in ('0.22', '1.0')
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where d in ('0.22', '1.0')
+POSTHOOK: query: select * from newtypestbl where d in ('0.22', '1.0')
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951929000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where d in ('0.22', '1.0')
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where d in ('0.22', '1.0')
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where d in ('0.22', '1.0')
+POSTHOOK: query: select * from newtypestbl where d in ('0.22', '1.0')
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951929000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where d in ('0.22', '11.22')
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where d in ('0.22', '11.22')
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where d in ('0.22', '11.22')
+POSTHOOK: query: select * from newtypestbl where d in ('0.22', '11.22')
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-81475875500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where d in ('0.22', '11.22')
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where d in ('0.22', '11.22')
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where d in ('0.22', '11.22')
+POSTHOOK: query: select * from newtypestbl where d in ('0.22', '11.22')
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-81475875500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where d in ('0.9', '1.0')
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where d in ('0.9', '1.0')
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where d in ('0.9', '1.0')
+POSTHOOK: query: select * from newtypestbl where d in ('0.9', '1.0')
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-NULL
-PREHOOK: query: select sum(hash(*)) from newtypestbl where d in ('0.9', '1.0')
+PREHOOK: query: select * from newtypestbl where d in ('0.9', '1.0')
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where d in ('0.9', '1.0')
+POSTHOOK: query: select * from newtypestbl where d in ('0.9', '1.0')
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-NULL
-PREHOOK: query: select sum(hash(*)) from newtypestbl where d in ('0.9', 0.22)
+PREHOOK: query: select * from newtypestbl where d in ('0.9', 0.22)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where d in ('0.9', 0.22)
+POSTHOOK: query: select * from newtypestbl where d in ('0.9', 0.22)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951929000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where d in ('0.9', 0.22)
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where d in ('0.9', 0.22)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where d in ('0.9', 0.22)
+POSTHOOK: query: select * from newtypestbl where d in ('0.9', 0.22)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951929000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where d in ('0.9', 0.22, cast('11.22' as float))
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where d in ('0.9', 0.22, cast('11.22' as float))
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where d in ('0.9', 0.22, cast('11.22' as float))
+POSTHOOK: query: select * from newtypestbl where d in ('0.9', 0.22, cast('11.22' as float))
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-81475875500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where d in ('0.9', 0.22, cast('11.22' as float))
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where d in ('0.9', 0.22, cast('11.22' as float))
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where d in ('0.9', 0.22, cast('11.22' as float))
+POSTHOOK: query: select * from newtypestbl where d in ('0.9', 0.22, cast('11.22' as float))
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-81475875500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where d between 0 and 1
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where d between 0 and 1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where d between 0 and 1
+POSTHOOK: query: select * from newtypestbl where d between 0 and 1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951929000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where d between 0 and 1
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where d between 0 and 1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where d between 0 and 1
+POSTHOOK: query: select * from newtypestbl where d between 0 and 1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951929000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where d between 0 and 1000
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where d between 0 and 1000
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where d between 0 and 1000
+POSTHOOK: query: select * from newtypestbl where d between 0 and 1000
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-81475875500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where d between 0 and 1000
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where d between 0 and 1000
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where d between 0 and 1000
+POSTHOOK: query: select * from newtypestbl where d between 0 and 1000
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-81475875500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where d between 0 and '2.0'
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where d between 0 and '2.0'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where d between 0 and '2.0'
+POSTHOOK: query: select * from newtypestbl where d between 0 and '2.0'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951929000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where d between 0 and '2.0'
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where d between 0 and '2.0'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where d between 0 and '2.0'
+POSTHOOK: query: select * from newtypestbl where d between 0 and '2.0'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951929000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where d between 0 and cast(3 as float)
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where d between 0 and cast(3 as float)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where d between 0 and cast(3 as float)
+POSTHOOK: query: select * from newtypestbl where d between 0 and cast(3 as float)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951929000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where d between 0 and cast(3 as float)
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where d between 0 and cast(3 as float)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where d between 0 and cast(3 as float)
+POSTHOOK: query: select * from newtypestbl where d between 0 and cast(3 as float)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951929000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where d between 1 and cast(30 as char(10))
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where d between 1 and cast(30 as char(10))
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where d between 1 and cast(30 as char(10))
+POSTHOOK: query: select * from newtypestbl where d between 1 and cast(30 as char(10))
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-334427804500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where d between 1 and cast(30 as char(10))
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+PREHOOK: query: select * from newtypestbl where d between 1 and cast(30 as char(10))
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where d between 1 and cast(30 as char(10))
+POSTHOOK: query: select * from newtypestbl where d between 1 and cast(30 as char(10))
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-334427804500
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27


[05/24] hive git commit: HIVE-11587 : Fix memory estimates for mapjoin hashtable (Wei Zheng, reviewed by Sergey Shelukhin)

Posted by pr...@apache.org.
HIVE-11587 : Fix memory estimates for mapjoin hashtable (Wei Zheng, reviewed by Sergey Shelukhin)
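
The gist of the change, for readers skimming the diff below: each hash partition's probe space (the long[] ref array of a BytesBytesMultiHashMap) used to be budgeted against the whole mapjoin memory threshold; after this patch it is capped by a row-size-derived fraction of that threshold, or by the new hive.mapjoin.optimized.hashtable.probe.percent knob when the estimated row size works out to zero. A rough before/after illustration with assumed numbers, not code from the patch:

    public class EstimateSketch {
      public static void main(String[] args) {
        long memoryThreshold = 256L * 1024 * 1024;  // assumed mapjoin memory budget
        long tableRowSize = 56;                     // assumed small-table row size
        long before = memoryThreshold;              // old memUsage handed to each partition
        long after = (long) (memoryThreshold * ((float) 8 / (tableRowSize + 8)));
        System.out.println(before + " -> " + after);  // 268435456 -> 33554432
      }
    }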


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b4be31f4
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b4be31f4
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b4be31f4

Branch: refs/heads/llap
Commit: b4be31f4aa497cd09dd1e513eabe951044a7ff73
Parents: 7014407
Author: Sergey Shelukhin <se...@apache.org>
Authored: Thu Sep 10 11:57:36 2015 -0700
Committer: Sergey Shelukhin <se...@apache.org>
Committed: Thu Sep 10 11:57:36 2015 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/conf/HiveConf.java   |  2 +
 .../hadoop/hive/ql/exec/MapJoinOperator.java    |  5 ++
 .../persistence/BytesBytesMultiHashMap.java     | 11 +++-
 .../persistence/HybridHashTableContainer.java   | 68 ++++++++++++--------
 .../hive/ql/exec/tez/HashTableLoader.java       |  7 +-
 .../apache/hadoop/hive/serde2/WriteBuffers.java | 10 ++-
 6 files changed, 68 insertions(+), 35 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/b4be31f4/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index d2c5885..7f29da2 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -767,6 +767,8 @@ public class HiveConf extends Configuration {
     HIVEMAPJOINUSEOPTIMIZEDTABLE("hive.mapjoin.optimized.hashtable", true,
         "Whether Hive should use memory-optimized hash table for MapJoin. Only works on Tez,\n" +
         "because memory-optimized hashtable cannot be serialized."),
+    HIVEMAPJOINOPTIMIZEDTABLEPROBEPERCENT("hive.mapjoin.optimized.hashtable.probe.percent",
+        (float) 0.5, "Probing space percentage of the optimized hashtable"),
     HIVEUSEHYBRIDGRACEHASHJOIN("hive.mapjoin.hybridgrace.hashtable", true, "Whether to use hybrid" +
         "grace hash join as the join method for mapjoin. Tez only."),
     HIVEHYBRIDGRACEHASHJOINMEMCHECKFREQ("hive.mapjoin.hybridgrace.memcheckfrequency", 1024, "For " +
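
The new knob is consumed in the HybridHashTableContainer constructor later in this patch via HiveConf.getFloatVar. A minimal sketch of reading it; the class and main wrapper are mine, only the getFloatVar call is from the patch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.conf.HiveConf;

    public class ProbePercentConfSketch {
      static float probePercent(Configuration hconf) {
        // Same lookup the container constructor performs; defaults to 0.5f.
        return HiveConf.getFloatVar(hconf,
            HiveConf.ConfVars.HIVEMAPJOINOPTIMIZEDTABLEPROBEPERCENT);
      }

      public static void main(String[] args) {
        System.out.println(probePercent(new HiveConf()));  // prints 0.5
      }
    }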

http://git-wip-us.apache.org/repos/asf/hive/blob/b4be31f4/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
index 1b9d7ef..a9159a5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
@@ -592,6 +592,11 @@ public class MapJoinOperator extends AbstractMapJoinOperator<MapJoinDesc> implem
 
     // Deserialize the on-disk hash table
     // We're sure this part is smaller than memory limit
+    if (rowCount <= 0) {
+      rowCount = 1024 * 1024; // Since rowCount is used later to instantiate a BytesBytesMultiHashMap
+                              // as the initialCapacity which cannot be 0, we provide a reasonable
+                              // positive number here
+    }
     BytesBytesMultiHashMap restoredHashMap = partition.getHashMapFromDisk(rowCount);
     rowCount += restoredHashMap.getNumValues();
     LOG.info("Hybrid Grace Hash Join: Deserializing spilled hash partition...");
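
Why the guard matters: the row count becomes the initialCapacity of a BytesBytesMultiHashMap, which rounds it to a power of two, and zero rounds to zero. A stand-in for that rounding (nextPow2 is my naming; the real helper is nextHighestPowerOfTwo):

    public class Pow2Sketch {
      static int nextPow2(int v) {
        return (Integer.bitCount(v) == 1) ? v : Integer.highestOneBit(v) << 1;
      }

      public static void main(String[] args) {
        System.out.println(nextPow2(1000000));      // 1048576
        System.out.println(nextPow2(1024 * 1024));  // 1048576, already a power of two
        System.out.println(nextPow2(0));            // 0, an unusable capacity, hence the fallback
      }
    }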

http://git-wip-us.apache.org/repos/asf/hive/blob/b4be31f4/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/BytesBytesMultiHashMap.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/BytesBytesMultiHashMap.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/BytesBytesMultiHashMap.java
index 3bba890..77c7ead 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/BytesBytesMultiHashMap.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/BytesBytesMultiHashMap.java
@@ -153,9 +153,11 @@ public final class BytesBytesMultiHashMap {
   /** 8 Gb of refs is the max capacity if memory limit is not specified. If someone has 100s of
    * Gbs of memory (this might happen pretty soon) we'd need to string together arrays anyway. */
   private final static int DEFAULT_MAX_CAPACITY = 1024 * 1024 * 1024;
+  /** Make sure maxCapacity has a lower limit */
+  private final static int DEFAULT_MIN_MAX_CAPACITY = 16 * 1024 * 1024;
 
   public BytesBytesMultiHashMap(int initialCapacity,
-      float loadFactor, int wbSize, long memUsage) {
+      float loadFactor, int wbSize, long maxProbeSize) {
     if (loadFactor < 0 || loadFactor > 1) {
       throw new AssertionError("Load factor must be between (0, 1].");
     }
@@ -163,8 +165,11 @@ public final class BytesBytesMultiHashMap {
     initialCapacity = (Long.bitCount(initialCapacity) == 1)
         ? initialCapacity : nextHighestPowerOfTwo(initialCapacity);
     // 8 bytes per long in the refs, assume data will be empty. This is just a sanity check.
-    int maxCapacity =  (memUsage <= 0) ? DEFAULT_MAX_CAPACITY
-        : (int)Math.min((long)DEFAULT_MAX_CAPACITY, memUsage / 8);
+    int maxCapacity =  (maxProbeSize <= 0) ? DEFAULT_MAX_CAPACITY
+        : (int)Math.min((long)DEFAULT_MAX_CAPACITY, maxProbeSize / 8);
+    if (maxCapacity < DEFAULT_MIN_MAX_CAPACITY) {
+      maxCapacity = DEFAULT_MIN_MAX_CAPACITY;
+    }
     if (maxCapacity < initialCapacity || initialCapacity <= 0) {
       // Either initialCapacity is too large, or nextHighestPowerOfTwo overflows
       initialCapacity = (Long.bitCount(maxCapacity) == 1)
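
A worked example of the new clamping, with an assumed probe budget (the constants are the ones introduced in this hunk):

    public class ProbeCapSketch {
      public static void main(String[] args) {
        long maxProbeSize = 64L * 1024 * 1024;                  // assumed 64 MB probe budget
        int maxCapacity = (int) Math.min(1024L * 1024 * 1024,   // DEFAULT_MAX_CAPACITY
            maxProbeSize / 8);                                  // 8 bytes per ref -> 8M refs
        if (maxCapacity < 16 * 1024 * 1024) {                   // DEFAULT_MIN_MAX_CAPACITY
          maxCapacity = 16 * 1024 * 1024;                       // raised to the 16M floor
        }
        System.out.println(maxCapacity);                        // prints 16777216
      }
    }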

http://git-wip-us.apache.org/repos/asf/hive/blob/b4be31f4/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java
index ff64f52..52c02ae 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java
@@ -76,7 +76,6 @@ public class HybridHashTableContainer
   private int totalInMemRowCount = 0;           // total number of small table rows in memory
   private long memoryThreshold;                 // the max memory limit that can be allocated
   private long memoryUsed;                      // the actual memory used
-  private int writeBufferSize;                  // write buffer size for this HybridHashTableContainer
   private final long tableRowSize;              // row size of the small table
   private boolean isSpilled;                    // whether there's any spilled partition
   private int toSpillPartitionId;               // the partition into which to spill the big table row;
@@ -107,7 +106,7 @@ public class HybridHashTableContainer
     Path hashMapLocalPath;                  // Local file system path for spilled hashMap
     boolean hashMapOnDisk;                  // Status of hashMap. true: on disk, false: in memory
     boolean hashMapSpilledOnCreation;       // When there's no enough memory, cannot create hashMap
-    int threshold;                          // Used to create an empty BytesBytesMultiHashMap
+    int initialCapacity;                    // Used to create an empty BytesBytesMultiHashMap
     float loadFactor;                       // Same as above
     int wbSize;                             // Same as above
     int rowsOnDisk;                         // How many rows saved to the on-disk hashmap (if on disk)
@@ -115,17 +114,17 @@ public class HybridHashTableContainer
     /* It may happen that there's not enough memory to instantiate a hashmap for the partition.
      * In that case, we don't create the hashmap, but pretend the hashmap is directly "spilled".
      */
-    public HashPartition(int threshold, float loadFactor, int wbSize, long memUsage,
+    public HashPartition(int initialCapacity, float loadFactor, int wbSize, long maxProbeSize,
                          boolean createHashMap) {
       if (createHashMap) {
-        // Hash map should be at least the size of our designated wbSize
-        memUsage = Math.max(memUsage, wbSize);
-        hashMap = new BytesBytesMultiHashMap(threshold, loadFactor, wbSize, memUsage);
+        // Probe space should be at least equal to the size of our designated wbSize
+        maxProbeSize = Math.max(maxProbeSize, wbSize);
+        hashMap = new BytesBytesMultiHashMap(initialCapacity, loadFactor, wbSize, maxProbeSize);
       } else {
         hashMapSpilledOnCreation = true;
         hashMapOnDisk = true;
       }
-      this.threshold = threshold;
+      this.initialCapacity = initialCapacity;
       this.loadFactor = loadFactor;
       this.wbSize = wbSize;
     }
@@ -138,18 +137,18 @@ public class HybridHashTableContainer
     /* Restore the hashmap from disk by deserializing it.
      * Currently Kryo is used for this purpose.
      */
-    public BytesBytesMultiHashMap getHashMapFromDisk(int initialCapacity)
+    public BytesBytesMultiHashMap getHashMapFromDisk(int rowCount)
         throws IOException, ClassNotFoundException {
       if (hashMapSpilledOnCreation) {
-        return new BytesBytesMultiHashMap(Math.max(threshold, initialCapacity) , loadFactor, wbSize, -1);
+        return new BytesBytesMultiHashMap(rowCount, loadFactor, wbSize, -1);
       } else {
         InputStream inputStream = Files.newInputStream(hashMapLocalPath);
         com.esotericsoftware.kryo.io.Input input = new com.esotericsoftware.kryo.io.Input(inputStream);
         Kryo kryo = Utilities.runtimeSerializationKryo.get();
         BytesBytesMultiHashMap restoredHashMap = kryo.readObject(input, BytesBytesMultiHashMap.class);
 
-        if (initialCapacity > 0) {
-          restoredHashMap.expandAndRehashToTarget(initialCapacity);
+        if (rowCount > 0) {
+          restoredHashMap.expandAndRehashToTarget(rowCount);
         }
 
         // some bookkeeping
@@ -237,7 +236,7 @@ public class HybridHashTableContainer
 
   public HybridHashTableContainer(Configuration hconf, long keyCount, long memoryAvailable,
                                   long estimatedTableSize, HybridHashTableConf nwayConf)
- throws SerDeException, IOException {
+      throws SerDeException, IOException {
     this(HiveConf.getFloatVar(hconf, HiveConf.ConfVars.HIVEHASHTABLEKEYCOUNTADJUSTMENT),
         HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVEHASHTABLETHRESHOLD),
         HiveConf.getFloatVar(hconf,HiveConf.ConfVars.HIVEHASHTABLELOADFACTOR),
@@ -245,12 +244,13 @@ public class HybridHashTableContainer
         HiveConf.getIntVar(hconf,HiveConf.ConfVars.HIVEHYBRIDGRACEHASHJOINMINWBSIZE),
         HiveConf.getIntVar(hconf,HiveConf.ConfVars.HIVEHASHTABLEWBSIZE),
         HiveConf.getIntVar(hconf,HiveConf.ConfVars.HIVEHYBRIDGRACEHASHJOINMINNUMPARTITIONS),
+        HiveConf.getFloatVar(hconf, HiveConf.ConfVars.HIVEMAPJOINOPTIMIZEDTABLEPROBEPERCENT),
         estimatedTableSize, keyCount, memoryAvailable, nwayConf);
   }
 
   private HybridHashTableContainer(float keyCountAdj, int threshold, float loadFactor,
-      int memCheckFreq, int minWbSize, int maxWbSize, int minNumParts, long estimatedTableSize,
-      long keyCount, long memoryAvailable, HybridHashTableConf nwayConf)
+      int memCheckFreq, int minWbSize, int maxWbSize, int minNumParts, float probePercent,
+      long estimatedTableSize, long keyCount, long memoryAvailable, HybridHashTableConf nwayConf)
       throws SerDeException, IOException {
     directWriteHelper = new MapJoinBytesTableContainer.DirectKeyValueWriter();
 
@@ -262,10 +262,10 @@ public class HybridHashTableContainer
     memoryCheckFrequency = memCheckFreq;
 
     this.nwayConf = nwayConf;
+    int writeBufferSize;
     int numPartitions;
     if (nwayConf == null) { // binary join
-      numPartitions = calcNumPartitions(memoryThreshold, estimatedTableSize, minNumParts, minWbSize,
-          nwayConf);
+      numPartitions = calcNumPartitions(memoryThreshold, estimatedTableSize, minNumParts, minWbSize);
       writeBufferSize = (int)(estimatedTableSize / numPartitions);
     } else {                // n-way join
       // It has been calculated in HashTableLoader earlier, so just need to retrieve that number
@@ -302,21 +302,33 @@ public class HybridHashTableContainer
     int numPartitionsSpilledOnCreation = 0;
     memoryUsed = 0;
     int initialCapacity = Math.max(newKeyCount / numPartitions, threshold / numPartitions);
+    // maxCapacity is a fixed fraction of memoryThreshold: the share of each row's
+    // footprint that its long-sized probe entry occupies
+    float probePercentage = (float) 8 / (tableRowSize + 8); // long_size / (tableRowSize + long_size)
+    if (probePercentage == 1) {
+      probePercentage = probePercent;
+    }
+    int maxCapacity = (int)(memoryThreshold * probePercentage);
     for (int i = 0; i < numPartitions; i++) {
       if (this.nwayConf == null ||                          // binary join
           nwayConf.getLoadedContainerList().size() == 0) {  // n-way join, first (biggest) small table
         if (i == 0) { // We unconditionally create a hashmap for the first hash partition
-          hashPartitions[i] = new HashPartition(initialCapacity, loadFactor, writeBufferSize, memoryThreshold, true);
+          hashPartitions[i] = new HashPartition(initialCapacity, loadFactor, writeBufferSize,
+              maxCapacity, true);
         } else {
-          hashPartitions[i] = new HashPartition(initialCapacity, loadFactor, writeBufferSize, memoryThreshold,
-              memoryUsed + writeBufferSize < memoryThreshold);
+          // To check whether we have enough memory to allocate another hash partition,
+          // we use the size of the first hash partition as an estimate.
+          hashPartitions[i] = new HashPartition(initialCapacity, loadFactor, writeBufferSize,
+              maxCapacity, memoryUsed + hashPartitions[0].hashMap.memorySize() < memoryThreshold);
         }
-      } else {                      // n-way join
+      } else {                                              // n-way join, all later small tables
         // For all later small tables, follow the same pattern of the previously loaded tables.
         if (this.nwayConf.doSpillOnCreation(i)) {
-          hashPartitions[i] = new HashPartition(threshold, loadFactor, writeBufferSize, memoryThreshold, false);
+          hashPartitions[i] = new HashPartition(initialCapacity, loadFactor, writeBufferSize,
+              maxCapacity, false);
         } else {
-          hashPartitions[i] = new HashPartition(threshold, loadFactor, writeBufferSize, memoryThreshold, true);
+          hashPartitions[i] = new HashPartition(initialCapacity, loadFactor, writeBufferSize,
+              maxCapacity, true);
         }
       }
 
@@ -513,7 +525,8 @@ public class HybridHashTableContainer
     Path path = Files.createTempFile("partition-" + partitionId + "-", null);
     OutputStream outputStream = Files.newOutputStream(path);
 
-    com.esotericsoftware.kryo.io.Output output = new com.esotericsoftware.kryo.io.Output(outputStream);
+    com.esotericsoftware.kryo.io.Output output =
+        new com.esotericsoftware.kryo.io.Output(outputStream);
     Kryo kryo = Utilities.runtimeSerializationKryo.get();
     kryo.writeObject(output, partition.hashMap);  // use Kryo to serialize hashmap
     output.close();
@@ -545,11 +558,10 @@ public class HybridHashTableContainer
    * @param dataSize total data size for the table
    * @param minNumParts minimum required number of partitions
    * @param minWbSize minimum required write buffer size
-   * @param nwayConf the n-way join configuration
    * @return number of partitions needed
    */
   public static int calcNumPartitions(long memoryThreshold, long dataSize, int minNumParts,
-      int minWbSize, HybridHashTableConf nwayConf) throws IOException {
+      int minWbSize) throws IOException {
     int numPartitions = minNumParts;
 
     if (memoryThreshold < minNumParts * minWbSize) {
@@ -803,7 +815,8 @@ public class HybridHashTableContainer
         return JoinUtil.JoinResult.SPILL;
       }
       else {
-        aliasFilter = hashPartitions[partitionId].hashMap.getValueResult(output.getData(), 0, output.getLength(), hashMapResult);
+        aliasFilter = hashPartitions[partitionId].hashMap.getValueResult(output.getData(), 0,
+            output.getLength(), hashMapResult);
         dummyRow = null;
         if (hashMapResult.hasRows()) {
           return JoinUtil.JoinResult.MATCH;
@@ -941,7 +954,8 @@ public class HybridHashTableContainer
         return JoinUtil.JoinResult.SPILL;
       }
       else {
-        aliasFilter = hashPartitions[partitionId].hashMap.getValueResult(bytes, offset, length, hashMapResult);
+        aliasFilter = hashPartitions[partitionId].hashMap.getValueResult(bytes, offset, length,
+            hashMapResult);
         dummyRow = null;
         if (hashMapResult.hasRows()) {
           return JoinUtil.JoinResult.MATCH;

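The hunk above replaces the per-partition memoryThreshold cap with a maxCapacity
derived from how much of each row the long-sized probe entry occupies. A minimal
sketch of that arithmetic, assuming tableRowSize and memoryThreshold carry the same
meaning as in HybridHashTableContainer, with configuredProbePercent standing in for
the HIVEMAPJOINOPTIMIZEDTABLEPROBEPERCENT fallback:

    public class ProbeCapacitySketch {
      static int maxCapacity(long memoryThreshold, long tableRowSize,
          float configuredProbePercent) {
        // Fraction of each row's footprint taken by its 8-byte probe entry:
        // long_size / (tableRowSize + long_size)
        float probePercentage = (float) 8 / (tableRowSize + 8);
        if (probePercentage == 1) { // tableRowSize == 0, so no row size estimate exists
          probePercentage = configuredProbePercent;
        }
        return (int) (memoryThreshold * probePercentage);
      }

      public static void main(String[] args) {
        // 100 MB threshold with 92-byte rows: about 8% of memory goes to probe space.
        System.out.println(maxCapacity(100L << 20, 92, 0.5f));
      }
    }
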
http://git-wip-us.apache.org/repos/asf/hive/blob/b4be31f4/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HashTableLoader.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HashTableLoader.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HashTableLoader.java
index 2b6571b..f7d165a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HashTableLoader.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HashTableLoader.java
@@ -84,6 +84,7 @@ public class HashTableLoader implements org.apache.hadoop.hive.ql.exec.HashTable
 
     // Get the total available memory from memory manager
     long totalMapJoinMemory = desc.getMemoryNeeded();
+    LOG.info("Memory manager allocates " + totalMapJoinMemory + " bytes for the loading hashtable.");
     if (totalMapJoinMemory <= 0) {
       totalMapJoinMemory = HiveConf.getLongVar(
         hconf, HiveConf.ConfVars.HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD);
@@ -128,11 +129,9 @@ public class HashTableLoader implements org.apache.hadoop.hive.ql.exec.HashTable
       long memory = tableMemorySizes.get(biggest);
       int numPartitions = 0;
       try {
-        numPartitions = HybridHashTableContainer.calcNumPartitions(memory,
-            maxSize,
+        numPartitions = HybridHashTableContainer.calcNumPartitions(memory, maxSize,
             HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVEHYBRIDGRACEHASHJOINMINNUMPARTITIONS),
-            HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVEHYBRIDGRACEHASHJOINMINWBSIZE),
-            nwayConf);
+            HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVEHYBRIDGRACEHASHJOINMINWBSIZE));
       } catch (IOException e) {
         throw new HiveException(e);
       }

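With the nwayConf parameter gone, calcNumPartitions depends only on the memory grant,
the data size, and the two configured minimums. Only the first lines of its body are
visible in this diff; the sketch below is a hedged reading of its contract, and the
doubling loop and the exception message are assumptions, not Hive's actual code:

    import java.io.IOException;

    public class CalcNumPartitionsSketch {
      public static int calcNumPartitions(long memoryThreshold, long dataSize,
          int minNumParts, int minWbSize) throws IOException {
        int numPartitions = minNumParts;
        if (memoryThreshold < (long) minNumParts * minWbSize) {
          // Illustrative message only: the memory grant cannot hold even the
          // minimum number of minimum-sized write buffers.
          throw new IOException("memory threshold too small for " + minNumParts
              + " partitions of " + minWbSize + " bytes each");
        }
        // Assumption: grow the partition count until one partition's share of the
        // data fits under the memory threshold, so a partition can be built in memory.
        while (dataSize / numPartitions > memoryThreshold) {
          numPartitions *= 2;
        }
        return numPartitions;
      }
    }
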
http://git-wip-us.apache.org/repos/asf/hive/blob/b4be31f4/serde/src/java/org/apache/hadoop/hive/serde2/WriteBuffers.java
----------------------------------------------------------------------
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/WriteBuffers.java b/serde/src/java/org/apache/hadoop/hive/serde2/WriteBuffers.java
index 05d9359..62250ec 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/WriteBuffers.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/WriteBuffers.java
@@ -61,7 +61,6 @@ public final class WriteBuffers implements RandomAccessOutput {
     this.offsetMask = this.wbSize - 1;
     this.maxSize = maxSize;
     writePos.bufferIndex = -1;
-    nextBufferToWrite();
   }
 
   public int readVInt() {
@@ -207,6 +206,9 @@ public final class WriteBuffers implements RandomAccessOutput {
 
   @Override
   public void write(byte[] b, int off, int len) {
+    if (writePos.bufferIndex == -1) {
+      nextBufferToWrite();
+    }
     int srcOffset = 0;
     while (srcOffset < len) {
       int toWrite = Math.min(len - srcOffset, wbSize - writePos.offset);
@@ -355,6 +357,9 @@ public final class WriteBuffers implements RandomAccessOutput {
 
 
   public long getWritePoint() {
+    if (writePos.bufferIndex == -1) {
+      nextBufferToWrite();
+    }
     return ((long)writePos.bufferIndex << wbSizeLog2) + writePos.offset;
   }
 
@@ -498,6 +503,9 @@ public final class WriteBuffers implements RandomAccessOutput {
   }
 
   public void seal() {
+    if (writePos.bufferIndex == -1) {
+      return;
+    }
     if (writePos.offset < (wbSize * 0.8)) { // arbitrary
       byte[] smallerBuffer = new byte[writePos.offset];
       System.arraycopy(writePos.buffer, 0, smallerBuffer, 0, writePos.offset);

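The WriteBuffers change above is a lazy-allocation pattern: the constructor no longer
calls nextBufferToWrite(), the first buffer is allocated on first use, and seal()
returns immediately if nothing was ever written. A self-contained sketch of the same
pattern, with illustrative names rather than Hive's:

    public class LazyBufferSketch {
      private byte[] buffer;     // null plays the role of writePos.bufferIndex == -1
      private int offset;
      private final int wbSize;

      LazyBufferSketch(int wbSize) {
        this.wbSize = wbSize;    // note: no allocation in the constructor any more
      }

      private void ensureBuffer() {
        if (buffer == null) {    // the bufferIndex == -1 guard from the diff
          buffer = new byte[wbSize];
        }
      }

      void write(byte b) {
        ensureBuffer();          // allocate lazily on the first write
        buffer[offset++] = b;
      }

      void seal() {
        if (buffer == null) {
          return;                // nothing written: keep zero buffers allocated
        }
        if (offset < wbSize * 0.8) { // same "arbitrary" shrink threshold as the diff
          byte[] smaller = new byte[offset];
          System.arraycopy(buffer, 0, smaller, 0, offset);
          buffer = smaller;
        }
      }
    }
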

[22/24] hive git commit: HIVE-11792: User explain in tez does not preserve ordering (Prasanth Jayachandran reviewed by Pengcheng Xiong)

Posted by pr...@apache.org.
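
The q.out churn below is the ordering fix itself: map-valued explain attributes such
as table:{...} and keys:{...} now print their entries in a deterministic key order.
A minimal illustration of the technique, assuming a sorted-map copy is what makes the
rendering stable; this sketch is not the actual HIVE-11792 patch:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.TreeMap;

    public class DeterministicExplainSketch {
      static String render(Map<String, String> keys) {
        StringBuilder sb = new StringBuilder("keys:{");
        // Copying into a TreeMap makes iteration follow natural key order, not hash order.
        for (Map.Entry<String, String> e : new TreeMap<>(keys).entrySet()) {
          if (sb.length() > "keys:{".length()) {
            sb.append(',');
          }
          sb.append('"').append(e.getKey()).append("\":\"").append(e.getValue()).append('"');
        }
        return sb.append('}').toString();
      }

      public static void main(String[] args) {
        Map<String, String> keys = new HashMap<>();
        keys.put("1", "_col0 (type: string)");
        keys.put("0", "_col0 (type: string)");
        System.out.println(render(keys)); // "0" always precedes "1", matching the new q.out
      }
    }
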
http://git-wip-us.apache.org/repos/asf/hive/blob/da0be3db/ql/src/test/results/clientpositive/tez/explainuser_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/explainuser_1.q.out b/ql/src/test/results/clientpositive/tez/explainuser_1.q.out
index 7595c3e..36f1099 100644
--- a/ql/src/test/results/clientpositive/tez/explainuser_1.q.out
+++ b/ql/src/test/results/clientpositive/tez/explainuser_1.q.out
@@ -53,8 +53,8 @@ Stage-3
    Stats-Aggr Operator
       Stage-0
          Move Operator
-            partition:{"ts":"2012-01-03+14:46:31","ds":"2012-01-03"}
-            table:{"serde:":"org.apache.hadoop.hive.ql.io.orc.OrcSerde","name:":"default.src_orc_merge_test_part","input format:":"org.apache.hadoop.hive.ql.io.orc.OrcInputFormat","output format:":"org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat"}
+            partition:{"ds":"2012-01-03","ts":"2012-01-03+14:46:31"}
+            table:{"input format:":"org.apache.hadoop.hive.ql.io.orc.OrcInputFormat","output format:":"org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat","serde:":"org.apache.hadoop.hive.ql.io.orc.OrcSerde","name:":"default.src_orc_merge_test_part"}
             Stage-2
                Dependency Collection{}
                   Stage-1
@@ -62,7 +62,7 @@ Stage-3
                      File Output Operator [FS_3]
                         compressed:false
                         Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                        table:{"serde:":"org.apache.hadoop.hive.ql.io.orc.OrcSerde","name:":"default.src_orc_merge_test_part","input format:":"org.apache.hadoop.hive.ql.io.orc.OrcInputFormat","output format:":"org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat"}
+                        table:{"input format:":"org.apache.hadoop.hive.ql.io.orc.OrcInputFormat","output format:":"org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat","serde:":"org.apache.hadoop.hive.ql.io.orc.OrcSerde","name:":"default.src_orc_merge_test_part"}
                         Select Operator [SEL_1]
                            outputColumnNames:["_col0","_col1"]
                            Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -93,8 +93,8 @@ Stage-3
    Stats-Aggr Operator
       Stage-0
          Move Operator
-            partition:{"ts":"2012-01-03+14:46:31","ds":"2012-01-03"}
-            table:{"serde:":"org.apache.hadoop.hive.ql.io.orc.OrcSerde","name:":"default.src_orc_merge_test_part","input format:":"org.apache.hadoop.hive.ql.io.orc.OrcInputFormat","output format:":"org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat"}
+            partition:{"ds":"2012-01-03","ts":"2012-01-03+14:46:31"}
+            table:{"input format:":"org.apache.hadoop.hive.ql.io.orc.OrcInputFormat","output format:":"org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat","serde:":"org.apache.hadoop.hive.ql.io.orc.OrcSerde","name:":"default.src_orc_merge_test_part"}
             Stage-2
                Dependency Collection{}
                   Stage-1
@@ -102,7 +102,7 @@ Stage-3
                      File Output Operator [FS_7]
                         compressed:false
                         Statistics:Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
-                        table:{"serde:":"org.apache.hadoop.hive.ql.io.orc.OrcSerde","name:":"default.src_orc_merge_test_part","input format:":"org.apache.hadoop.hive.ql.io.orc.OrcInputFormat","output format:":"org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat"}
+                        table:{"input format:":"org.apache.hadoop.hive.ql.io.orc.OrcInputFormat","output format:":"org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat","serde:":"org.apache.hadoop.hive.ql.io.orc.OrcSerde","name:":"default.src_orc_merge_test_part"}
                         Select Operator [SEL_6]
                            outputColumnNames:["_col0","_col1"]
                            Statistics:Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
@@ -144,7 +144,7 @@ Stage-0
          File Output Operator [FS_8]
             compressed:false
             Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Group By Operator [GBY_6]
             |  aggregations:["count(VALUE._col0)"]
             |  outputColumnNames:["_col0"]
@@ -181,7 +181,7 @@ Stage-0
          File Output Operator [FS_8]
             compressed:false
             Statistics:Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Group By Operator [GBY_6]
             |  aggregations:["sum(VALUE._col0)","sum(VALUE._col1)"]
             |  outputColumnNames:["_col0","_col1"]
@@ -227,7 +227,7 @@ Stage-0
          File Output Operator [FS_8]
             compressed:false
             Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Group By Operator [GBY_6]
             |  aggregations:["count(VALUE._col0)"]
             |  outputColumnNames:["_col0"]
@@ -264,7 +264,7 @@ Stage-0
          File Output Operator [FS_8]
             compressed:false
             Statistics:Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Group By Operator [GBY_6]
             |  aggregations:["sum(VALUE._col0)","sum(VALUE._col1)"]
             |  outputColumnNames:["_col0","_col1"]
@@ -329,7 +329,7 @@ Stage-0
          File Output Operator [FS_17]
             compressed:false
             Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Group By Operator [GBY_15]
             |  aggregations:["sum(VALUE._col0)"]
             |  outputColumnNames:["_col0"]
@@ -397,7 +397,7 @@ Stage-0
          File Output Operator [FS_7]
             compressed:false
             Statistics:Num rows: 10 Data size: 885 Basic stats: COMPLETE Column stats: COMPLETE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Select Operator [SEL_6]
                outputColumnNames:["_col0","_col1","_col2"]
                Statistics:Num rows: 10 Data size: 885 Basic stats: COMPLETE Column stats: COMPLETE
@@ -443,7 +443,7 @@ Stage-0
          File Output Operator [FS_12]
             compressed:false
             Statistics:Num rows: 5 Data size: 100 Basic stats: COMPLETE Column stats: COMPLETE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Select Operator [SEL_11]
                outputColumnNames:["_col0","_col1","_col2"]
                Statistics:Num rows: 5 Data size: 100 Basic stats: COMPLETE Column stats: COMPLETE
@@ -515,7 +515,7 @@ Stage-0
          File Output Operator [FS_45]
             compressed:false
             Statistics:Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Select Operator [SEL_44]
             |  outputColumnNames:["_col0","_col1","_col2"]
             |  Statistics:Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
@@ -550,7 +550,7 @@ Stage-0
                                  Statistics:Num rows: 2 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
                                  Merge Join Operator [MERGEJOIN_55]
                                  |  condition map:[{"":"Inner Join 0 to 1"}]
-                                 |  keys:{"1":"_col0 (type: string)","0":"_col0 (type: string)"}
+                                 |  keys:{"0":"_col0 (type: string)","1":"_col0 (type: string)"}
                                  |  outputColumnNames:["_col1","_col2","_col6"]
                                  |  Statistics:Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
                                  |<-Map 11 [SIMPLE_EDGE]
@@ -584,7 +584,7 @@ Stage-0
                                              Statistics:Num rows: 1 Data size: 101 Basic stats: COMPLETE Column stats: COMPLETE
                                              Merge Join Operator [MERGEJOIN_54]
                                              |  condition map:[{"":"Inner Join 0 to 1"}]
-                                             |  keys:{"1":"_col0 (type: string)","0":"_col0 (type: string)"}
+                                             |  keys:{"0":"_col0 (type: string)","1":"_col0 (type: string)"}
                                              |  outputColumnNames:["_col0","_col1","_col2","_col4"]
                                              |  Statistics:Num rows: 1 Data size: 101 Basic stats: COMPLETE Column stats: COMPLETE
                                              |<-Reducer 10 [SIMPLE_EDGE]
@@ -701,7 +701,7 @@ Stage-0
          File Output Operator [FS_44]
             compressed:false
             Statistics:Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Select Operator [SEL_43]
             |  outputColumnNames:["_col0","_col1","_col2"]
             |  Statistics:Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
@@ -736,7 +736,7 @@ Stage-0
                                  Statistics:Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
                                  Merge Join Operator [MERGEJOIN_53]
                                  |  condition map:[{"":"Left Outer Join0 to 1"}]
-                                 |  keys:{"1":"_col0 (type: string)","0":"_col0 (type: string)"}
+                                 |  keys:{"0":"_col0 (type: string)","1":"_col0 (type: string)"}
                                  |  outputColumnNames:["_col1","_col2","_col6"]
                                  |  Statistics:Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE
                                  |<-Map 10 [SIMPLE_EDGE]
@@ -767,7 +767,7 @@ Stage-0
                                              Statistics:Num rows: 1 Data size: 101 Basic stats: COMPLETE Column stats: COMPLETE
                                              Merge Join Operator [MERGEJOIN_52]
                                              |  condition map:[{"":"Left Outer Join0 to 1"}]
-                                             |  keys:{"1":"_col0 (type: string)","0":"_col0 (type: string)"}
+                                             |  keys:{"0":"_col0 (type: string)","1":"_col0 (type: string)"}
                                              |  outputColumnNames:["_col0","_col1","_col2","_col4"]
                                              |  Statistics:Num rows: 1 Data size: 101 Basic stats: COMPLETE Column stats: COMPLETE
                                              |<-Reducer 3 [SIMPLE_EDGE]
@@ -870,7 +870,7 @@ Stage-0
          File Output Operator [FS_36]
             compressed:false
             Statistics:Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Group By Operator [GBY_34]
             |  aggregations:["count(VALUE._col0)"]
             |  keys:KEY._col0 (type: int), KEY._col1 (type: bigint)
@@ -896,7 +896,7 @@ Stage-0
                            Statistics:Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
                            Merge Join Operator [MERGEJOIN_41]
                            |  condition map:[{"":"Right Outer Join0 to 1"},{"":"Right Outer Join0 to 2"}]
-                           |  keys:{"2":"_col0 (type: string)","1":"_col0 (type: string)","0":"_col0 (type: string)"}
+                           |  keys:{"0":"_col0 (type: string)","1":"_col0 (type: string)","2":"_col0 (type: string)"}
                            |  outputColumnNames:["_col1","_col2","_col4","_col6"]
                            |  Statistics:Num rows: 4 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE
                            |<-Map 8 [SIMPLE_EDGE]
@@ -1014,7 +1014,7 @@ Stage-0
          File Output Operator [FS_43]
             compressed:false
             Statistics:Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Select Operator [SEL_42]
             |  outputColumnNames:["_col0","_col1","_col2"]
             |  Statistics:Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
@@ -1049,7 +1049,7 @@ Stage-0
                                  Statistics:Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
                                  Merge Join Operator [MERGEJOIN_48]
                                  |  condition map:[{"":"Outer Join 0 to 1"},{"":"Outer Join 0 to 2"}]
-                                 |  keys:{"2":"_col0 (type: string)","1":"_col0 (type: string)","0":"_col0 (type: string)"}
+                                 |  keys:{"0":"_col0 (type: string)","1":"_col0 (type: string)","2":"_col0 (type: string)"}
                                  |  outputColumnNames:["_col1","_col2","_col4","_col6"]
                                  |  Statistics:Num rows: 4 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE
                                  |<-Map 10 [SIMPLE_EDGE]
@@ -1177,7 +1177,7 @@ Stage-0
          File Output Operator [FS_41]
             compressed:false
             Statistics:Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Group By Operator [GBY_39]
             |  aggregations:["count(VALUE._col0)"]
             |  keys:KEY._col0 (type: int), KEY._col1 (type: bigint)
@@ -1203,7 +1203,7 @@ Stage-0
                            Statistics:Num rows: 2 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
                            Merge Join Operator [MERGEJOIN_51]
                            |  condition map:[{"":"Inner Join 0 to 1"}]
-                           |  keys:{"1":"_col0 (type: string)","0":"_col0 (type: string)"}
+                           |  keys:{"0":"_col0 (type: string)","1":"_col0 (type: string)"}
                            |  outputColumnNames:["_col1","_col2","_col6"]
                            |  Statistics:Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
                            |<-Map 8 [SIMPLE_EDGE]
@@ -1237,7 +1237,7 @@ Stage-0
                                        Statistics:Num rows: 1 Data size: 101 Basic stats: COMPLETE Column stats: COMPLETE
                                        Merge Join Operator [MERGEJOIN_50]
                                        |  condition map:[{"":"Inner Join 0 to 1"}]
-                                       |  keys:{"1":"_col0 (type: string)","0":"_col0 (type: string)"}
+                                       |  keys:{"0":"_col0 (type: string)","1":"_col0 (type: string)"}
                                        |  outputColumnNames:["_col0","_col1","_col2","_col4"]
                                        |  Statistics:Num rows: 1 Data size: 101 Basic stats: COMPLETE Column stats: COMPLETE
                                        |<-Reducer 2 [SIMPLE_EDGE]
@@ -1327,7 +1327,7 @@ Stage-0
          File Output Operator [FS_8]
             compressed:false
             Statistics:Num rows: 1 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Select Operator [SEL_6]
                outputColumnNames:["_col0"]
                Statistics:Num rows: 1 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE
@@ -1378,7 +1378,7 @@ Stage-0
          File Output Operator [FS_29]
             compressed:false
             Statistics:Num rows: 3 Data size: 261 Basic stats: COMPLETE Column stats: COMPLETE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Select Operator [SEL_28]
             |  outputColumnNames:["_col0"]
             |  Statistics:Num rows: 3 Data size: 261 Basic stats: COMPLETE Column stats: COMPLETE
@@ -1494,7 +1494,7 @@ Stage-0
          File Output Operator [FS_34]
             compressed:false
             Statistics:Num rows: 1 Data size: 95 Basic stats: COMPLETE Column stats: COMPLETE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Select Operator [SEL_33]
             |  outputColumnNames:["_col0","_col1"]
             |  Statistics:Num rows: 1 Data size: 95 Basic stats: COMPLETE Column stats: COMPLETE
@@ -1630,10 +1630,10 @@ Stage-0
          File Output Operator [FS_12]
             compressed:false
             Statistics:Num rows: 18 Data size: 1530 Basic stats: COMPLETE Column stats: COMPLETE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Merge Join Operator [MERGEJOIN_17]
             |  condition map:[{"":"Inner Join 0 to 1"}]
-            |  keys:{"1":"_col0 (type: string)","0":"_col0 (type: string)"}
+            |  keys:{"0":"_col0 (type: string)","1":"_col0 (type: string)"}
             |  outputColumnNames:["_col0"]
             |  Statistics:Num rows: 18 Data size: 1530 Basic stats: COMPLETE Column stats: COMPLETE
             |<-Map 1 [SIMPLE_EDGE]
@@ -1684,13 +1684,13 @@ Stage-0
          File Output Operator [FS_8]
             compressed:false
             Statistics:Num rows: 100 Data size: 800 Basic stats: COMPLETE Column stats: COMPLETE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Select Operator [SEL_7]
                outputColumnNames:["_col0","_col1"]
                Statistics:Num rows: 100 Data size: 800 Basic stats: COMPLETE Column stats: COMPLETE
                Merge Join Operator [MERGEJOIN_11]
                |  condition map:[{"":"Left Outer Join0 to 1"}]
-               |  keys:{"1":"_col0 (type: string)","0":"_col0 (type: string)"}
+               |  keys:{"0":"_col0 (type: string)","1":"_col0 (type: string)"}
                |  outputColumnNames:["_col1","_col3"]
                |  Statistics:Num rows: 100 Data size: 800 Basic stats: COMPLETE Column stats: COMPLETE
                |<-Map 1 [SIMPLE_EDGE]
@@ -1737,13 +1737,13 @@ Stage-0
          File Output Operator [FS_8]
             compressed:false
             Statistics:Num rows: 100 Data size: 800 Basic stats: COMPLETE Column stats: COMPLETE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Select Operator [SEL_7]
                outputColumnNames:["_col0","_col1"]
                Statistics:Num rows: 100 Data size: 800 Basic stats: COMPLETE Column stats: COMPLETE
                Merge Join Operator [MERGEJOIN_9]
                |  condition map:[{"":"Outer Join 0 to 1"}]
-               |  keys:{"1":"_col0 (type: string)","0":"_col0 (type: string)"}
+               |  keys:{"0":"_col0 (type: string)","1":"_col0 (type: string)"}
                |  outputColumnNames:["_col1","_col3"]
                |  Statistics:Num rows: 100 Data size: 800 Basic stats: COMPLETE Column stats: COMPLETE
                |<-Map 1 [SIMPLE_EDGE]
@@ -1790,13 +1790,13 @@ Stage-0
          File Output Operator [FS_14]
             compressed:false
             Statistics:Num rows: 291 Data size: 29391 Basic stats: COMPLETE Column stats: COMPLETE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Select Operator [SEL_13]
                outputColumnNames:["_col0","_col1","_col2","_col3","_col4"]
                Statistics:Num rows: 291 Data size: 29391 Basic stats: COMPLETE Column stats: COMPLETE
                Merge Join Operator [MERGEJOIN_24]
                |  condition map:[{"":"Inner Join 0 to 1"},{"":"Inner Join 0 to 2"}]
-               |  keys:{"2":"_col0 (type: string)","1":"_col0 (type: string)","0":"_col0 (type: string)"}
+               |  keys:{"0":"_col0 (type: string)","1":"_col0 (type: string)","2":"_col0 (type: string)"}
                |  outputColumnNames:["_col1","_col2","_col4","_col5","_col6"]
                |  Statistics:Num rows: 291 Data size: 29391 Basic stats: COMPLETE Column stats: COMPLETE
                |<-Map 1 [SIMPLE_EDGE]
@@ -1865,13 +1865,13 @@ Stage-0
          File Output Operator [FS_14]
             compressed:false
             Statistics:Num rows: 291 Data size: 51798 Basic stats: COMPLETE Column stats: COMPLETE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Select Operator [SEL_13]
                outputColumnNames:["_col0","_col1","_col2","_col3"]
                Statistics:Num rows: 291 Data size: 51798 Basic stats: COMPLETE Column stats: COMPLETE
                Merge Join Operator [MERGEJOIN_24]
                |  condition map:[{"":"Inner Join 0 to 1"},{"":"Inner Join 0 to 2"}]
-               |  keys:{"2":"_col0 (type: string)","1":"_col0 (type: string)","0":"_col0 (type: string)"}
+               |  keys:{"0":"_col0 (type: string)","1":"_col0 (type: string)","2":"_col0 (type: string)"}
                |  outputColumnNames:["_col0","_col1","_col3","_col4"]
                |  Statistics:Num rows: 291 Data size: 51798 Basic stats: COMPLETE Column stats: COMPLETE
                |<-Map 1 [SIMPLE_EDGE]
@@ -1940,7 +1940,7 @@ Stage-0
          File Output Operator [FS_22]
             compressed:false
             Statistics:Num rows: 4 Data size: 404 Basic stats: COMPLETE Column stats: COMPLETE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Select Operator [SEL_19]
                outputColumnNames:["_col0","_col1","_col2","_col3","_col4"]
                Statistics:Num rows: 4 Data size: 404 Basic stats: COMPLETE Column stats: COMPLETE
@@ -1949,7 +1949,7 @@ Stage-0
                   Statistics:Num rows: 4 Data size: 404 Basic stats: COMPLETE Column stats: COMPLETE
                   Merge Join Operator [MERGEJOIN_32]
                   |  condition map:[{"":"Inner Join 0 to 1"}]
-                  |  keys:{"1":"_col0 (type: string)","0":"_col0 (type: string)"}
+                  |  keys:{"0":"_col0 (type: string)","1":"_col0 (type: string)"}
                   |  outputColumnNames:["_col1","_col2","_col3","_col4","_col6"]
                   |  Statistics:Num rows: 14 Data size: 1414 Basic stats: COMPLETE Column stats: COMPLETE
                   |<-Map 5 [SIMPLE_EDGE]
@@ -1980,7 +1980,7 @@ Stage-0
                            Statistics:Num rows: 4 Data size: 728 Basic stats: COMPLETE Column stats: COMPLETE
                            Merge Join Operator [MERGEJOIN_31]
                            |  condition map:[{"":"Outer Join 0 to 1"}]
-                           |  keys:{"1":"_col0 (type: string)","0":"_col0 (type: string)"}
+                           |  keys:{"0":"_col0 (type: string)","1":"_col0 (type: string)"}
                            |  outputColumnNames:["_col0","_col1","_col2","_col3","_col4"]
                            |  Statistics:Num rows: 18 Data size: 3276 Basic stats: COMPLETE Column stats: COMPLETE
                            |<-Map 1 [SIMPLE_EDGE]
@@ -2033,7 +2033,7 @@ Stage-0
          File Output Operator [FS_16]
             compressed:false
             Statistics:Num rows: 12 Data size: 1212 Basic stats: COMPLETE Column stats: COMPLETE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Select Operator [SEL_13]
                outputColumnNames:["_col0","_col1","_col2","_col3","_col4"]
                Statistics:Num rows: 12 Data size: 1212 Basic stats: COMPLETE Column stats: COMPLETE
@@ -2042,7 +2042,7 @@ Stage-0
                   Statistics:Num rows: 12 Data size: 1212 Basic stats: COMPLETE Column stats: COMPLETE
                   Merge Join Operator [MERGEJOIN_23]
                   |  condition map:[{"":"Right Outer Join0 to 1"},{"":"Right Outer Join0 to 2"}]
-                  |  keys:{"2":"_col0 (type: string)","1":"_col0 (type: string)","0":"_col0 (type: string)"}
+                  |  keys:{"0":"_col0 (type: string)","1":"_col0 (type: string)","2":"_col0 (type: string)"}
                   |  outputColumnNames:["_col1","_col2","_col3","_col4","_col6"]
                   |  Statistics:Num rows: 72 Data size: 7272 Basic stats: COMPLETE Column stats: COMPLETE
                   |<-Map 1 [SIMPLE_EDGE]
@@ -2109,7 +2109,7 @@ Stage-0
          File Output Operator [FS_10]
             compressed:false
             Statistics:Num rows: 1 Data size: 97 Basic stats: COMPLETE Column stats: COMPLETE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Limit [LIM_9]
                Number of rows:1
                Statistics:Num rows: 1 Data size: 97 Basic stats: COMPLETE Column stats: COMPLETE
@@ -2168,7 +2168,7 @@ Stage-0
          File Output Operator [FS_15]
             compressed:false
             Statistics:Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Limit [LIM_14]
                Number of rows:1
                Statistics:Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
@@ -2246,7 +2246,7 @@ Stage-0
          File Output Operator [FS_13]
             compressed:false
             Statistics:Num rows: 5 Data size: 340 Basic stats: COMPLETE Column stats: COMPLETE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Limit [LIM_12]
                Number of rows:5
                Statistics:Num rows: 5 Data size: 340 Basic stats: COMPLETE Column stats: COMPLETE
@@ -2304,7 +2304,7 @@ Stage-0
          File Output Operator [FS_13]
             compressed:false
             Statistics:Num rows: 5 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Limit [LIM_12]
                Number of rows:5
                Statistics:Num rows: 5 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
@@ -2372,7 +2372,7 @@ Stage-0
          File Output Operator [FS_48]
             compressed:false
             Statistics:Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Limit [LIM_47]
                Number of rows:5
                Statistics:Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
@@ -2410,7 +2410,7 @@ Stage-0
                                     Statistics:Num rows: 2 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
                                     Merge Join Operator [MERGEJOIN_60]
                                     |  condition map:[{"":"Inner Join 0 to 1"}]
-                                    |  keys:{"1":"_col0 (type: string)","0":"_col0 (type: string)"}
+                                    |  keys:{"0":"_col0 (type: string)","1":"_col0 (type: string)"}
                                     |  outputColumnNames:["_col1","_col2","_col6"]
                                     |  Statistics:Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
                                     |<-Map 11 [SIMPLE_EDGE]
@@ -2444,7 +2444,7 @@ Stage-0
                                                 Statistics:Num rows: 1 Data size: 101 Basic stats: COMPLETE Column stats: COMPLETE
                                                 Merge Join Operator [MERGEJOIN_59]
                                                 |  condition map:[{"":"Inner Join 0 to 1"}]
-                                                |  keys:{"1":"_col0 (type: string)","0":"_col0 (type: string)"}
+                                                |  keys:{"0":"_col0 (type: string)","1":"_col0 (type: string)"}
                                                 |  outputColumnNames:["_col0","_col1","_col2","_col4"]
                                                 |  Statistics:Num rows: 1 Data size: 101 Basic stats: COMPLETE Column stats: COMPLETE
                                                 |<-Reducer 10 [SIMPLE_EDGE]
@@ -2567,13 +2567,13 @@ Stage-0
          File Output Operator [FS_13]
             compressed:false
             Statistics:Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Select Operator [SEL_12]
                outputColumnNames:["_col0"]
                Statistics:Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
                Merge Join Operator [MERGEJOIN_18]
                |  condition map:[{"":"Left Semi Join 0 to 1"}]
-               |  keys:{"1":"_col0 (type: string)","0":"_col0 (type: string)"}
+               |  keys:{"0":"_col0 (type: string)","1":"_col0 (type: string)"}
                |  outputColumnNames:["_col1"]
                |  Statistics:Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
                |<-Map 1 [SIMPLE_EDGE]
@@ -2629,13 +2629,13 @@ Stage-0
          File Output Operator [FS_24]
             compressed:false
             Statistics:Num rows: 12 Data size: 1116 Basic stats: COMPLETE Column stats: COMPLETE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Select Operator [SEL_23]
                outputColumnNames:["_col0","_col1","_col2"]
                Statistics:Num rows: 12 Data size: 1116 Basic stats: COMPLETE Column stats: COMPLETE
                Merge Join Operator [MERGEJOIN_34]
                |  condition map:[{"":"Left Semi Join 0 to 1"},{"":"Left Semi Join 0 to 2"}]
-               |  keys:{"2":"_col0 (type: string)","1":"_col0 (type: string)","0":"_col0 (type: string)"}
+               |  keys:{"0":"_col0 (type: string)","1":"_col0 (type: string)","2":"_col0 (type: string)"}
                |  outputColumnNames:["_col0","_col1","_col2"]
                |  Statistics:Num rows: 12 Data size: 1116 Basic stats: COMPLETE Column stats: COMPLETE
                |<-Map 1 [SIMPLE_EDGE]
@@ -2716,7 +2716,7 @@ Stage-0
          File Output Operator [FS_53]
             compressed:false
             Statistics:Num rows: 1 Data size: 101 Basic stats: COMPLETE Column stats: COMPLETE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Select Operator [SEL_52]
             |  outputColumnNames:["_col0","_col1","_col2"]
             |  Statistics:Num rows: 1 Data size: 101 Basic stats: COMPLETE Column stats: COMPLETE
@@ -2745,7 +2745,7 @@ Stage-0
                            Statistics:Num rows: 1 Data size: 101 Basic stats: COMPLETE Column stats: COMPLETE
                            Merge Join Operator [MERGEJOIN_66]
                            |  condition map:[{"":"Left Semi Join 0 to 1"},{"":"Left Semi Join 0 to 2"}]
-                           |  keys:{"2":"_col0 (type: string)","1":"_col0 (type: string)","0":"_col0 (type: string)"}
+                           |  keys:{"0":"_col0 (type: string)","1":"_col0 (type: string)","2":"_col0 (type: string)"}
                            |  outputColumnNames:["_col0","_col1"]
                            |  Statistics:Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: COMPLETE
                            |<-Map 10 [SIMPLE_EDGE]
@@ -2931,7 +2931,7 @@ Stage-0
          File Output Operator [FS_16]
             compressed:false
             Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Group By Operator [GBY_14]
             |  aggregations:["count(VALUE._col0)"]
             |  outputColumnNames:["_col0"]
@@ -3001,7 +3001,7 @@ Stage-0
          File Output Operator [FS_16]
             compressed:false
             Statistics:Num rows: 1 Data size: 178 Basic stats: COMPLETE Column stats: COMPLETE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Select Operator [SEL_15]
                outputColumnNames:["_col0","_col1"]
                Statistics:Num rows: 1 Data size: 178 Basic stats: COMPLETE Column stats: COMPLETE
@@ -3010,7 +3010,7 @@ Stage-0
                   Statistics:Num rows: 1 Data size: 269 Basic stats: COMPLETE Column stats: COMPLETE
                   Merge Join Operator [MERGEJOIN_20]
                   |  condition map:[{"":"Left Outer Join0 to 1"}]
-                  |  keys:{"1":"_col1 (type: string)","0":"_col1 (type: string)"}
+                  |  keys:{"0":"_col1 (type: string)","1":"_col1 (type: string)"}
                   |  outputColumnNames:["_col0","_col1","_col3"]
                   |  Statistics:Num rows: 193 Data size: 51917 Basic stats: COMPLETE Column stats: COMPLETE
                   |<-Map 1 [SIMPLE_EDGE]
@@ -3091,7 +3091,7 @@ Stage-0
          File Output Operator [FS_16]
             compressed:false
             Statistics:Num rows: 1 Data size: 178 Basic stats: COMPLETE Column stats: COMPLETE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Select Operator [SEL_15]
                outputColumnNames:["_col0","_col1"]
                Statistics:Num rows: 1 Data size: 178 Basic stats: COMPLETE Column stats: COMPLETE
@@ -3100,7 +3100,7 @@ Stage-0
                   Statistics:Num rows: 1 Data size: 265 Basic stats: COMPLETE Column stats: COMPLETE
                   Merge Join Operator [MERGEJOIN_20]
                   |  condition map:[{"":"Left Outer Join0 to 1"}]
-                  |  keys:{"1":"_col0 (type: string), _col1 (type: string)","0":"_col1 (type: string), _col0 (type: string)"}
+                  |  keys:{"0":"_col1 (type: string), _col0 (type: string)","1":"_col0 (type: string), _col1 (type: string)"}
                   |  outputColumnNames:["_col0","_col1","_col3"]
                   |  Statistics:Num rows: 1 Data size: 265 Basic stats: COMPLETE Column stats: COMPLETE
                   |<-Map 4 [SIMPLE_EDGE]
@@ -3184,10 +3184,10 @@ Stage-0
          File Output Operator [FS_16]
             compressed:false
             Statistics:Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Merge Join Operator [MERGEJOIN_21]
             |  condition map:[{"":"Left Semi Join 0 to 1"}]
-            |  keys:{"1":"_col0 (type: string), _col1 (type: string)","0":"_col1 (type: string), _col0 (type: string)"}
+            |  keys:{"0":"_col1 (type: string), _col0 (type: string)","1":"_col0 (type: string), _col1 (type: string)"}
             |  outputColumnNames:["_col0","_col1"]
             |  Statistics:Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
             |<-Map 1 [SIMPLE_EDGE]
@@ -3256,10 +3256,10 @@ Stage-0
          File Output Operator [FS_16]
             compressed:false
             Statistics:Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Merge Join Operator [MERGEJOIN_21]
             |  condition map:[{"":"Left Semi Join 0 to 1"}]
-            |  keys:{"1":"_col0 (type: string), _col1 (type: string)","0":"_col1 (type: string), _col0 (type: string)"}
+            |  keys:{"0":"_col1 (type: string), _col0 (type: string)","1":"_col0 (type: string), _col1 (type: string)"}
             |  outputColumnNames:["_col0","_col1"]
             |  Statistics:Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
             |<-Map 1 [SIMPLE_EDGE]
@@ -3318,10 +3318,10 @@ Stage-0
          File Output Operator [FS_16]
             compressed:false
             Statistics:Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Merge Join Operator [MERGEJOIN_21]
             |  condition map:[{"":"Left Semi Join 0 to 1"}]
-            |  keys:{"1":"_col0 (type: string)","0":"_col0 (type: string)"}
+            |  keys:{"0":"_col0 (type: string)","1":"_col0 (type: string)"}
             |  outputColumnNames:["_col0","_col1"]
             |  Statistics:Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE
             |<-Map 1 [SIMPLE_EDGE]
@@ -3385,13 +3385,13 @@ Stage-0
          File Output Operator [FS_28]
             compressed:false
             Statistics:Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Select Operator [SEL_27]
                outputColumnNames:["_col0","_col1"]
                Statistics:Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
                Merge Join Operator [MERGEJOIN_38]
                |  condition map:[{"":"Inner Join 0 to 1"}]
-               |  keys:{"1":"_col0 (type: int)","0":"_col1 (type: int)"}
+               |  keys:{"0":"_col1 (type: int)","1":"_col0 (type: int)"}
                |  outputColumnNames:["_col1","_col2"]
                |  Statistics:Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
                |<-Reducer 2 [SIMPLE_EDGE]
@@ -3403,7 +3403,7 @@ Stage-0
                |     value expressions:_col2 (type: int)
                |     Merge Join Operator [MERGEJOIN_37]
                |     |  condition map:[{"":"Left Semi Join 0 to 1"}]
-               |     |  keys:{"1":"_col0 (type: int)","0":"_col0 (type: int)"}
+               |     |  keys:{"0":"_col0 (type: int)","1":"_col0 (type: int)"}
                |     |  outputColumnNames:["_col1","_col2"]
                |     |  Statistics:Num rows: 12 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
                |     |<-Map 1 [SIMPLE_EDGE]
@@ -3499,10 +3499,10 @@ Stage-0
          File Output Operator [FS_38]
             compressed:false
             Statistics:Num rows: 34 Data size: 6324 Basic stats: COMPLETE Column stats: COMPLETE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Merge Join Operator [MERGEJOIN_51]
             |  condition map:[{"":"Left Semi Join 0 to 1"}]
-            |  keys:{"1":"_col0 (type: bigint)","0":"_col2 (type: bigint)"}
+            |  keys:{"0":"_col2 (type: bigint)","1":"_col0 (type: bigint)"}
             |  outputColumnNames:["_col0","_col1","_col2"]
             |  Statistics:Num rows: 34 Data size: 6324 Basic stats: COMPLETE Column stats: COMPLETE
             |<-Reducer 3 [SIMPLE_EDGE]
@@ -3534,7 +3534,7 @@ Stage-0
             |                 Statistics:Num rows: 83 Data size: 15438 Basic stats: COMPLETE Column stats: COMPLETE
             |                 Merge Join Operator [MERGEJOIN_50]
             |                 |  condition map:[{"":"Left Semi Join 0 to 1"}]
-            |                 |  keys:{"1":"_col0 (type: string)","0":"_col0 (type: string)"}
+            |                 |  keys:{"0":"_col0 (type: string)","1":"_col0 (type: string)"}
             |                 |  outputColumnNames:["_col0","_col1"]
             |                 |  Statistics:Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE
             |                 |<-Map 1 [SIMPLE_EDGE]
@@ -3645,10 +3645,10 @@ Stage-0
          File Output Operator [FS_22]
             compressed:false
             Statistics:Num rows: 3 Data size: 681 Basic stats: COMPLETE Column stats: COMPLETE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Merge Join Operator [MERGEJOIN_27]
             |  condition map:[{"":"Left Semi Join 0 to 1"}]
-            |  keys:{"1":"_col0 (type: string)","0":"_col1 (type: string)"}
+            |  keys:{"0":"_col1 (type: string)","1":"_col0 (type: string)"}
             |  outputColumnNames:["_col0","_col1","_col2"]
             |  Statistics:Num rows: 3 Data size: 681 Basic stats: COMPLETE Column stats: COMPLETE
             |<-Reducer 2 [SIMPLE_EDGE]
@@ -3701,7 +3701,7 @@ Stage-0
                            predicate:first_value_window_0 is not null (type: boolean)
                            Statistics:Num rows: 13 Data size: 6383 Basic stats: COMPLETE Column stats: COMPLETE
                            PTF Operator [PTF_11]
-                              Function definitions:[{"Input definition":{"type:":"WINDOWING"}},{"partition by:":"_col2","name:":"windowingtablefunction","order by:":"_col5"}]
+                              Function definitions:[{"Input definition":{"type:":"WINDOWING"}},{"name:":"windowingtablefunction","order by:":"_col5","partition by:":"_col2"}]
                               Statistics:Num rows: 26 Data size: 12766 Basic stats: COMPLETE Column stats: COMPLETE
                               Select Operator [SEL_10]
                               |  outputColumnNames:["_col1","_col2","_col5"]
@@ -3747,7 +3747,7 @@ Stage-0
          File Output Operator [FS_29]
             compressed:false
             Statistics:Num rows: 302 Data size: 53756 Basic stats: COMPLETE Column stats: NONE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Select Operator [SEL_28]
             |  outputColumnNames:["_col0","_col1"]
             |  Statistics:Num rows: 302 Data size: 53756 Basic stats: COMPLETE Column stats: NONE
@@ -3765,7 +3765,7 @@ Stage-0
                         Statistics:Num rows: 302 Data size: 53756 Basic stats: COMPLETE Column stats: NONE
                         Merge Join Operator [MERGEJOIN_37]
                         |  condition map:[{"":"Left Outer Join0 to 1"}]
-                        |  keys:{"1":"_col0 (type: string)","0":"_col0 (type: string)"}
+                        |  keys:{"0":"_col0 (type: string)","1":"_col0 (type: string)"}
                         |  outputColumnNames:["_col0","_col1","_col3"]
                         |  Statistics:Num rows: 605 Data size: 107690 Basic stats: COMPLETE Column stats: NONE
                         |<-Map 7 [SIMPLE_EDGE]
@@ -3868,7 +3868,7 @@ Stage-0
          File Output Operator [FS_27]
             compressed:false
             Statistics:Num rows: 15 Data size: 3507 Basic stats: COMPLETE Column stats: NONE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Select Operator [SEL_26]
                outputColumnNames:["_col0","_col1","_col2"]
                Statistics:Num rows: 15 Data size: 3507 Basic stats: COMPLETE Column stats: NONE
@@ -3877,7 +3877,7 @@ Stage-0
                   Statistics:Num rows: 15 Data size: 3507 Basic stats: COMPLETE Column stats: NONE
                   Merge Join Operator [MERGEJOIN_35]
                   |  condition map:[{"":"Left Outer Join0 to 1"}]
-                  |  keys:{"1":"_col0 (type: string), _col1 (type: string)","0":"_col0 (type: string), _col1 (type: string)"}
+                  |  keys:{"0":"_col0 (type: string), _col1 (type: string)","1":"_col0 (type: string), _col1 (type: string)"}
                   |  outputColumnNames:["_col0","_col1","_col2","_col4"]
                   |  Statistics:Num rows: 30 Data size: 7014 Basic stats: COMPLETE Column stats: NONE
                   |<-Map 6 [SIMPLE_EDGE]
@@ -3982,7 +3982,7 @@ Stage-0
          File Output Operator [FS_39]
             compressed:false
             Statistics:Num rows: 1 Data size: 146 Basic stats: COMPLETE Column stats: NONE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Select Operator [SEL_38]
             |  outputColumnNames:["_col0","_col1"]
             |  Statistics:Num rows: 1 Data size: 146 Basic stats: COMPLETE Column stats: NONE
@@ -4007,7 +4007,7 @@ Stage-0
                   |        Statistics:Num rows: 1 Data size: 133 Basic stats: COMPLETE Column stats: COMPLETE
                   |        Merge Join Operator [MERGEJOIN_48]
                   |        |  condition map:[{"":"Left Outer Join0 to 1"}]
-                  |        |  keys:{"1":"_col0 (type: double)","0":"UDFToDouble(_col1) (type: double)"}
+                  |        |  keys:{"0":"UDFToDouble(_col1) (type: double)","1":"_col0 (type: double)"}
                   |        |  outputColumnNames:["_col0","_col1","_col2"]
                   |        |  Statistics:Num rows: 1 Data size: 133 Basic stats: COMPLETE Column stats: COMPLETE
                   |        |<-Map 1 [SIMPLE_EDGE]
@@ -4131,7 +4131,7 @@ Stage-0
          File Output Operator [FS_44]
             compressed:false
             Statistics:Num rows: 2 Data size: 256 Basic stats: COMPLETE Column stats: NONE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Select Operator [SEL_43]
             |  outputColumnNames:["_col0","_col1"]
             |  Statistics:Num rows: 2 Data size: 256 Basic stats: COMPLETE Column stats: NONE
@@ -4149,7 +4149,7 @@ Stage-0
                         Statistics:Num rows: 2 Data size: 256 Basic stats: COMPLETE Column stats: NONE
                         Merge Join Operator [MERGEJOIN_53]
                         |  condition map:[{"":"Left Outer Join0 to 1"}]
-                        |  keys:{"1":"_col0 (type: string), _col1 (type: double)","0":"_col0 (type: string), _col1 (type: double)"}
+                        |  keys:{"0":"_col0 (type: string), _col1 (type: double)","1":"_col0 (type: string), _col1 (type: double)"}
                         |  outputColumnNames:["_col0","_col1","_col3"]
                         |  Statistics:Num rows: 5 Data size: 641 Basic stats: COMPLETE Column stats: NONE
                         |<-Reducer 10 [SIMPLE_EDGE]
@@ -4294,12 +4294,12 @@ Stage-0
          File Output Operator [FS_6]
             compressed:false
             Statistics:Num rows: 20 Data size: 1040 Basic stats: COMPLETE Column stats: COMPLETE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Select Operator [SEL_4]
                outputColumnNames:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9"]
                Statistics:Num rows: 20 Data size: 1040 Basic stats: COMPLETE Column stats: COMPLETE
                PTF Operator [PTF_3]
-                  Function definitions:[{"Input definition":{"type:":"WINDOWING"}},{"partition by:":"0","name:":"windowingtablefunction","order by:":"0"}]
+                  Function definitions:[{"Input definition":{"type:":"WINDOWING"}},{"name:":"windowingtablefunction","order by:":"0","partition by:":"0"}]
                   Statistics:Num rows: 20 Data size: 9184 Basic stats: COMPLETE Column stats: COMPLETE
                   Select Operator [SEL_2]
                   |  outputColumnNames:["_col2","_col3"]
@@ -4332,12 +4332,12 @@ Stage-0
          File Output Operator [FS_6]
             compressed:false
             Statistics:Num rows: 20 Data size: 1040 Basic stats: COMPLETE Column stats: COMPLETE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Select Operator [SEL_4]
                outputColumnNames:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9"]
                Statistics:Num rows: 20 Data size: 1040 Basic stats: COMPLETE Column stats: COMPLETE
                PTF Operator [PTF_3]
-                  Function definitions:[{"Input definition":{"type:":"WINDOWING"}},{"partition by:":"0","name:":"windowingtablefunction","order by:":"0"}]
+                  Function definitions:[{"Input definition":{"type:":"WINDOWING"}},{"name:":"windowingtablefunction","order by:":"0","partition by:":"0"}]
                   Statistics:Num rows: 20 Data size: 9184 Basic stats: COMPLETE Column stats: COMPLETE
                   Select Operator [SEL_2]
                   |  outputColumnNames:["_col2","_col3"]
@@ -4370,12 +4370,12 @@ Stage-0
          File Output Operator [FS_6]
             compressed:false
             Statistics:Num rows: 20 Data size: 1280 Basic stats: COMPLETE Column stats: COMPLETE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Select Operator [SEL_4]
                outputColumnNames:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10"]
                Statistics:Num rows: 20 Data size: 1280 Basic stats: COMPLETE Column stats: COMPLETE
                PTF Operator [PTF_3]
-                  Function definitions:[{"Input definition":{"type:":"WINDOWING"}},{"partition by:":"_col0","name:":"windowingtablefunction","order by:":"_col1"}]
+                  Function definitions:[{"Input definition":{"type:":"WINDOWING"}},{"name:":"windowingtablefunction","order by:":"_col1","partition by:":"_col0"}]
                   Statistics:Num rows: 20 Data size: 12244 Basic stats: COMPLETE Column stats: COMPLETE
                   Select Operator [SEL_2]
                   |  outputColumnNames:["_col0","_col1","_col2","_col3"]
@@ -4408,12 +4408,12 @@ Stage-0
          File Output Operator [FS_6]
             compressed:false
             Statistics:Num rows: 25 Data size: 4475 Basic stats: COMPLETE Column stats: COMPLETE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Select Operator [SEL_4]
                outputColumnNames:["_col0","_col1","_col2"]
                Statistics:Num rows: 25 Data size: 4475 Basic stats: COMPLETE Column stats: COMPLETE
                PTF Operator [PTF_3]
-                  Function definitions:[{"Input definition":{"type:":"WINDOWING"}},{"partition by:":"_col0","name:":"windowingtablefunction","order by:":"_col1"}]
+                  Function definitions:[{"Input definition":{"type:":"WINDOWING"}},{"name:":"windowingtablefunction","order by:":"_col1","partition by:":"_col0"}]
                   Statistics:Num rows: 25 Data size: 11075 Basic stats: COMPLETE Column stats: COMPLETE
                   Select Operator [SEL_2]
                   |  outputColumnNames:["_col0","_col1"]
@@ -4455,7 +4455,7 @@ Stage-0
          File Output Operator [FS_20]
             compressed:false
             Statistics:Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Group By Operator [GBY_18]
             |  aggregations:["sum(VALUE._col0)","sum(VALUE._col1)"]
             |  outputColumnNames:["_col0","_col1"]
@@ -4494,7 +4494,7 @@ Stage-0
                                     Statistics:Num rows: 60 Data size: 5160 Basic stats: COMPLETE Column stats: COMPLETE
                                     Merge Join Operator [MERGEJOIN_25]
                                     |  condition map:[{"":"Inner Join 0 to 1"}]
-                                    |  keys:{"1":"_col0 (type: string)","0":"_col0 (type: string)"}
+                                    |  keys:{"0":"_col0 (type: string)","1":"_col0 (type: string)"}
                                     |  outputColumnNames:["_col1"]
                                     |  Statistics:Num rows: 60 Data size: 5160 Basic stats: COMPLETE Column stats: COMPLETE
                                     |<-Map 1 [SIMPLE_EDGE]
@@ -4555,7 +4555,7 @@ Stage-0
          File Output Operator [FS_20]
             compressed:false
             Statistics:Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Group By Operator [GBY_18]
             |  aggregations:["sum(VALUE._col0)","sum(VALUE._col1)"]
             |  outputColumnNames:["_col0","_col1"]
@@ -4594,7 +4594,7 @@ Stage-0
                                     Statistics:Num rows: 60 Data size: 5160 Basic stats: COMPLETE Column stats: COMPLETE
                                     Merge Join Operator [MERGEJOIN_25]
                                     |  condition map:[{"":"Inner Join 0 to 1"}]
-                                    |  keys:{"1":"_col0 (type: string)","0":"_col0 (type: string)"}
+                                    |  keys:{"0":"_col0 (type: string)","1":"_col0 (type: string)"}
                                     |  outputColumnNames:["_col1"]
                                     |  Statistics:Num rows: 60 Data size: 5160 Basic stats: COMPLETE Column stats: COMPLETE
                                     |<-Map 1 [SIMPLE_EDGE]
@@ -4655,7 +4655,7 @@ Stage-0
          File Output Operator [FS_20]
             compressed:false
             Statistics:Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Group By Operator [GBY_18]
             |  aggregations:["sum(VALUE._col0)","sum(VALUE._col1)"]
             |  outputColumnNames:["_col0","_col1"]
@@ -4749,7 +4749,7 @@ Stage-0
          File Output Operator [FS_22]
             compressed:false
             Statistics:Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Group By Operator [GBY_20]
             |  aggregations:["sum(VALUE._col0)","sum(VALUE._col1)"]
             |  outputColumnNames:["_col0","_col1"]
@@ -4785,7 +4785,7 @@ Stage-0
                                  Statistics:Num rows: 12 Data size: 1128 Basic stats: COMPLETE Column stats: COMPLETE
                                  Merge Join Operator [MERGEJOIN_27]
                                  |  condition map:[{"":"Left Semi Join 0 to 1"}]
-                                 |  keys:{"1":"_col0 (type: string)","0":"_col0 (type: string)"}
+                                 |  keys:{"0":"_col0 (type: string)","1":"_col0 (type: string)"}
                                  |  outputColumnNames:["_col0"]
                                  |  Statistics:Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
                                  |<-Map 1 [SIMPLE_EDGE]
@@ -4867,7 +4867,7 @@ Stage-0
          File Output Operator [FS_7]
             compressed:false
             Statistics:Num rows: 2 Data size: 39 Basic stats: COMPLETE Column stats: NONE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Group By Operator [GBY_5]
             |  aggregations:["count(DISTINCT KEY._col1:0._col0)","count(DISTINCT KEY._col1:1._col0)","sum(VALUE._col2)"]
             |  keys:KEY._col0 (type: int)
@@ -4909,7 +4909,7 @@ Stage-0
          File Output Operator [FS_6]
             compressed:false
             Statistics:Num rows: 2 Data size: 39 Basic stats: COMPLETE Column stats: NONE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Group By Operator [GBY_4]
             |  aggregations:["count(DISTINCT KEY._col1:0._col0)","count(DISTINCT KEY._col1:1._col0)","sum(VALUE._col0)"]
             |  keys:KEY._col0 (type: int)
@@ -5019,7 +5019,7 @@ Stage-0
          File Output Operator [FS_6]
             compressed:true
             Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Group By Operator [GBY_4]
             |  aggregations:["count(1)"]
             |  outputColumnNames:["_col0"]
@@ -5051,7 +5051,7 @@ Stage-0
          File Output Operator [FS_6]
             compressed:true
             Statistics:Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Group By Operator [GBY_4]
             |  aggregations:["sum(VALUE._col0)","sum(VALUE._col1)"]
             |  outputColumnNames:["_col0","_col1"]
@@ -5110,7 +5110,7 @@ Stage-0
          File Output Operator [FS_6]
             compressed:true
             Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Group By Operator [GBY_4]
             |  aggregations:["count(1)"]
             |  outputColumnNames:["_col0"]
@@ -5142,7 +5142,7 @@ Stage-0
          File Output Operator [FS_6]
             compressed:true
             Statistics:Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Group By Operator [GBY_4]
             |  aggregations:["sum(VALUE._col0)","sum(VALUE._col1)"]
             |  outputColumnNames:["_col0","_col1"]
@@ -5192,7 +5192,7 @@ Stage-0
          File Output Operator [FS_9]
             compressed:true
             Statistics:Num rows: 125000 Data size: 10875000 Basic stats: COMPLETE Column stats: COMPLETE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Merge Join Operator [MERGEJOIN_11]
             |  condition map:[{"":"Inner Join 0 to 1"}]
             |  keys:{}
@@ -5263,7 +5263,7 @@ Stage-3
                      File Output Operator [FS_8]
                         compressed:true
                         Statistics:Num rows: 10 Data size: 1780 Basic stats: COMPLETE Column stats: COMPLETE
-                        table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","name:":"default.nzhang_CTAS1","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+                        table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","name:":"default.nzhang_CTAS1"}
                         Limit [LIM_7]
                            Number of rows:10
                            Statistics:Num rows: 10 Data size: 1780 Basic stats: COMPLETE Column stats: COMPLETE
@@ -5331,7 +5331,7 @@ Stage-3
                      File Output Operator [FS_8]
                         compressed:true
                         Statistics:Num rows: 10 Data size: 1920 Basic stats: COMPLETE Column stats: COMPLETE
-                        table:{"serde:":"org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe","name:":"default.nzhang_ctas3","input format:":"org.apache.hadoop.hive.ql.io.RCFileInputFormat","output format:":"org.apache.hadoop.hive.ql.io.RCFileOutputFormat"}
+                        table:{"input format:":"org.apache.hadoop.hive.ql.io.RCFileInputFormat","output format:":"org.apache.hadoop.hive.ql.io.RCFileOutputFormat","serde:":"org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe","name:":"default.nzhang_ctas3"}
                         Limit [LIM_7]
                            Number of rows:10
                            Statistics:Num rows: 10 Data size: 1920 Basic stats: COMPLETE Column stats: COMPLETE
@@ -5434,7 +5434,7 @@ Stage-0
          File Output Operator [FS_12]
             compressed:true
             Statistics:Num rows: 13778 Data size: 4904968 Basic stats: COMPLETE Column stats: COMPLETE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Select Operator [SEL_11]
             |  outputColumnNames:["_col0","_col1","_col2","_col3"]
             |  Statistics:Num rows: 13778 Data size: 4904968 Basic stats: COMPLETE Column stats: COMPLETE
@@ -5510,13 +5510,13 @@ Stage-0
          File Output Operator [FS_6]
             compressed:true
             Statistics:Num rows: 3 Data size: 28 Basic stats: COMPLETE Column stats: NONE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Select Operator [SEL_5]
                outputColumnNames:["_col0","_col1","_col2","_col3"]
                Statistics:Num rows: 3 Data size: 28 Basic stats: COMPLETE Column stats: NONE
                Merge Join Operator [MERGEJOIN_7]
                |  condition map:[{"":"Inner Join 0 to 1"}]
-               |  keys:{"1":"value (type: int)","0":"key (type: int)"}
+               |  keys:{"0":"key (type: int)","1":"value (type: int)"}
                |  outputColumnNames:["_col0","_col1","_col5","_col6"]
                |  Statistics:Num rows: 3 Data size: 28 Basic stats: COMPLETE Column stats: NONE
                |<-Map 1 [SIMPLE_EDGE]
@@ -5557,13 +5557,13 @@ Stage-0
          File Output Operator [FS_11]
             compressed:true
             Statistics:Num rows: 4 Data size: 37 Basic stats: COMPLETE Column stats: NONE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Select Operator [SEL_10]
                outputColumnNames:["_col0","_col1","_col2","_col3","_col4","_col5"]
                Statistics:Num rows: 4 Data size: 37 Basic stats: COMPLETE Column stats: NONE
                Merge Join Operator [MERGEJOIN_21]
                |  condition map:[{"":"Inner Join 0 to 1"},{"":"Inner Join 0 to 2"}]
-               |  keys:{"2":"key (type: int)","1":"value (type: int)","0":"key (type: int)"}
+               |  keys:{"0":"key (type: int)","1":"value (type: int)","2":"key (type: int)"}
                |  outputColumnNames:["_col0","_col1","_col5","_col6","_col10","_col11"]
                |  Statistics:Num rows: 4 Data size: 37 Basic stats: COMPLETE Column stats: NONE
                |<-Map 1 [SIMPLE_EDGE]
@@ -5623,13 +5623,13 @@ Stage-0
          File Output Operator [FS_8]
             compressed:true
             Statistics:Num rows: 6 Data size: 57 Basic stats: COMPLETE Column stats: NONE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Select Operator [SEL_7]
                outputColumnNames:["_col0","_col1","_col2","_col3","_col4","_col5"]
                Statistics:Num rows: 6 Data size: 57 Basic stats: COMPLETE Column stats: NONE
                Merge Join Operator [MERGEJOIN_9]
                |  condition map:[{"":"Inner Join 0 to 1"},{"":"Inner Join 0 to 2"}]
-               |  keys:{"2":"key (type: int)","1":"value (type: int)","0":"key (type: int)"}
+               |  keys:{"0":"key (type: int)","1":"value (type: int)","2":"key (type: int)"}
                |  outputColumnNames:["_col0","_col1","_col5","_col6","_col10","_col11"]
                |  Statistics:Num rows: 6 Data size: 57 Basic stats: COMPLETE Column stats: NONE
                |<-Map 1 [SIMPLE_EDGE]
@@ -5680,13 +5680,13 @@ Stage-0
          File Output Operator [FS_11]
             compressed:true
             Statistics:Num rows: 4 Data size: 37 Basic stats: COMPLETE Column stats: NONE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Select Operator [SEL_10]
                outputColumnNames:["_col0","_col1","_col2","_col3","_col4","_col5"]
                Statistics:Num rows: 4 Data size: 37 Basic stats: COMPLETE Column stats: NONE
                Merge Join Operator [MERGEJOIN_15]
                |  condition map:[{"":"Inner Join 0 to 1"},{"":"Inner Join 0 to 2"}]
-               |  keys:{"2":"key (type: int), value (type: int)","1":"value (type: int), key (type: int)","0":"key (type: int), value (type: int)"}
+               |  keys:{"0":"key (type: int), value (type: int)","1":"value (type: int), key (type: int)","2":"key (type: int), value (type: int)"}
                |  outputColumnNames:["_col0","_col1","_col5","_col6","_col10","_col11"]
                |  Statistics:Num rows: 4 Data size: 37 Basic stats: COMPLETE Column stats: NONE
                |<-Map 1 [SIMPLE_EDGE]
@@ -5743,13 +5743,13 @@ Stage-0
          File Output Operator [FS_8]
             compressed:true
             Statistics:Num rows: 6 Data size: 57 Basic stats: COMPLETE Column stats: NONE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Select Operator [SEL_7]
                outputColumnNames:["_col0","_col1","_col2","_col3","_col4","_col5"]
                Statistics:Num rows: 6 Data size: 57 Basic stats: COMPLETE Column stats: NONE
                Merge Join Operator [MERGEJOIN_9]
                |  condition map:[{"":"Inner Join 0 to 1"},{"":"Inner Join 0 to 2"}]
-               |  keys:{"2":"key (type: int), value (type: int)","1":"value (type: int), key (type: int)","0":"key (type: int), value (type: int)"}
+               |  keys:{"0":"key (type: int), value (type: int)","1":"value (type: int), key (type: int)","2":"key (type: int), value (type: int)"}
                |  outputColumnNames:["_col0","_col1","_col5","_col6","_col10","_col11"]
                |  Statistics:Num rows: 6 Data size: 57 Basic stats: COMPLETE Column stats: NONE
                |<-Map 1 [SIMPLE_EDGE]
@@ -5797,13 +5797,13 @@ Stage-0
          File Output Operator [FS_6]
             compressed:true
             Statistics:Num rows: 3 Data size: 28 Basic stats: COMPLETE Column stats: NONE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Select Operator [SEL_5]
                outputColumnNames:["_col0","_col1","_col2","_col3"]
                Statistics:Num rows: 3 Data size: 28 Basic stats: COMPLETE Column stats: NONE
                Merge Join Operator [MERGEJOIN_7]
                |  condition map:[{"":"Left Outer Join0 to 1"}]
-               |  keys:{"1":"value (type: int)","0":"key (type: int)"}
+               |  keys:{"0":"key (type: int)","1":"value (type: int)"}
                |  outputColumnNames:[

<TRUNCATED>
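
The q.out hunks above change nothing but the ordering of serialized keys: the same serde / input format / output format entries and the same join keys reappear with "0" printed before "1" and with map keys sorted. A minimal, self-contained sketch of that kind of fix (hypothetical class name, not the actual explain-plan code) is to render each map through a sorted view instead of relying on the backing map's iteration order:

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.TreeMap;

public class DeterministicExplainSketch {
  public static void main(String[] args) {
    Map<String, String> keys = new LinkedHashMap<>();
    keys.put("1", "_col0 (type: string)");  // inserted first
    keys.put("0", "_col1 (type: string)");  // inserted second

    // Insertion-order rendering: "1" comes out before "0",
    // like the '-' lines in the diff.
    System.out.println("before: " + keys);

    // Sorted rendering: TreeMap orders by key, so "0" comes out
    // before "1", like the '+' lines in the diff.
    System.out.println("after:  " + new TreeMap<>(keys));
  }
}

Sorting makes the plan text stable across runs and JVMs, which is what lets golden files like these stop churning.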

[10/24] hive git commit: HIVE-11645 : Add in-place updates for dynamic partitions loading (Ashutosh Chauhan via Prasanth J)

Posted by pr...@apache.org.
HIVE-11645 : Add in-place updates for dynamic partitions loading (Ashutosh Chauhan via Prasanth J)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/f4361bf3
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/f4361bf3
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/f4361bf3

Branch: refs/heads/llap
Commit: f4361bf30689c4767e966e11c610f7ead632415a
Parents: 9fe8802
Author: Ashutosh Chauhan <ha...@apache.org>
Authored: Thu Sep 10 14:52:43 2015 -0700
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Thu Sep 10 14:52:43 2015 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hive/ql/exec/MoveTask.java    | 12 ++--
 .../apache/hadoop/hive/ql/exec/StatsTask.java   | 13 +++-
 .../hadoop/hive/ql/exec/tez/InPlaceUpdates.java | 65 +++++++++++++++++++
 .../hadoop/hive/ql/exec/tez/TezJobMonitor.java  | 66 ++------------------
 .../apache/hadoop/hive/ql/metadata/Hive.java    | 25 ++++++--
 5 files changed, 106 insertions(+), 75 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/f4361bf3/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
index 50c4a96..a1f8973 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
@@ -361,7 +361,7 @@ public class MoveTask extends Task<MoveWork> implements Serializable {
             if (dps != null && dps.size() > 0) {
               pushFeed(FeedType.DYNAMIC_PARTITIONS, dps);
             }
-
+            console.printInfo(System.getProperty("line.separator"));
             long startTime = System.currentTimeMillis();
             // load the list of DP partitions and return the list of partition specs
             // TODO: In a follow-up to HIVE-1361, we should refactor loadDynamicPartitions
@@ -381,8 +381,9 @@ public class MoveTask extends Task<MoveWork> implements Serializable {
                 isSkewedStoredAsDirs(tbd),
                 work.getLoadTableWork().getWriteType() != AcidUtils.Operation.NOT_ACID,
                 SessionState.get().getTxnMgr().getCurrentTxnId());
-            console.printInfo("\t Time taken for load dynamic partitions : "  +
-                (System.currentTimeMillis() - startTime));
+
+            console.printInfo("\t Time taken to load dynamic partitions: "  +
+                (System.currentTimeMillis() - startTime)/1000.0 + " seconds");
 
             if (dp.size() == 0 && conf.getBoolVar(HiveConf.ConfVars.HIVE_ERROR_ON_EMPTY_PARTITION)) {
               throw new HiveException("This query creates no partitions." +
@@ -425,11 +426,10 @@ public class MoveTask extends Task<MoveWork> implements Serializable {
                 SessionState.get().getLineageState().setLineage(tbd.getSourcePath(), dc,
                     table.getCols());
               }
-
-              console.printInfo("\tLoading partition " + entry.getKey());
+              LOG.info("\tLoading partition " + entry.getKey());
             }
             console.printInfo("\t Time taken for adding to write entity : " +
-                (System.currentTimeMillis() - startTime));
+                (System.currentTimeMillis() - startTime)/1000.0 + " seconds");
             dc = null; // reset data container to prevent it being added again.
           } else { // static partitions
             List<String> partVals = MetaStoreUtils.getPvals(table.getPartCols(),
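
One detail in the MoveTask hunks above is easy to miss: the elapsed time is divided by the double literal 1000.0, not the int literal 1000. A two-line illustration of why (hypothetical class name):

public class ElapsedSecondsSketch {
  public static void main(String[] args) {
    long elapsedMillis = 1234;
    // long / int truncates toward zero:
    System.out.println(elapsedMillis / 1000 + " seconds");    // prints "1 seconds"
    // long / double keeps the fraction:
    System.out.println(elapsedMillis / 1000.0 + " seconds");  // prints "1.234 seconds"
  }
}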

http://git-wip-us.apache.org/repos/asf/hive/blob/f4361bf3/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java
index 2a8167a..41ece04 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java
@@ -30,6 +30,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.MetaException;
@@ -182,8 +183,10 @@ public class StatsTask extends Task<StatsWork> implements Serializable {
         parameters.put(StatsSetupConst.STATS_GENERATED_VIA_STATS_TASK, StatsSetupConst.TRUE);
 
         db.alterTable(tableFullName, new Table(tTable));
-
-        console.printInfo("Table " + tableFullName + " stats: [" + toString(parameters) + ']');
+        if (conf.getBoolVar(ConfVars.TEZ_EXEC_SUMMARY)) {
+          console.printInfo("Table " + tableFullName + " stats: [" + toString(parameters) + ']');
+        }
+        LOG.info("Table " + tableFullName + " stats: [" + toString(parameters) + ']');
       } else {
         // Partitioned table:
         // Need to get the old stats of the partition
@@ -215,7 +218,11 @@ public class StatsTask extends Task<StatsWork> implements Serializable {
           parameters.put(StatsSetupConst.STATS_GENERATED_VIA_STATS_TASK, StatsSetupConst.TRUE);
           updates.add(new Partition(table, tPart));
 
-          console.printInfo("Partition " + tableFullName + partn.getSpec() +
+          if (conf.getBoolVar(ConfVars.TEZ_EXEC_SUMMARY)) {
+            console.printInfo("Partition " + tableFullName + partn.getSpec() +
+            " stats: [" + toString(parameters) + ']');
+          }
+          LOG.info("Partition " + tableFullName + partn.getSpec() +
               " stats: [" + toString(parameters) + ']');
         }
         if (!updates.isEmpty()) {
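
The StatsTask hunks apply one pattern throughout: stats lines always go to the log, and are echoed to the console only when the execution summary is enabled (TEZ_EXEC_SUMMARY in the real code). A self-contained sketch of that pattern, with java.util.logging standing in for commons-logging and a hypothetical class name:

import java.util.logging.Logger;

public class StatsReportSketch {
  private static final Logger LOG = Logger.getLogger(StatsReportSketch.class.getName());

  static void reportStats(boolean summaryEnabled, String message) {
    LOG.info(message);               // unconditional: the log stays complete
    if (summaryEnabled) {
      System.out.println(message);   // opt-in console echo
    }
  }

  public static void main(String[] args) {
    reportStats(false, "Table default.t stats: [numRows=42]");  // log only
    reportStats(true,  "Table default.t stats: [numRows=42]");  // log + console
  }
}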

http://git-wip-us.apache.org/repos/asf/hive/blob/f4361bf3/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/InPlaceUpdates.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/InPlaceUpdates.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/InPlaceUpdates.java
new file mode 100644
index 0000000..6ecfe71
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/InPlaceUpdates.java
@@ -0,0 +1,65 @@
+package org.apache.hadoop.hive.ql.exec.tez;
+
+import static org.fusesource.jansi.Ansi.ansi;
+import static org.fusesource.jansi.internal.CLibrary.STDERR_FILENO;
+import static org.fusesource.jansi.internal.CLibrary.STDOUT_FILENO;
+import static org.fusesource.jansi.internal.CLibrary.isatty;
+
+import java.io.PrintStream;
+
+import jline.TerminalFactory;
+
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.session.SessionState;
+import org.fusesource.jansi.Ansi;
+
+public class InPlaceUpdates {
+
+  private static final int MIN_TERMINAL_WIDTH = 80;
+
+  static boolean isUnixTerminal() {
+
+    String os = System.getProperty("os.name");
+    if (os.startsWith("Windows")) {
+      // we do not support Windows, we will revisit this if we really need it for windows.
+      return false;
+    }
+
+    // We must be on some unix variant..
+    // check if standard out is a terminal
+    try {
+      // isatty system call will return 1 if the file descriptor is terminal else 0
+      if (isatty(STDOUT_FILENO) == 0) {
+        return false;
+      }
+      if (isatty(STDERR_FILENO) == 0) {
+        return false;
+      }
+    } catch (NoClassDefFoundError ignore) {
+      // These errors happen if the JNI lib is not available for your platform.
+      return false;
+    } catch (UnsatisfiedLinkError ignore) {
+      // These errors happen if the JNI lib is not available for your platform.
+      return false;
+    }
+    return true;
+  }
+
+  public static boolean inPlaceEligible(HiveConf conf) {
+    boolean inPlaceUpdates = HiveConf.getBoolVar(conf, HiveConf.ConfVars.TEZ_EXEC_INPLACE_PROGRESS);
+
+    // we need at least 80 chars wide terminal to display in-place updates properly
+    return inPlaceUpdates && !SessionState.getConsole().getIsSilent() && isUnixTerminal()
+      && TerminalFactory.get().getWidth() >= MIN_TERMINAL_WIDTH;
+  }
+
+  public static void reprintLine(PrintStream out, String line) {
+    out.print(ansi().eraseLine(Ansi.Erase.ALL).a(line).a('\n').toString());
+    out.flush();
+  }
+
+  public static void rePositionCursor(PrintStream ps) {
+    ps.print(ansi().cursorUp(0).toString());
+    ps.flush();
+  }
+}
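
InPlaceUpdates.inPlaceEligible() gates in-place rendering on three conditions: the feature is enabled, stdout and stderr are real terminals, and the terminal is at least 80 columns wide. A portable sketch of the same gate without the jansi/jline dependencies (System.console() and the COLUMNS environment variable are rough stand-ins for the isatty() and TerminalFactory checks, and the class name is hypothetical):

public class TerminalCheckSketch {
  private static final int MIN_TERMINAL_WIDTH = 80;

  static boolean inPlaceEligible() {
    // System.console() is non-null only when the JVM is attached to a terminal.
    boolean interactive = System.console() != null;
    int width = 0;
    String columns = System.getenv("COLUMNS");  // often unset unless exported by the shell
    if (columns != null) {
      try {
        width = Integer.parseInt(columns.trim());
      } catch (NumberFormatException ignore) {
        // fall through with width = 0, i.e. not eligible
      }
    }
    return interactive && width >= MIN_TERMINAL_WIDTH;
  }

  public static void main(String[] args) {
    System.out.println("in-place eligible: " + inPlaceEligible());
  }
}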

http://git-wip-us.apache.org/repos/asf/hive/blob/f4361bf3/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezJobMonitor.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezJobMonitor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezJobMonitor.java
index 1a4decf..1e1603b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezJobMonitor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezJobMonitor.java
@@ -20,9 +20,6 @@ package org.apache.hadoop.hive.ql.exec.tez;
 
 import static org.apache.tez.dag.api.client.DAGStatus.State.RUNNING;
 import static org.fusesource.jansi.Ansi.ansi;
-import static org.fusesource.jansi.internal.CLibrary.STDOUT_FILENO;
-import static org.fusesource.jansi.internal.CLibrary.STDERR_FILENO;
-import static org.fusesource.jansi.internal.CLibrary.isatty;
 
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.exec.FileSinkOperator;
@@ -74,7 +71,7 @@ import jline.TerminalFactory;
 public class TezJobMonitor {
 
   private static final String CLASS_NAME = TezJobMonitor.class.getName();
-  private static final int MIN_TERMINAL_WIDTH = 80;
+
   private static final int COLUMN_1_WIDTH = 16;
   private static final int SEPARATOR_WIDTH = 80;
 
@@ -156,42 +153,13 @@ public class TezJobMonitor {
     }
   }
 
-  private static boolean isUnixTerminal() {
-
-    String os = System.getProperty("os.name");
-    if (os.startsWith("Windows")) {
-      // we do not support Windows, we will revisit this if we really need it for windows.
-      return false;
-    }
-
-    // We must be on some unix variant..
-    // check if standard out is a terminal
-    try {
-      // isatty system call will return 1 if the file descriptor is terminal else 0
-      if (isatty(STDOUT_FILENO) == 0) {
-        return false;
-      }
-      if (isatty(STDERR_FILENO) == 0) {
-        return false;
-      }
-    } catch (NoClassDefFoundError ignore) {
-      // These errors happen if the JNI lib is not available for your platform.
-      return false;
-    } catch (UnsatisfiedLinkError ignore) {
-      // These errors happen if the JNI lib is not available for your platform.
-      return false;
-    }
-    return true;
-  }
-
   /**
    * NOTE: Use this method only if isUnixTerminal is true.
    * Erases the current line and prints the given line.
    * @param line - line to print
    */
   public void reprintLine(String line) {
-    out.print(ansi().eraseLine(Ansi.Erase.ALL).a(line).a('\n').toString());
-    out.flush();
+    InPlaceUpdates.reprintLine(out, line);
     lines++;
   }
 
@@ -234,15 +202,6 @@ public class TezJobMonitor {
   }
 
   /**
-   * NOTE: Use this method only if isUnixTerminal is true.
-   * Gets the width of the terminal
-   * @return - width of terminal
-   */
-  public int getTerminalWidth() {
-    return TerminalFactory.get().getWidth();
-  }
-
-  /**
    * monitorExecution handles status printing, failures during execution and final status retrieval.
    *
    * @param dagClient client that was used to kick off the job
@@ -265,26 +224,11 @@ public class TezJobMonitor {
     Set<StatusGetOpts> opts = new HashSet<StatusGetOpts>();
     Heartbeater heartbeater = new Heartbeater(txnMgr, conf);
     long startTime = 0;
-    boolean isProfileEnabled = conf.getBoolVar(conf, HiveConf.ConfVars.TEZ_EXEC_SUMMARY) ||
+    boolean isProfileEnabled = HiveConf.getBoolVar(conf, HiveConf.ConfVars.TEZ_EXEC_SUMMARY) ||
       Utilities.isPerfOrAboveLogging(conf);
-    boolean inPlaceUpdates = conf.getBoolVar(conf, HiveConf.ConfVars.TEZ_EXEC_INPLACE_PROGRESS);
-    boolean wideTerminal = false;
-    boolean isTerminal = inPlaceUpdates == true ? isUnixTerminal() : false;
-
-    // we need at least 80 chars wide terminal to display in-place updates properly
-    if (isTerminal) {
-      if (getTerminalWidth() >= MIN_TERMINAL_WIDTH) {
-        wideTerminal = true;
-      }
-    }
-
-    boolean inPlaceEligible = false;
-    if (inPlaceUpdates && isTerminal && wideTerminal && !console.getIsSilent()) {
-      inPlaceEligible = true;
-    }
 
+    boolean inPlaceEligible = InPlaceUpdates.inPlaceEligible(conf);
     shutdownList.add(dagClient);
-
     console.printInfo("\n");
     perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.TEZ_RUN_DAG);
     perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.TEZ_SUBMIT_TO_RUNNING);
@@ -470,7 +414,7 @@ public class TezJobMonitor {
       DAGClient dagClient, HiveConf conf, DAG dag) {
 
     /* Strings for headers and counters */
-    String hiveCountersGroup = conf.getVar(conf, HiveConf.ConfVars.HIVECOUNTERGROUP);
+    String hiveCountersGroup = HiveConf.getVar(conf, HiveConf.ConfVars.HIVECOUNTERGROUP);
     Set<StatusGetOpts> statusGetOpts = EnumSet.of(StatusGetOpts.GET_COUNTERS);
     TezCounters hiveCounters = null;
     try {
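
Two of the hunks above also replace conf.getBoolVar(conf, ...) and conf.getVar(conf, ...) with the class-qualified HiveConf.getBoolVar(conf, ...) and HiveConf.getVar(conf, ...). The two-argument overloads are static, so the old spelling invoked a static method through an instance reference: legal Java, but it suggests an instance call that is not happening. A generic illustration (not the HiveConf API):

public class StaticCallSketch {
  static boolean lookup(String source, String key) {
    return "conf".equals(source) && "enabled".equals(key);
  }

  public static void main(String[] args) {
    StaticCallSketch conf = new StaticCallSketch();
    // Compiles, but misleading: 'conf' plays no part in dispatch.
    boolean viaInstance = conf.lookup("conf", "enabled");
    // Clearer: name the class that owns the static method.
    boolean viaClass = StaticCallSketch.lookup("conf", "enabled");
    System.out.println(viaInstance + " " + viaClass);
  }
}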

http://git-wip-us.apache.org/repos/asf/hive/blob/f4361bf3/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index c449aee..c78e8f4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hive.ql.metadata;
 
 import com.google.common.collect.Sets;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -81,6 +82,7 @@ import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
 import org.apache.hadoop.hive.ql.exec.FunctionTask;
 import org.apache.hadoop.hive.ql.exec.FunctionUtils;
 import org.apache.hadoop.hive.ql.exec.Utilities;
+import org.apache.hadoop.hive.ql.exec.tez.InPlaceUpdates;
 import org.apache.hadoop.hive.ql.index.HiveIndexHandler;
 import org.apache.hadoop.hive.ql.io.AcidUtils;
 import org.apache.hadoop.hive.ql.optimizer.listbucketingpruner.ListBucketingPrunerUtils;
@@ -101,6 +103,7 @@ import org.apache.thrift.TException;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.io.PrintStream;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
@@ -124,7 +127,6 @@ import static org.apache.hadoop.hive.serde.serdeConstants.MAPKEY_DELIM;
 import static org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_FORMAT;
 import static org.apache.hadoop.hive.serde.serdeConstants.STRING_TYPE_NAME;
 
-
 /**
  * This class has functions that implement meta data/DDL operations using calls
  * to the metastore.
@@ -1606,22 +1608,31 @@ private void constructOneLBLocationMap(FileStatus fSta,
         }
       }
 
-      if (validPartitions.size() == 0) {
+      int partsToLoad = validPartitions.size();
+      if (partsToLoad == 0) {
         LOG.warn("No partition is generated by dynamic partitioning");
       }
 
-      if (validPartitions.size() > conf.getIntVar(HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTS)) {
-        throw new HiveException("Number of dynamic partitions created is " + validPartitions.size()
+      if (partsToLoad > conf.getIntVar(HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTS)) {
+        throw new HiveException("Number of dynamic partitions created is " + partsToLoad
             + ", which is more than "
             + conf.getIntVar(HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTS)
             +". To solve this try to set " + HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTS.varname
-            + " to at least " + validPartitions.size() + '.');
+            + " to at least " + partsToLoad + '.');
       }
 
       Table tbl = getTable(tableName);
       // for each dynamically created DP directory, construct a full partition spec
       // and load the partition based on that
       Iterator<Path> iter = validPartitions.iterator();
+      LOG.info("Going to load " + partsToLoad + " partitions.");
+      PrintStream ps = null;
+      boolean inPlaceEligible = conf.getLong("fs.trash.interval", 0) <= 0
+          && InPlaceUpdates.inPlaceEligible(conf);
+      if(inPlaceEligible) {
+        ps = SessionState.getConsole().getInfoStream();
+      }
+      int partitionsLoaded = 0;
       while (iter.hasNext()) {
         // get the dynamically created directory
         Path partPath = iter.next();
@@ -1634,6 +1645,10 @@ private void constructOneLBLocationMap(FileStatus fSta,
         Partition newPartition = loadPartition(partPath, tbl, fullPartSpec, replace,
             holdDDLTime, true, listBucketingEnabled, false, isAcid);
         partitionsMap.put(fullPartSpec, newPartition);
+        if (inPlaceEligible) {
+          InPlaceUpdates.rePositionCursor(ps);
+          InPlaceUpdates.reprintLine(ps, "Loaded : " + ++partitionsLoaded + "/" + partsToLoad +" partitions.");
+        }
         LOG.info("New loading path = " + partPath + " with partSpec " + fullPartSpec);
       }
       if (isAcid) {

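[Editor's note] The hunk above drives a "Loaded : N/M partitions." status line through InPlaceUpdates.rePositionCursor and InPlaceUpdates.reprintLine while dynamic partitions load. The sketch below shows one common way such helpers are implemented (carriage return plus ANSI erase-line); it illustrates the idea only and is not the actual InPlaceUpdates code:

    import java.io.PrintStream;

    // Illustrative only: the real InPlaceUpdates methods may differ.
    public final class InPlaceUpdatesSketch {
      // Move the cursor back to the start of the current line.
      static void rePositionCursor(PrintStream ps) {
        ps.print("\r");        // carriage return, no newline
        ps.flush();
      }

      // Erase the current line and print the new status in its place.
      static void reprintLine(PrintStream ps, String line) {
        ps.print("\033[2K");   // ANSI "erase entire line"
        ps.print(line);
        ps.flush();
      }

      public static void main(String[] args) throws InterruptedException {
        PrintStream ps = System.out;
        int partsToLoad = 10;
        for (int loaded = 1; loaded <= partsToLoad; loaded++) {
          Thread.sleep(200);   // stand-in for loadPartition(...)
          rePositionCursor(ps);
          reprintLine(ps, "Loaded : " + loaded + "/" + partsToLoad + " partitions.");
        }
        ps.println();
      }
    }

Note the patch also skips in-place mode when fs.trash.interval > 0, since trash moves print their own log lines that would tear the rewritten status line.
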

[20/24] hive git commit: HIVE-11792: User explain in tez does not preserve ordering (Prasanth Jayachandran reviewed by Pengcheng Xiong)

Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/da0be3db/ql/src/test/results/clientpositive/tez/explainuser_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/explainuser_3.q.out b/ql/src/test/results/clientpositive/tez/explainuser_3.q.out
index 9358158..79c7116 100644
--- a/ql/src/test/results/clientpositive/tez/explainuser_3.q.out
+++ b/ql/src/test/results/clientpositive/tez/explainuser_3.q.out
@@ -160,7 +160,7 @@ Stage-2
          File Output Operator [FS_6]
             compressed:false
             Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Group By Operator [GBY_4]
             |  aggregations:["compute_stats(VALUE._col0)","compute_stats(VALUE._col1)"]
             |  outputColumnNames:["_col0","_col1"]
@@ -244,7 +244,7 @@ Stage-3
                      File Output Operator [FS_2]
                         compressed:false
                         Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                        table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","name:":"default.src_autho_test","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+                        table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","name:":"default.src_autho_test"}
                         Select Operator [SEL_1]
                            outputColumnNames:["_col0","_col1"]
                            Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -433,7 +433,7 @@ Stage-0
          File Output Operator [FS_5]
             compressed:false
             Statistics:Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Limit [LIM_4]
                Number of rows:5
                Statistics:Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
@@ -479,7 +479,7 @@ Stage-3
    Stats-Aggr Operator
       Stage-0
          Move Operator
-            table:{"serde:":"org.apache.hadoop.hive.ql.io.orc.OrcSerde","name:":"default.orc_merge5","input format:":"org.apache.hadoop.hive.ql.io.orc.OrcInputFormat","output format:":"org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.hive.ql.io.orc.OrcInputFormat","output format:":"org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat","serde:":"org.apache.hadoop.hive.ql.io.orc.OrcSerde","name:":"default.orc_merge5"}
             Stage-2
                Dependency Collection{}
                   Stage-5(CONDITIONAL)
@@ -491,7 +491,7 @@ Stage-3
                                  File Output Operator [FS_3]
                                     compressed:false
                                     Statistics:Num rows: 306 Data size: 82044 Basic stats: COMPLETE Column stats: NONE
-                                    table:{"serde:":"org.apache.hadoop.hive.ql.io.orc.OrcSerde","name:":"default.orc_merge5","input format:":"org.apache.hadoop.hive.ql.io.orc.OrcInputFormat","output format:":"org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat"}
+                                    table:{"input format:":"org.apache.hadoop.hive.ql.io.orc.OrcInputFormat","output format:":"org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat","serde:":"org.apache.hadoop.hive.ql.io.orc.OrcSerde","name:":"default.orc_merge5"}
                                     Select Operator [SEL_2]
                                        outputColumnNames:["_col0","_col1","_col2","_col3","_col4"]
                                        Statistics:Num rows: 306 Data size: 82044 Basic stats: COMPLETE Column stats: NONE

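[Editor's note] The golden-file changes above are purely about key order: the table descriptor now prints as input format / output format / serde / name instead of an arbitrary hash order. One standard Java fix for this class of nondeterminism is building such maps with LinkedHashMap, whose iteration order is insertion order, rather than HashMap -- a minimal sketch (property names mirror the output above; the actual explain code may differ):

    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.LinkedHashMap;
    import java.util.Map;

    public class ExplainOrderingSketch {
      public static void main(String[] args) {
        // HashMap iteration order depends on hashing: effectively arbitrary.
        Map<String, String> unordered = new HashMap<String, String>();
        // LinkedHashMap iterates in insertion order: stable explain output.
        Map<String, String> ordered = new LinkedHashMap<String, String>();
        for (Map<String, String> m : Arrays.asList(unordered, ordered)) {
          m.put("input format:", "org.apache.hadoop.mapred.TextInputFormat");
          m.put("output format:", "org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat");
          m.put("serde:", "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe");
          m.put("name:", "default.src_autho_test");
        }
        System.out.println("HashMap       : " + unordered.keySet());
        System.out.println("LinkedHashMap : " + ordered.keySet());
      }
    }
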

[18/24] hive git commit: HIVE-11763: Use * instead of sum(hash(*)) on Parquet predicate (PPD) integration tests (Sergio Pena, reviewed by Ferdinand Xu)

Posted by pr...@apache.org.
HIVE-11763: Use * instead of sum(hash(*)) on Parquet predicate (PPD) integration tests (Sergio Pena, reviewed by Ferdinand Xu)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/66fb9601
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/66fb9601
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/66fb9601

Branch: refs/heads/llap
Commit: 66fb9601dc1195518f2cc764851823c34c9d7dfe
Parents: bbb9129
Author: Sergio Pena <se...@cloudera.com>
Authored: Sun Sep 13 00:20:27 2015 -0500
Committer: Sergio Pena <se...@cloudera.com>
Committed: Sun Sep 13 00:20:27 2015 -0500

----------------------------------------------------------------------
 .../clientpositive/parquet_ppd_boolean.q        |  42 +-
 .../queries/clientpositive/parquet_ppd_char.q   |  46 +-
 .../queries/clientpositive/parquet_ppd_date.q   |  64 +-
 .../clientpositive/parquet_ppd_decimal.q        | 106 ++--
 .../clientpositive/parquet_ppd_timestamp.q      |  62 +-
 .../clientpositive/parquet_ppd_varchar.q        |  46 +-
 .../clientpositive/parquet_predicate_pushdown.q |  20 +-
 .../clientpositive/parquet_ppd_boolean.q.out    | 194 ++++--
 .../clientpositive/parquet_ppd_char.q.out       | 224 ++++---
 .../clientpositive/parquet_ppd_date.q.out       | 324 +++++++---
 .../clientpositive/parquet_ppd_decimal.q.out    | 594 ++++++++++++++-----
 .../clientpositive/parquet_ppd_timestamp.q.out  | 314 +++++++---
 .../clientpositive/parquet_ppd_varchar.q.out    | 224 ++++---
 .../parquet_predicate_pushdown.q.out            | 118 ++--
 14 files changed, 1584 insertions(+), 794 deletions(-)
----------------------------------------------------------------------

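[Editor's note] The motivation is visible in the diffs that follow: sum(hash(*)) reduces every query's result to a single opaque number, so a predicate-pushdown regression shows up only as one changed integer, while select * records the matching rows themselves (the tests also cap the data with limit 10 so the row-level golden output stays small). A toy illustration of the difference, with String.hashCode standing in for Hive's hash() UDF:

    import java.util.Arrays;
    import java.util.List;

    public class RowVsHashCheck {
      public static void main(String[] args) {
        List<String> rows = Arrays.asList(
            "apple     \tbee\t0.22\ttrue",
            "hello     \tworld\t11.22\tfalse");
        // Old style: one aggregate per query; a regression is just a changed number.
        long sumOfHashes = 0;
        for (String row : rows) {
          sumOfHashes += row.hashCode();   // stand-in for Hive's hash(*) UDF
        }
        System.out.println("sum(hash(*)) style: " + sumOfHashes);
        // New style: the golden file shows exactly which rows the predicate kept.
        for (String row : rows) {
          System.out.println(row);
        }
      }
    }
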

http://git-wip-us.apache.org/repos/asf/hive/blob/66fb9601/ql/src/test/queries/clientpositive/parquet_ppd_boolean.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/parquet_ppd_boolean.q b/ql/src/test/queries/clientpositive/parquet_ppd_boolean.q
index 05c6c50..a7848b4 100644
--- a/ql/src/test/queries/clientpositive/parquet_ppd_boolean.q
+++ b/ql/src/test/queries/clientpositive/parquet_ppd_boolean.q
@@ -4,32 +4,32 @@ SET mapred.max.split.size=5000;
 
 create table newtypestbl(c char(10), v varchar(10), d decimal(5,3), b boolean) stored as parquet;
 
-insert overwrite table newtypestbl select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, true from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, false from src src2) uniontbl;
+insert overwrite table newtypestbl select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, true from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, false from src src2 limit 10) uniontbl;
 
 SET hive.optimize.ppd=true;
 SET hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where b=true;
-select sum(hash(*)) from newtypestbl where b!=true;
-select sum(hash(*)) from newtypestbl where b<true;
-select sum(hash(*)) from newtypestbl where b>true;
-select sum(hash(*)) from newtypestbl where b<=true;
+select * from newtypestbl where b=true;
+select * from newtypestbl where b!=true;
+select * from newtypestbl where b<true;
+select * from newtypestbl where b>true;
+select * from newtypestbl where b<=true;
 
-select sum(hash(*)) from newtypestbl where b=false;
-select sum(hash(*)) from newtypestbl where b!=false;
-select sum(hash(*)) from newtypestbl where b<false;
-select sum(hash(*)) from newtypestbl where b>false;
-select sum(hash(*)) from newtypestbl where b<=false;
+select * from newtypestbl where b=false;
+select * from newtypestbl where b!=false;
+select * from newtypestbl where b<false;
+select * from newtypestbl where b>false;
+select * from newtypestbl where b<=false;
 
 
 SET hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where b=true;
-select sum(hash(*)) from newtypestbl where b!=true;
-select sum(hash(*)) from newtypestbl where b<true;
-select sum(hash(*)) from newtypestbl where b>true;
-select sum(hash(*)) from newtypestbl where b<=true;
+select * from newtypestbl where b=true;
+select * from newtypestbl where b!=true;
+select * from newtypestbl where b<true;
+select * from newtypestbl where b>true;
+select * from newtypestbl where b<=true;
 
-select sum(hash(*)) from newtypestbl where b=false;
-select sum(hash(*)) from newtypestbl where b!=false;
-select sum(hash(*)) from newtypestbl where b<false;
-select sum(hash(*)) from newtypestbl where b>false;
-select sum(hash(*)) from newtypestbl where b<=false;
\ No newline at end of file
+select * from newtypestbl where b=false;
+select * from newtypestbl where b!=false;
+select * from newtypestbl where b<false;
+select * from newtypestbl where b>false;
+select * from newtypestbl where b<=false;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/66fb9601/ql/src/test/queries/clientpositive/parquet_ppd_char.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/parquet_ppd_char.q b/ql/src/test/queries/clientpositive/parquet_ppd_char.q
index b01612c..dcad622 100644
--- a/ql/src/test/queries/clientpositive/parquet_ppd_char.q
+++ b/ql/src/test/queries/clientpositive/parquet_ppd_char.q
@@ -5,72 +5,72 @@ SET mapred.max.split.size=5000;
 
 create table newtypestbl(c char(10), v varchar(10), d decimal(5,3), da date) stored as parquet;
 
-insert overwrite table newtypestbl select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2) uniontbl;
+insert overwrite table newtypestbl select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2 limit 10) uniontbl;
 
 set hive.optimize.index.filter=false;
 
 -- char data types (EQUAL, NOT_EQUAL, LESS_THAN, LESS_THAN_EQUALS, IN, BETWEEN tests)
-select sum(hash(*)) from newtypestbl where c="apple";
+select * from newtypestbl where c="apple";
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where c="apple";
+select * from newtypestbl where c="apple";
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where c!="apple";
+select * from newtypestbl where c!="apple";
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where c!="apple";
+select * from newtypestbl where c!="apple";
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where c<"hello";
+select * from newtypestbl where c<"hello";
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where c<"hello";
+select * from newtypestbl where c<"hello";
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where c<="hello";
+select * from newtypestbl where c<="hello";
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where c<="hello";
+select * from newtypestbl where c<="hello";
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where c="apple ";
+select * from newtypestbl where c="apple ";
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where c="apple ";
+select * from newtypestbl where c="apple ";
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where c in ("apple", "carrot");
+select * from newtypestbl where c in ("apple", "carrot");
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where c in ("apple", "carrot");
+select * from newtypestbl where c in ("apple", "carrot");
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where c in ("apple", "hello");
+select * from newtypestbl where c in ("apple", "hello");
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where c in ("apple", "hello");
+select * from newtypestbl where c in ("apple", "hello");
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where c in ("carrot");
+select * from newtypestbl where c in ("carrot");
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where c in ("carrot");
+select * from newtypestbl where c in ("carrot");
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where c between "apple" and "carrot";
+select * from newtypestbl where c between "apple" and "carrot";
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where c between "apple" and "carrot";
+select * from newtypestbl where c between "apple" and "carrot";
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where c between "apple" and "zombie";
+select * from newtypestbl where c between "apple" and "zombie";
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where c between "apple" and "zombie";
+select * from newtypestbl where c between "apple" and "zombie";
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where c between "carrot" and "carrot1";
+select * from newtypestbl where c between "carrot" and "carrot1";
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where c between "carrot" and "carrot1";
\ No newline at end of file
+select * from newtypestbl where c between "carrot" and "carrot1";
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/66fb9601/ql/src/test/queries/clientpositive/parquet_ppd_date.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/parquet_ppd_date.q b/ql/src/test/queries/clientpositive/parquet_ppd_date.q
index a18a9cf..a05d358 100644
--- a/ql/src/test/queries/clientpositive/parquet_ppd_date.q
+++ b/ql/src/test/queries/clientpositive/parquet_ppd_date.q
@@ -5,97 +5,97 @@ SET mapred.max.split.size=5000;
 
 create table newtypestbl(c char(10), v varchar(10), d decimal(5,3), da date) stored as parquet;
 
-insert overwrite table newtypestbl select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2) uniontbl;
+insert overwrite table newtypestbl select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2 limit 10) uniontbl;
 
 -- date data types (EQUAL, NOT_EQUAL, LESS_THAN, LESS_THAN_EQUALS, IN, BETWEEN tests)
-select sum(hash(*)) from newtypestbl where da='1970-02-20';
+select * from newtypestbl where da='1970-02-20';
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where da='1970-02-20';
+select * from newtypestbl where da='1970-02-20';
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where da= date '1970-02-20';
+select * from newtypestbl where da= date '1970-02-20';
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where da=cast('1970-02-20' as date);
+select * from newtypestbl where da=cast('1970-02-20' as date);
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where da=cast('1970-02-20' as date);
+select * from newtypestbl where da=cast('1970-02-20' as date);
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where da=cast('1970-02-20' as varchar(20));
+select * from newtypestbl where da=cast('1970-02-20' as varchar(20));
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where da=cast('1970-02-20' as varchar(20));
+select * from newtypestbl where da=cast('1970-02-20' as varchar(20));
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where da!='1970-02-20';
+select * from newtypestbl where da!='1970-02-20';
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where da!='1970-02-20';
+select * from newtypestbl where da!='1970-02-20';
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where da<'1970-02-27';
+select * from newtypestbl where da<'1970-02-27';
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where da<'1970-02-27';
+select * from newtypestbl where da<'1970-02-27';
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where da<'1970-02-29';
+select * from newtypestbl where da<'1970-02-29';
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where da<'1970-02-29';
+select * from newtypestbl where da<'1970-02-29';
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where da<'1970-02-15';
+select * from newtypestbl where da<'1970-02-15';
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where da<'1970-02-15';
+select * from newtypestbl where da<'1970-02-15';
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where da<='1970-02-20';
+select * from newtypestbl where da<='1970-02-20';
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where da<='1970-02-20';
+select * from newtypestbl where da<='1970-02-20';
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where da<='1970-02-27';
+select * from newtypestbl where da<='1970-02-27';
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where da<='1970-02-27';
+select * from newtypestbl where da<='1970-02-27';
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where da in (cast('1970-02-21' as date), cast('1970-02-27' as date));
+select * from newtypestbl where da in (cast('1970-02-21' as date), cast('1970-02-27' as date));
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where da in (cast('1970-02-21' as date), cast('1970-02-27' as date));
+select * from newtypestbl where da in (cast('1970-02-21' as date), cast('1970-02-27' as date));
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where da in (cast('1970-02-20' as date), cast('1970-02-27' as date));
+select * from newtypestbl where da in (cast('1970-02-20' as date), cast('1970-02-27' as date));
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where da in (cast('1970-02-20' as date), cast('1970-02-27' as date));
+select * from newtypestbl where da in (cast('1970-02-20' as date), cast('1970-02-27' as date));
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where da in (cast('1970-02-21' as date), cast('1970-02-22' as date));
+select * from newtypestbl where da in (cast('1970-02-21' as date), cast('1970-02-22' as date));
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where da in (cast('1970-02-21' as date), cast('1970-02-22' as date));
+select * from newtypestbl where da in (cast('1970-02-21' as date), cast('1970-02-22' as date));
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where da between '1970-02-19' and '1970-02-22';
+select * from newtypestbl where da between '1970-02-19' and '1970-02-22';
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where da between '1970-02-19' and '1970-02-22';
+select * from newtypestbl where da between '1970-02-19' and '1970-02-22';
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where da between '1970-02-19' and '1970-02-28';
+select * from newtypestbl where da between '1970-02-19' and '1970-02-28';
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where da between '1970-02-19' and '1970-02-28';
+select * from newtypestbl where da between '1970-02-19' and '1970-02-28';
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where da between '1970-02-18' and '1970-02-19';
+select * from newtypestbl where da between '1970-02-18' and '1970-02-19';
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where da between '1970-02-18' and '1970-02-19';
+select * from newtypestbl where da between '1970-02-18' and '1970-02-19';

http://git-wip-us.apache.org/repos/asf/hive/blob/66fb9601/ql/src/test/queries/clientpositive/parquet_ppd_decimal.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/parquet_ppd_decimal.q b/ql/src/test/queries/clientpositive/parquet_ppd_decimal.q
index 679164b..cf7cba0 100644
--- a/ql/src/test/queries/clientpositive/parquet_ppd_decimal.q
+++ b/ql/src/test/queries/clientpositive/parquet_ppd_decimal.q
@@ -4,160 +4,160 @@ SET mapred.max.split.size=5000;
 
 create table newtypestbl(c char(10), v varchar(10), d decimal(5,3), da date) stored as parquet;
 
-insert overwrite table newtypestbl select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2) uniontbl;
+insert overwrite table newtypestbl select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2 limit 10) uniontbl;
 
 -- decimal data types (EQUAL, NOT_EQUAL, LESS_THAN, LESS_THAN_EQUALS, IN, BETWEEN tests)
-select sum(hash(*)) from newtypestbl where d=0.22;
+select * from newtypestbl where d=0.22;
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where d=0.22;
+select * from newtypestbl where d=0.22;
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where d='0.22';
+select * from newtypestbl where d='0.22';
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where d='0.22';
+select * from newtypestbl where d='0.22';
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where d=cast('0.22' as float);
+select * from newtypestbl where d=cast('0.22' as float);
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where d=cast('0.22' as float);
+select * from newtypestbl where d=cast('0.22' as float);
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where d!=0.22;
+select * from newtypestbl where d!=0.22;
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where d!=0.22;
+select * from newtypestbl where d!=0.22;
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where d!='0.22';
+select * from newtypestbl where d!='0.22';
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where d!='0.22';
+select * from newtypestbl where d!='0.22';
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where d!=cast('0.22' as float);
+select * from newtypestbl where d!=cast('0.22' as float);
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where d!=cast('0.22' as float);
+select * from newtypestbl where d!=cast('0.22' as float);
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where d<11.22;
+select * from newtypestbl where d<11.22;
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where d<11.22;
+select * from newtypestbl where d<11.22;
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where d<'11.22';
+select * from newtypestbl where d<'11.22';
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where d<'11.22';
+select * from newtypestbl where d<'11.22';
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where d<cast('11.22' as float);
+select * from newtypestbl where d<cast('11.22' as float);
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where d<cast('11.22' as float);
+select * from newtypestbl where d<cast('11.22' as float);
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where d<1;
+select * from newtypestbl where d<1;
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where d<1;
+select * from newtypestbl where d<1;
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where d<=11.22;
+select * from newtypestbl where d<=11.22;
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where d<=11.22;
+select * from newtypestbl where d<=11.22;
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where d<='11.22';
+select * from newtypestbl where d<='11.22';
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where d<='11.22';
+select * from newtypestbl where d<='11.22';
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where d<=cast('11.22' as float);
+select * from newtypestbl where d<=cast('11.22' as float);
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where d<=cast('11.22' as float);
+select * from newtypestbl where d<=cast('11.22' as float);
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where d<=cast('11.22' as decimal);
+select * from newtypestbl where d<=cast('11.22' as decimal);
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where d<=cast('11.22' as decimal);
+select * from newtypestbl where d<=cast('11.22' as decimal);
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where d<=11.22BD;
+select * from newtypestbl where d<=11.22BD;
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where d<=11.22BD;
+select * from newtypestbl where d<=11.22BD;
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where d<=12;
+select * from newtypestbl where d<=12;
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where d<=12;
+select * from newtypestbl where d<=12;
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where d in ('0.22', '1.0');
+select * from newtypestbl where d in ('0.22', '1.0');
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where d in ('0.22', '1.0');
+select * from newtypestbl where d in ('0.22', '1.0');
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where d in ('0.22', '11.22');
+select * from newtypestbl where d in ('0.22', '11.22');
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where d in ('0.22', '11.22');
+select * from newtypestbl where d in ('0.22', '11.22');
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where d in ('0.9', '1.0');
+select * from newtypestbl where d in ('0.9', '1.0');
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where d in ('0.9', '1.0');
+select * from newtypestbl where d in ('0.9', '1.0');
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where d in ('0.9', 0.22);
+select * from newtypestbl where d in ('0.9', 0.22);
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where d in ('0.9', 0.22);
+select * from newtypestbl where d in ('0.9', 0.22);
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where d in ('0.9', 0.22, cast('11.22' as float));
+select * from newtypestbl where d in ('0.9', 0.22, cast('11.22' as float));
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where d in ('0.9', 0.22, cast('11.22' as float));
+select * from newtypestbl where d in ('0.9', 0.22, cast('11.22' as float));
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where d between 0 and 1;
+select * from newtypestbl where d between 0 and 1;
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where d between 0 and 1;
+select * from newtypestbl where d between 0 and 1;
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where d between 0 and 1000;
+select * from newtypestbl where d between 0 and 1000;
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where d between 0 and 1000;
+select * from newtypestbl where d between 0 and 1000;
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where d between 0 and '2.0';
+select * from newtypestbl where d between 0 and '2.0';
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where d between 0 and '2.0';
+select * from newtypestbl where d between 0 and '2.0';
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where d between 0 and cast(3 as float);
+select * from newtypestbl where d between 0 and cast(3 as float);
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where d between 0 and cast(3 as float);
+select * from newtypestbl where d between 0 and cast(3 as float);
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where d between 1 and cast(30 as char(10));
+select * from newtypestbl where d between 1 and cast(30 as char(10));
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where d between 1 and cast(30 as char(10));
+select * from newtypestbl where d between 1 and cast(30 as char(10));

http://git-wip-us.apache.org/repos/asf/hive/blob/66fb9601/ql/src/test/queries/clientpositive/parquet_ppd_timestamp.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/parquet_ppd_timestamp.q b/ql/src/test/queries/clientpositive/parquet_ppd_timestamp.q
index e0802a0..6ed1e55 100644
--- a/ql/src/test/queries/clientpositive/parquet_ppd_timestamp.q
+++ b/ql/src/test/queries/clientpositive/parquet_ppd_timestamp.q
@@ -5,94 +5,94 @@ SET mapred.max.split.size=5000;
 
 create table newtypestbl(c char(10), v varchar(10), d decimal(5,3), ts timestamp) stored as parquet;
 
-insert overwrite table newtypestbl select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("2011-01-01 01:01:01" as timestamp) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("2011-01-20 01:01:01" as timestamp) from src src2) uniontbl;
+insert overwrite table newtypestbl select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("2011-01-01 01:01:01" as timestamp) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("2011-01-20 01:01:01" as timestamp) from src src2 limit 10) uniontbl;
 
 -- timestamp data types (EQUAL, NOT_EQUAL, LESS_THAN, LESS_THAN_EQUALS, IN, BETWEEN tests)
-select sum(hash(*)) from newtypestbl where cast(ts as string)='2011-01-01 01:01:01';
+select * from newtypestbl where cast(ts as string)='2011-01-01 01:01:01';
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where cast(ts as string)='2011-01-01 01:01:01';
+select * from newtypestbl where cast(ts as string)='2011-01-01 01:01:01';
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where ts=cast('2011-01-01 01:01:01' as timestamp);
+select * from newtypestbl where ts=cast('2011-01-01 01:01:01' as timestamp);
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where ts=cast('2011-01-01 01:01:01' as timestamp);
+select * from newtypestbl where ts=cast('2011-01-01 01:01:01' as timestamp);
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where ts=cast('2011-01-01 01:01:01' as varchar(20));
+select * from newtypestbl where ts=cast('2011-01-01 01:01:01' as varchar(20));
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where ts=cast('2011-01-01 01:01:01' as varchar(20));
+select * from newtypestbl where ts=cast('2011-01-01 01:01:01' as varchar(20));
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where ts!=cast('2011-01-01 01:01:01' as timestamp);
+select * from newtypestbl where ts!=cast('2011-01-01 01:01:01' as timestamp);
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where ts!=cast('2011-01-01 01:01:01' as timestamp);
+select * from newtypestbl where ts!=cast('2011-01-01 01:01:01' as timestamp);
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where ts<cast('2011-01-20 01:01:01' as timestamp);
+select * from newtypestbl where ts<cast('2011-01-20 01:01:01' as timestamp);
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where ts<cast('2011-01-20 01:01:01' as timestamp);
+select * from newtypestbl where ts<cast('2011-01-20 01:01:01' as timestamp);
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where ts<cast('2011-01-22 01:01:01' as timestamp);
+select * from newtypestbl where ts<cast('2011-01-22 01:01:01' as timestamp);
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where ts<cast('2011-01-22 01:01:01' as timestamp);
+select * from newtypestbl where ts<cast('2011-01-22 01:01:01' as timestamp);
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where ts<cast('2010-10-01 01:01:01' as timestamp);
+select * from newtypestbl where ts<cast('2010-10-01 01:01:01' as timestamp);
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where ts<cast('2010-10-01 01:01:01' as timestamp);
+select * from newtypestbl where ts<cast('2010-10-01 01:01:01' as timestamp);
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where ts<=cast('2011-01-01 01:01:01' as timestamp);
+select * from newtypestbl where ts<=cast('2011-01-01 01:01:01' as timestamp);
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where ts<=cast('2011-01-01 01:01:01' as timestamp);
+select * from newtypestbl where ts<=cast('2011-01-01 01:01:01' as timestamp);
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where ts<=cast('2011-01-20 01:01:01' as timestamp);
+select * from newtypestbl where ts<=cast('2011-01-20 01:01:01' as timestamp);
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where ts<=cast('2011-01-20 01:01:01' as timestamp);
+select * from newtypestbl where ts<=cast('2011-01-20 01:01:01' as timestamp);
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where ts in (cast('2011-01-02 01:01:01' as timestamp), cast('2011-01-20 01:01:01' as timestamp));
+select * from newtypestbl where ts in (cast('2011-01-02 01:01:01' as timestamp), cast('2011-01-20 01:01:01' as timestamp));
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where ts in (cast('2011-01-02 01:01:01' as timestamp), cast('2011-01-20 01:01:01' as timestamp));
+select * from newtypestbl where ts in (cast('2011-01-02 01:01:01' as timestamp), cast('2011-01-20 01:01:01' as timestamp));
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where ts in (cast('2011-01-01 01:01:01' as timestamp), cast('2011-01-20 01:01:01' as timestamp));
+select * from newtypestbl where ts in (cast('2011-01-01 01:01:01' as timestamp), cast('2011-01-20 01:01:01' as timestamp));
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where ts in (cast('2011-01-01 01:01:01' as timestamp), cast('2011-01-20 01:01:01' as timestamp));
+select * from newtypestbl where ts in (cast('2011-01-01 01:01:01' as timestamp), cast('2011-01-20 01:01:01' as timestamp));
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where ts in (cast('2011-01-02 01:01:01' as timestamp), cast('2011-01-08 01:01:01' as timestamp));
+select * from newtypestbl where ts in (cast('2011-01-02 01:01:01' as timestamp), cast('2011-01-08 01:01:01' as timestamp));
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where ts in (cast('2011-01-02 01:01:01' as timestamp), cast('2011-01-08 01:01:01' as timestamp));
+select * from newtypestbl where ts in (cast('2011-01-02 01:01:01' as timestamp), cast('2011-01-08 01:01:01' as timestamp));
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where ts between cast('2010-10-01 01:01:01' as timestamp) and cast('2011-01-08 01:01:01' as timestamp);
+select * from newtypestbl where ts between cast('2010-10-01 01:01:01' as timestamp) and cast('2011-01-08 01:01:01' as timestamp);
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where ts between cast('2010-10-01 01:01:01' as timestamp) and cast('2011-01-08 01:01:01' as timestamp);
+select * from newtypestbl where ts between cast('2010-10-01 01:01:01' as timestamp) and cast('2011-01-08 01:01:01' as timestamp);
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where ts between cast('2010-10-01 01:01:01' as timestamp) and cast('2011-01-25 01:01:01' as timestamp);
+select * from newtypestbl where ts between cast('2010-10-01 01:01:01' as timestamp) and cast('2011-01-25 01:01:01' as timestamp);
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where ts between cast('2010-10-01 01:01:01' as timestamp) and cast('2011-01-25 01:01:01' as timestamp);
+select * from newtypestbl where ts between cast('2010-10-01 01:01:01' as timestamp) and cast('2011-01-25 01:01:01' as timestamp);
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where ts between cast('2010-10-01 01:01:01' as timestamp) and cast('2010-11-01 01:01:01' as timestamp);
+select * from newtypestbl where ts between cast('2010-10-01 01:01:01' as timestamp) and cast('2010-11-01 01:01:01' as timestamp);
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where ts between cast('2010-10-01 01:01:01' as timestamp) and cast('2010-11-01 01:01:01' as timestamp);
+select * from newtypestbl where ts between cast('2010-10-01 01:01:01' as timestamp) and cast('2010-11-01 01:01:01' as timestamp);

http://git-wip-us.apache.org/repos/asf/hive/blob/66fb9601/ql/src/test/queries/clientpositive/parquet_ppd_varchar.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/parquet_ppd_varchar.q b/ql/src/test/queries/clientpositive/parquet_ppd_varchar.q
index be50ca2..41bf7df 100644
--- a/ql/src/test/queries/clientpositive/parquet_ppd_varchar.q
+++ b/ql/src/test/queries/clientpositive/parquet_ppd_varchar.q
@@ -5,72 +5,72 @@ SET mapred.max.split.size=5000;
 
 create table newtypestbl(c char(10), v varchar(10), d decimal(5,3), da date) stored as parquet;
 
-insert overwrite table newtypestbl select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2) uniontbl;
+insert overwrite table newtypestbl select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2 limit 10) uniontbl;
 
 set hive.optimize.index.filter=false;
 
 -- varchar data types (EQUAL, NOT_EQUAL, LESS_THAN, LESS_THAN_EQUALS, IN, BETWEEN tests)
-select sum(hash(*)) from newtypestbl where v="bee";
+select * from newtypestbl where v="bee";
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where v="bee";
+select * from newtypestbl where v="bee";
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where v!="bee";
+select * from newtypestbl where v!="bee";
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where v!="bee";
+select * from newtypestbl where v!="bee";
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where v<"world";
+select * from newtypestbl where v<"world";
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where v<"world";
+select * from newtypestbl where v<"world";
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where v<="world";
+select * from newtypestbl where v<="world";
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where v<="world";
+select * from newtypestbl where v<="world";
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where v="bee   ";
+select * from newtypestbl where v="bee   ";
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where v="bee   ";
+select * from newtypestbl where v="bee   ";
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where v in ("bee", "orange");
+select * from newtypestbl where v in ("bee", "orange");
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where v in ("bee", "orange");
+select * from newtypestbl where v in ("bee", "orange");
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where v in ("bee", "world");
+select * from newtypestbl where v in ("bee", "world");
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where v in ("bee", "world");
+select * from newtypestbl where v in ("bee", "world");
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where v in ("orange");
+select * from newtypestbl where v in ("orange");
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where v in ("orange");
+select * from newtypestbl where v in ("orange");
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where v between "bee" and "orange";
+select * from newtypestbl where v between "bee" and "orange";
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where v between "bee" and "orange";
+select * from newtypestbl where v between "bee" and "orange";
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where v between "bee" and "zombie";
+select * from newtypestbl where v between "bee" and "zombie";
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where v between "bee" and "zombie";
+select * from newtypestbl where v between "bee" and "zombie";
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypestbl where v between "orange" and "pine";
+select * from newtypestbl where v between "orange" and "pine";
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypestbl where v between "orange" and "pine";
\ No newline at end of file
+select * from newtypestbl where v between "orange" and "pine";
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/66fb9601/ql/src/test/queries/clientpositive/parquet_predicate_pushdown.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/parquet_predicate_pushdown.q b/ql/src/test/queries/clientpositive/parquet_predicate_pushdown.q
index 32767e8..3f97dec 100644
--- a/ql/src/test/queries/clientpositive/parquet_predicate_pushdown.q
+++ b/ql/src/test/queries/clientpositive/parquet_predicate_pushdown.q
@@ -60,28 +60,32 @@ SET hive.optimize.index.filter=true;
 SELECT * FROM tbl_pred WHERE t>2 limit 1;
 SET hive.optimize.index.filter=false;
 
-SELECT SUM(HASH(t)) FROM tbl_pred
+SELECT * FROM tbl_pred
   WHERE t IS NOT NULL
   AND t < 0
-  AND t > -2;
+  AND t > -2
+  LIMIT 10;
 
 SET hive.optimize.index.filter=true;
-SELECT SUM(HASH(t)) FROM tbl_pred
+SELECT * FROM tbl_pred
   WHERE t IS NOT NULL
   AND t < 0
-  AND t > -2;
+  AND t > -2
+  LIMIT 10;
 SET hive.optimize.index.filter=false;
 
-EXPLAIN SELECT SUM(HASH(t)) FROM tbl_pred
+EXPLAIN SELECT * FROM tbl_pred
   WHERE t IS NOT NULL
   AND t < 0
-  AND t > -2;
+  AND t > -2
+  LIMIT 10;
 
 SET hive.optimize.index.filter=true;
-EXPLAIN SELECT SUM(HASH(t)) FROM tbl_pred
+EXPLAIN SELECT * FROM tbl_pred
   WHERE t IS NOT NULL
   AND t < 0
-  AND t > -2;
+  AND t > -2
+  LIMIT 10;
 SET hive.optimize.index.filter=false;
 
 SELECT t, s FROM tbl_pred

http://git-wip-us.apache.org/repos/asf/hive/blob/66fb9601/ql/src/test/results/clientpositive/parquet_ppd_boolean.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/parquet_ppd_boolean.q.out b/ql/src/test/results/clientpositive/parquet_ppd_boolean.q.out
index 78b7aa6..51ea879 100644
--- a/ql/src/test/results/clientpositive/parquet_ppd_boolean.q.out
+++ b/ql/src/test/results/clientpositive/parquet_ppd_boolean.q.out
@@ -6,11 +6,11 @@ POSTHOOK: query: create table newtypestbl(c char(10), v varchar(10), d decimal(5
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@newtypestbl
-PREHOOK: query: insert overwrite table newtypestbl select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, true from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, false from src src2) uniontbl
+PREHOOK: query: insert overwrite table newtypestbl select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, true from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, false from src src2 limit 10) uniontbl
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@newtypestbl
-POSTHOOK: query: insert overwrite table newtypestbl select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, true from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, false from src src2) uniontbl
+POSTHOOK: query: insert overwrite table newtypestbl select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, true from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, false from src src2 limit 10) uniontbl
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 POSTHOOK: Output: default@newtypestbl
@@ -18,183 +18,253 @@ POSTHOOK: Lineage: newtypestbl.b EXPRESSION []
 POSTHOOK: Lineage: newtypestbl.c EXPRESSION []
 POSTHOOK: Lineage: newtypestbl.d EXPRESSION []
 POSTHOOK: Lineage: newtypestbl.v EXPRESSION []
-PREHOOK: query: select sum(hash(*)) from newtypestbl where b=true
+PREHOOK: query: select * from newtypestbl where b=true
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where b=true
+POSTHOOK: query: select * from newtypestbl where b=true
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951953500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where b!=true
+apple     	bee	0.22	true
+apple     	bee	0.22	true
+apple     	bee	0.22	true
+apple     	bee	0.22	true
+apple     	bee	0.22	true
+PREHOOK: query: select * from newtypestbl where b!=true
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where b!=true
+POSTHOOK: query: select * from newtypestbl where b!=true
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-334427776000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where b<true
+hello     	world	11.22	false
+hello     	world	11.22	false
+hello     	world	11.22	false
+hello     	world	11.22	false
+hello     	world	11.22	false
+PREHOOK: query: select * from newtypestbl where b<true
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where b<true
+POSTHOOK: query: select * from newtypestbl where b<true
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-334427776000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where b>true
+hello     	world	11.22	false
+hello     	world	11.22	false
+hello     	world	11.22	false
+hello     	world	11.22	false
+hello     	world	11.22	false
+PREHOOK: query: select * from newtypestbl where b>true
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where b>true
+POSTHOOK: query: select * from newtypestbl where b>true
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-NULL
-PREHOOK: query: select sum(hash(*)) from newtypestbl where b<=true
+PREHOOK: query: select * from newtypestbl where b<=true
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where b<=true
+POSTHOOK: query: select * from newtypestbl where b<=true
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-81475822500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where b=false
+hello     	world	11.22	false
+apple     	bee	0.22	true
+hello     	world	11.22	false
+apple     	bee	0.22	true
+hello     	world	11.22	false
+apple     	bee	0.22	true
+hello     	world	11.22	false
+apple     	bee	0.22	true
+hello     	world	11.22	false
+apple     	bee	0.22	true
+PREHOOK: query: select * from newtypestbl where b=false
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where b=false
+POSTHOOK: query: select * from newtypestbl where b=false
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-334427776000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where b!=false
+hello     	world	11.22	false
+hello     	world	11.22	false
+hello     	world	11.22	false
+hello     	world	11.22	false
+hello     	world	11.22	false
+PREHOOK: query: select * from newtypestbl where b!=false
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where b!=false
+POSTHOOK: query: select * from newtypestbl where b!=false
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951953500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where b<false
+apple     	bee	0.22	true
+apple     	bee	0.22	true
+apple     	bee	0.22	true
+apple     	bee	0.22	true
+apple     	bee	0.22	true
+PREHOOK: query: select * from newtypestbl where b<false
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where b<false
+POSTHOOK: query: select * from newtypestbl where b<false
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-NULL
-PREHOOK: query: select sum(hash(*)) from newtypestbl where b>false
+PREHOOK: query: select * from newtypestbl where b>false
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where b>false
+POSTHOOK: query: select * from newtypestbl where b>false
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951953500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where b<=false
+apple     	bee	0.22	true
+apple     	bee	0.22	true
+apple     	bee	0.22	true
+apple     	bee	0.22	true
+apple     	bee	0.22	true
+PREHOOK: query: select * from newtypestbl where b<=false
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where b<=false
+POSTHOOK: query: select * from newtypestbl where b<=false
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-334427776000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where b=true
+hello     	world	11.22	false
+hello     	world	11.22	false
+hello     	world	11.22	false
+hello     	world	11.22	false
+hello     	world	11.22	false
+PREHOOK: query: select * from newtypestbl where b=true
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where b=true
+POSTHOOK: query: select * from newtypestbl where b=true
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951953500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where b!=true
+apple     	bee	0.22	true
+apple     	bee	0.22	true
+apple     	bee	0.22	true
+apple     	bee	0.22	true
+apple     	bee	0.22	true
+PREHOOK: query: select * from newtypestbl where b!=true
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where b!=true
+POSTHOOK: query: select * from newtypestbl where b!=true
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-334427776000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where b<true
+hello     	world	11.22	false
+hello     	world	11.22	false
+hello     	world	11.22	false
+hello     	world	11.22	false
+hello     	world	11.22	false
+PREHOOK: query: select * from newtypestbl where b<true
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where b<true
+POSTHOOK: query: select * from newtypestbl where b<true
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-334427776000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where b>true
+hello     	world	11.22	false
+hello     	world	11.22	false
+hello     	world	11.22	false
+hello     	world	11.22	false
+hello     	world	11.22	false
+PREHOOK: query: select * from newtypestbl where b>true
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where b>true
+POSTHOOK: query: select * from newtypestbl where b>true
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-NULL
-PREHOOK: query: select sum(hash(*)) from newtypestbl where b<=true
+PREHOOK: query: select * from newtypestbl where b<=true
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where b<=true
+POSTHOOK: query: select * from newtypestbl where b<=true
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-81475822500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where b=false
+hello     	world	11.22	false
+apple     	bee	0.22	true
+hello     	world	11.22	false
+apple     	bee	0.22	true
+hello     	world	11.22	false
+apple     	bee	0.22	true
+hello     	world	11.22	false
+apple     	bee	0.22	true
+hello     	world	11.22	false
+apple     	bee	0.22	true
+PREHOOK: query: select * from newtypestbl where b=false
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where b=false
+POSTHOOK: query: select * from newtypestbl where b=false
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-334427776000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where b!=false
+hello     	world	11.22	false
+hello     	world	11.22	false
+hello     	world	11.22	false
+hello     	world	11.22	false
+hello     	world	11.22	false
+PREHOOK: query: select * from newtypestbl where b!=false
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where b!=false
+POSTHOOK: query: select * from newtypestbl where b!=false
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951953500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where b<false
+apple     	bee	0.22	true
+apple     	bee	0.22	true
+apple     	bee	0.22	true
+apple     	bee	0.22	true
+apple     	bee	0.22	true
+PREHOOK: query: select * from newtypestbl where b<false
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where b<false
+POSTHOOK: query: select * from newtypestbl where b<false
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-NULL
-PREHOOK: query: select sum(hash(*)) from newtypestbl where b>false
+PREHOOK: query: select * from newtypestbl where b>false
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where b>false
+POSTHOOK: query: select * from newtypestbl where b>false
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951953500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where b<=false
+apple     	bee	0.22	true
+apple     	bee	0.22	true
+apple     	bee	0.22	true
+apple     	bee	0.22	true
+apple     	bee	0.22	true
+PREHOOK: query: select * from newtypestbl where b<=false
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where b<=false
+POSTHOOK: query: select * from newtypestbl where b<=false
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-334427776000
+hello     	world	11.22	false
+hello     	world	11.22	false
+hello     	world	11.22	false
+hello     	world	11.22	false
+hello     	world	11.22	false

http://git-wip-us.apache.org/repos/asf/hive/blob/66fb9601/ql/src/test/results/clientpositive/parquet_ppd_char.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/parquet_ppd_char.q.out b/ql/src/test/results/clientpositive/parquet_ppd_char.q.out
index e62462c..af4a13c 100644
--- a/ql/src/test/results/clientpositive/parquet_ppd_char.q.out
+++ b/ql/src/test/results/clientpositive/parquet_ppd_char.q.out
@@ -6,11 +6,11 @@ POSTHOOK: query: create table newtypestbl(c char(10), v varchar(10), d decimal(5
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@newtypestbl
-PREHOOK: query: insert overwrite table newtypestbl select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2) uniontbl
+PREHOOK: query: insert overwrite table newtypestbl select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2 limit 10) uniontbl
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@newtypestbl
-POSTHOOK: query: insert overwrite table newtypestbl select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2) uniontbl
+POSTHOOK: query: insert overwrite table newtypestbl select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2 limit 10) uniontbl
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 POSTHOOK: Output: default@newtypestbl
@@ -19,202 +19,290 @@ POSTHOOK: Lineage: newtypestbl.d EXPRESSION []
 POSTHOOK: Lineage: newtypestbl.da EXPRESSION []
 POSTHOOK: Lineage: newtypestbl.v EXPRESSION []
 PREHOOK: query: -- char data types (EQUAL, NOT_EQUAL, LESS_THAN, LESS_THAN_EQUALS, IN, BETWEEN tests)
-select sum(hash(*)) from newtypestbl where c="apple"
+select * from newtypestbl where c="apple"
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
 POSTHOOK: query: -- char data types (EQUAL, NOT_EQUAL, LESS_THAN, LESS_THAN_EQUALS, IN, BETWEEN tests)
-select sum(hash(*)) from newtypestbl where c="apple"
+select * from newtypestbl where c="apple"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951929000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where c="apple"
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where c="apple"
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where c="apple"
+POSTHOOK: query: select * from newtypestbl where c="apple"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951929000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where c!="apple"
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where c!="apple"
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where c!="apple"
+POSTHOOK: query: select * from newtypestbl where c!="apple"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-334427804500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where c!="apple"
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+PREHOOK: query: select * from newtypestbl where c!="apple"
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where c!="apple"
+POSTHOOK: query: select * from newtypestbl where c!="apple"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-334427804500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where c<"hello"
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+PREHOOK: query: select * from newtypestbl where c<"hello"
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where c<"hello"
+POSTHOOK: query: select * from newtypestbl where c<"hello"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951929000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where c<"hello"
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where c<"hello"
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where c<"hello"
+POSTHOOK: query: select * from newtypestbl where c<"hello"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951929000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where c<="hello"
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where c<="hello"
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where c<="hello"
+POSTHOOK: query: select * from newtypestbl where c<="hello"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-81475875500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where c<="hello"
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where c<="hello"
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where c<="hello"
+POSTHOOK: query: select * from newtypestbl where c<="hello"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-81475875500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where c="apple "
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where c="apple "
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where c="apple "
+POSTHOOK: query: select * from newtypestbl where c="apple "
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-NULL
-PREHOOK: query: select sum(hash(*)) from newtypestbl where c="apple "
+PREHOOK: query: select * from newtypestbl where c="apple "
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where c="apple "
+POSTHOOK: query: select * from newtypestbl where c="apple "
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-NULL
-PREHOOK: query: select sum(hash(*)) from newtypestbl where c in ("apple", "carrot")
+PREHOOK: query: select * from newtypestbl where c in ("apple", "carrot")
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where c in ("apple", "carrot")
+POSTHOOK: query: select * from newtypestbl where c in ("apple", "carrot")
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951929000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where c in ("apple", "carrot")
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where c in ("apple", "carrot")
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where c in ("apple", "carrot")
+POSTHOOK: query: select * from newtypestbl where c in ("apple", "carrot")
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951929000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where c in ("apple", "hello")
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where c in ("apple", "hello")
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where c in ("apple", "hello")
+POSTHOOK: query: select * from newtypestbl where c in ("apple", "hello")
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-81475875500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where c in ("apple", "hello")
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where c in ("apple", "hello")
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where c in ("apple", "hello")
+POSTHOOK: query: select * from newtypestbl where c in ("apple", "hello")
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-81475875500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where c in ("carrot")
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where c in ("carrot")
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where c in ("carrot")
+POSTHOOK: query: select * from newtypestbl where c in ("carrot")
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-NULL
-PREHOOK: query: select sum(hash(*)) from newtypestbl where c in ("carrot")
+PREHOOK: query: select * from newtypestbl where c in ("carrot")
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where c in ("carrot")
+POSTHOOK: query: select * from newtypestbl where c in ("carrot")
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-NULL
-PREHOOK: query: select sum(hash(*)) from newtypestbl where c between "apple" and "carrot"
+PREHOOK: query: select * from newtypestbl where c between "apple" and "carrot"
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where c between "apple" and "carrot"
+POSTHOOK: query: select * from newtypestbl where c between "apple" and "carrot"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951929000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where c between "apple" and "carrot"
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where c between "apple" and "carrot"
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where c between "apple" and "carrot"
+POSTHOOK: query: select * from newtypestbl where c between "apple" and "carrot"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951929000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where c between "apple" and "zombie"
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where c between "apple" and "zombie"
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where c between "apple" and "zombie"
+POSTHOOK: query: select * from newtypestbl where c between "apple" and "zombie"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-81475875500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where c between "apple" and "zombie"
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where c between "apple" and "zombie"
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where c between "apple" and "zombie"
+POSTHOOK: query: select * from newtypestbl where c between "apple" and "zombie"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-81475875500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where c between "carrot" and "carrot1"
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where c between "carrot" and "carrot1"
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where c between "carrot" and "carrot1"
+POSTHOOK: query: select * from newtypestbl where c between "carrot" and "carrot1"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-NULL
-PREHOOK: query: select sum(hash(*)) from newtypestbl where c between "carrot" and "carrot1"
+PREHOOK: query: select * from newtypestbl where c between "carrot" and "carrot1"
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where c between "carrot" and "carrot1"
+POSTHOOK: query: select * from newtypestbl where c between "carrot" and "carrot1"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-NULL


[12/24] hive git commit: HIVE-11781 : Remove HiveLimit operator (Jesus Camacho Rodriguez via Ashutosh Chauhan)

Posted by pr...@apache.org.
HIVE-11781 : Remove HiveLimit operator (Jesus Camacho Rodriguez via Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b1fffd5a
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b1fffd5a
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b1fffd5a

Branch: refs/heads/llap
Commit: b1fffd5a8a8aa10f19c03f79cf02d8147222ccc8
Parents: 753fed6
Author: Jesus Camacho Rodriguez <jc...@apache.org>
Authored: Thu Sep 10 02:46:00 2015 -0800
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Fri Sep 11 17:12:21 2015 -0700

----------------------------------------------------------------------
 .../calcite/reloperators/HiveLimit.java         |  57 ----------
 .../calcite/reloperators/HiveSort.java          | 110 -------------------
 .../calcite/reloperators/HiveSortLimit.java     | 110 +++++++++++++++++++
 .../calcite/stats/HiveRelMdMemory.java          |   9 +-
 .../calcite/stats/HiveRelMdParallelism.java     |   4 +-
 .../calcite/translator/ASTConverter.java        |  24 ++--
 .../calcite/translator/HiveOpConverter.java     |   8 +-
 .../translator/PlanModifierForASTConv.java      |  10 +-
 .../calcite/translator/PlanModifierUtil.java    |   4 +-
 .../hadoop/hive/ql/parse/CalcitePlanner.java    |  10 +-
 10 files changed, 142 insertions(+), 204 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/b1fffd5a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveLimit.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveLimit.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveLimit.java
deleted file mode 100644
index 74991d6..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveLimit.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.optimizer.calcite.reloperators;
-
-import java.util.List;
-
-import org.apache.calcite.plan.RelOptCluster;
-import org.apache.calcite.plan.RelOptCost;
-import org.apache.calcite.plan.RelOptPlanner;
-import org.apache.calcite.plan.RelTraitSet;
-import org.apache.calcite.rel.RelNode;
-import org.apache.calcite.rel.SingleRel;
-import org.apache.calcite.rel.metadata.RelMetadataQuery;
-import org.apache.calcite.rex.RexNode;
-import org.apache.hadoop.hive.ql.optimizer.calcite.TraitsUtil;
-
-public class HiveLimit extends SingleRel implements HiveRelNode {
-  private final RexNode offset;
-  private final RexNode fetch;
-
-  HiveLimit(RelOptCluster cluster, RelTraitSet traitSet, RelNode child, RexNode offset,
-      RexNode fetch) {
-    super(cluster, TraitsUtil.getDefaultTraitSet(cluster), child);
-    this.offset = offset;
-    this.fetch = fetch;
-    assert getConvention() instanceof HiveRelNode;
-    assert getConvention() == child.getConvention();
-  }
-
-  @Override
-  public HiveLimit copy(RelTraitSet traitSet, List<RelNode> newInputs) {
-    return new HiveLimit(getCluster(), traitSet, sole(newInputs), offset, fetch);
-  }
-
-  public void implement(Implementor implementor) {
-  }
-
-  @Override
-  public RelOptCost computeSelfCost(RelOptPlanner planner) {
-    return RelMetadataQuery.getNonCumulativeCost(this);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/b1fffd5a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveSort.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveSort.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveSort.java
deleted file mode 100644
index 1df6542..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveSort.java
+++ /dev/null
@@ -1,110 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.optimizer.calcite.reloperators;
-
-import java.util.Map;
-
-import org.apache.calcite.plan.RelOptCluster;
-import org.apache.calcite.plan.RelTraitSet;
-import org.apache.calcite.rel.RelCollation;
-import org.apache.calcite.rel.RelCollationTraitDef;
-import org.apache.calcite.rel.RelNode;
-import org.apache.calcite.rel.core.RelFactories;
-import org.apache.calcite.rel.core.Sort;
-import org.apache.calcite.rex.RexNode;
-import org.apache.hadoop.hive.ql.optimizer.calcite.TraitsUtil;
-
-import com.google.common.collect.ImmutableMap;
-
-public class HiveSort extends Sort implements HiveRelNode {
-
-  public static final HiveSortRelFactory HIVE_SORT_REL_FACTORY = new HiveSortRelFactory();
-
-  // NOTE: this is to work around Hive Calcite Limitations w.r.t OB.
-  // 1. Calcite can not accept expressions in OB; instead it needs to be expressed
-  // as VC in input Select.
-  // 2. Hive can not preserve ordering through select boundaries.
-  // 3. This map is used for outermost OB to migrate the VC corresponding OB
-  // expressions from input select.
-  // 4. This is used by ASTConverter after we are done with Calcite Planning
-  private ImmutableMap<Integer, RexNode> mapOfInputRefToRexCall;
-
-  public HiveSort(RelOptCluster cluster, RelTraitSet traitSet, RelNode child,
-      RelCollation collation, RexNode offset, RexNode fetch) {
-    super(cluster, TraitsUtil.getSortTraitSet(cluster, traitSet, collation), child, collation,
-        offset, fetch);
-  }
-
-  /**
-   * Creates a HiveSort.
-   *
-   * @param input     Input relational expression
-   * @param collation array of sort specifications
-   * @param offset    Expression for number of rows to discard before returning
-   *                  first row
-   * @param fetch     Expression for number of rows to fetch
-   */
-  public static HiveSort create(RelNode input, RelCollation collation,
-      RexNode offset, RexNode fetch) {
-    RelOptCluster cluster = input.getCluster();
-    collation = RelCollationTraitDef.INSTANCE.canonize(collation);
-    RelTraitSet traitSet =
-        TraitsUtil.getSortTraitSet(cluster, input.getTraitSet(), collation);
-    return new HiveSort(cluster, traitSet, input, collation, offset, fetch);
-  }
-
-  @Override
-  public HiveSort copy(RelTraitSet traitSet, RelNode newInput, RelCollation newCollation,
-      RexNode offset, RexNode fetch) {
-    // TODO: can we blindly copy sort trait? What if inputs changed and we
-    // are now sorting by different cols
-    RelCollation canonizedCollation = traitSet.canonize(newCollation);
-    return new HiveSort(getCluster(), traitSet, newInput, canonizedCollation, offset, fetch);
-  }
-
-  public RexNode getFetchExpr() {
-    return fetch;
-  }
-
-  public void setInputRefToCallMap(ImmutableMap<Integer, RexNode> refToCall) {
-    this.mapOfInputRefToRexCall = refToCall;
-  }
-
-  public Map<Integer, RexNode> getInputRefToCallMap() {
-    return this.mapOfInputRefToRexCall;
-  }
-
-  @Override
-  public void implement(Implementor implementor) {
-  }
-
-  private static class HiveSortRelFactory implements RelFactories.SortFactory {
-
-    @Override
-    public RelNode createSort(RelTraitSet traits, RelNode input, RelCollation collation,
-        RexNode offset, RexNode fetch) {
-      return createSort(input, collation, offset, fetch);
-    }
-
-    @Override
-    public RelNode createSort(RelNode input, RelCollation collation, RexNode offset,
-        RexNode fetch) {
-      return create(input, collation, offset, fetch);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/b1fffd5a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveSortLimit.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveSortLimit.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveSortLimit.java
new file mode 100644
index 0000000..82fdc0e
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveSortLimit.java
@@ -0,0 +1,110 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.optimizer.calcite.reloperators;
+
+import java.util.Map;
+
+import org.apache.calcite.plan.RelOptCluster;
+import org.apache.calcite.plan.RelTraitSet;
+import org.apache.calcite.rel.RelCollation;
+import org.apache.calcite.rel.RelCollationTraitDef;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.core.RelFactories;
+import org.apache.calcite.rel.core.Sort;
+import org.apache.calcite.rex.RexNode;
+import org.apache.hadoop.hive.ql.optimizer.calcite.TraitsUtil;
+
+import com.google.common.collect.ImmutableMap;
+
+public class HiveSortLimit extends Sort implements HiveRelNode {
+
+  public static final HiveSortRelFactory HIVE_SORT_REL_FACTORY = new HiveSortRelFactory();
+
+  // NOTE: this is to work around Hive Calcite Limitations w.r.t OB.
+  // 1. Calcite can not accept expressions in OB; instead it needs to be expressed
+  // as VC in input Select.
+  // 2. Hive can not preserve ordering through select boundaries.
+  // 3. This map is used for outermost OB to migrate the VC corresponding OB
+  // expressions from input select.
+  // 4. This is used by ASTConverter after we are done with Calcite Planning
+  private ImmutableMap<Integer, RexNode> mapOfInputRefToRexCall;
+
+  public HiveSortLimit(RelOptCluster cluster, RelTraitSet traitSet, RelNode child,
+      RelCollation collation, RexNode offset, RexNode fetch) {
+    super(cluster, TraitsUtil.getSortTraitSet(cluster, traitSet, collation), child, collation,
+        offset, fetch);
+  }
+
+  /**
+   * Creates a HiveSortLimit.
+   *
+   * @param input     Input relational expression
+   * @param collation array of sort specifications
+   * @param offset    Expression for number of rows to discard before returning
+   *                  first row
+   * @param fetch     Expression for number of rows to fetch
+   */
+  public static HiveSortLimit create(RelNode input, RelCollation collation,
+      RexNode offset, RexNode fetch) {
+    RelOptCluster cluster = input.getCluster();
+    collation = RelCollationTraitDef.INSTANCE.canonize(collation);
+    RelTraitSet traitSet =
+        TraitsUtil.getSortTraitSet(cluster, input.getTraitSet(), collation);
+    return new HiveSortLimit(cluster, traitSet, input, collation, offset, fetch);
+  }
+
+  @Override
+  public HiveSortLimit copy(RelTraitSet traitSet, RelNode newInput, RelCollation newCollation,
+      RexNode offset, RexNode fetch) {
+    // TODO: can we blindly copy sort trait? What if inputs changed and we
+    // are now sorting by different cols
+    RelCollation canonizedCollation = traitSet.canonize(newCollation);
+    return new HiveSortLimit(getCluster(), traitSet, newInput, canonizedCollation, offset, fetch);
+  }
+
+  public RexNode getFetchExpr() {
+    return fetch;
+  }
+
+  public void setInputRefToCallMap(ImmutableMap<Integer, RexNode> refToCall) {
+    this.mapOfInputRefToRexCall = refToCall;
+  }
+
+  public Map<Integer, RexNode> getInputRefToCallMap() {
+    return this.mapOfInputRefToRexCall;
+  }
+
+  @Override
+  public void implement(Implementor implementor) {
+  }
+
+  private static class HiveSortRelFactory implements RelFactories.SortFactory {
+
+    @Override
+    public RelNode createSort(RelTraitSet traits, RelNode input, RelCollation collation,
+        RexNode offset, RexNode fetch) {
+      return createSort(input, collation, offset, fetch);
+    }
+
+    @Override
+    public RelNode createSort(RelNode input, RelCollation collation, RexNode offset,
+        RexNode fetch) {
+      return create(input, collation, offset, fetch);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/b1fffd5a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdMemory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdMemory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdMemory.java
index 1a2e6d1..bea5943 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdMemory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdMemory.java
@@ -26,9 +26,8 @@ import org.apache.calcite.util.BuiltInMethod;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveAggregate;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveFilter;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveJoin;
-import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveLimit;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveProject;
-import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveSort;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveSortLimit;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableScan;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveUnion;
 
@@ -73,15 +72,11 @@ public class HiveRelMdMemory extends RelMdMemory {
     return join.getCumulativeMemoryWithinPhaseSplit();
   }
 
-  public Double memory(HiveLimit limit) {
-    return 0.0;
-  }
-
   public Double memory(HiveProject project) {
     return 0.0;
   }
 
-  public Double memory(HiveSort sort) {
+  public Double memory(HiveSortLimit sort) {
     if (sort.getCollation() != RelCollations.EMPTY) {
       // It sorts
       final Double avgRowSize = RelMetadataQuery.getAverageRowSize(sort.getInput());

http://git-wip-us.apache.org/repos/asf/hive/blob/b1fffd5a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdParallelism.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdParallelism.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdParallelism.java
index c7afea9..2f51d3b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdParallelism.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdParallelism.java
@@ -27,7 +27,7 @@ import org.apache.calcite.rel.metadata.RelMetadataQuery;
 import org.apache.calcite.util.BuiltInMethod;
 import org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveJoin;
-import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveSort;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveSortLimit;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableScan;
 
 public class HiveRelMdParallelism extends RelMdParallelism {
@@ -52,7 +52,7 @@ public class HiveRelMdParallelism extends RelMdParallelism {
     return join.isPhaseTransition();
   }
 
-  public Boolean isPhaseTransition(HiveSort sort) {
+  public Boolean isPhaseTransition(HiveSortLimit sort) {
     // As Exchange operator is introduced later on, we make a
     // sort operator create a new stage for the moment
     return true;

http://git-wip-us.apache.org/repos/asf/hive/blob/b1fffd5a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java
index b6995c9..14946b3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java
@@ -58,7 +58,7 @@ import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.ql.metadata.VirtualColumn;
 import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveGroupingID;
-import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveSort;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveSortLimit;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableScan;
 import org.apache.hadoop.hive.ql.optimizer.calcite.translator.SqlFunctionConverter.HiveToken;
 import org.apache.hadoop.hive.ql.parse.ASTNode;
@@ -204,17 +204,17 @@ public class ASTConverter {
      * to its src/from. Hence the need to pass in sort for each block from
      * its parent.
      */
-    convertOBToASTNode((HiveSort) order);
+    convertOBToASTNode((HiveSortLimit) order);
 
     // 8. Limit
-    convertLimitToASTNode((HiveSort) limit);
+    convertLimitToASTNode((HiveSortLimit) limit);
 
     return hiveAST.getAST();
   }
 
-  private void convertLimitToASTNode(HiveSort limit) {
+  private void convertLimitToASTNode(HiveSortLimit limit) {
     if (limit != null) {
-      HiveSort hiveLimit = limit;
+      HiveSortLimit hiveLimit = limit;
       RexNode limitExpr = hiveLimit.getFetchExpr();
       if (limitExpr != null) {
         Object val = ((RexLiteral) limitExpr).getValue2();
@@ -223,18 +223,18 @@ public class ASTConverter {
     }
   }
 
-  private void convertOBToASTNode(HiveSort order) {
+  private void convertOBToASTNode(HiveSortLimit order) {
     if (order != null) {
-      HiveSort hiveSort = order;
-      if (!hiveSort.getCollation().getFieldCollations().isEmpty()) {
+      HiveSortLimit hiveSortLimit = order;
+      if (!hiveSortLimit.getCollation().getFieldCollations().isEmpty()) {
         // 1 Add order by token
         ASTNode orderAst = ASTBuilder.createAST(HiveParser.TOK_ORDERBY, "TOK_ORDERBY");
 
-        schema = new Schema(hiveSort);
-        Map<Integer, RexNode> obRefToCallMap = hiveSort.getInputRefToCallMap();
+        schema = new Schema(hiveSortLimit);
+        Map<Integer, RexNode> obRefToCallMap = hiveSortLimit.getInputRefToCallMap();
         RexNode obExpr;
         ASTNode astCol;
-        for (RelFieldCollation c : hiveSort.getCollation().getFieldCollations()) {
+        for (RelFieldCollation c : hiveSortLimit.getCollation().getFieldCollations()) {
 
           // 2 Add Direction token
           ASTNode directionAST = c.getDirection() == RelFieldCollation.Direction.ASCENDING ? ASTBuilder
@@ -651,7 +651,7 @@ public class ASTConverter {
      *          Hive Sort Node
      * @return Schema
      */
-    public Schema(HiveSort order) {
+    public Schema(HiveSortLimit order) {
       Project select = (Project) order.getInput();
       for (String projName : select.getRowType().getFieldNames()) {
         add(new ColumnInfo(null, projName));

http://git-wip-us.apache.org/repos/asf/hive/blob/b1fffd5a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java
index 3f66893..f6c0114 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java
@@ -64,7 +64,7 @@ import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveJoin;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveMultiJoin;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveProject;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveSemiJoin;
-import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveSort;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveSortLimit;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveSortExchange;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableScan;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveUnion;
@@ -171,8 +171,8 @@ public class HiveOpConverter {
       return visit(hj);
     } else if (rn instanceof HiveFilter) {
       return visit((HiveFilter) rn);
-    } else if (rn instanceof HiveSort) {
-      return visit((HiveSort) rn);
+    } else if (rn instanceof HiveSortLimit) {
+      return visit((HiveSortLimit) rn);
     } else if (rn instanceof HiveUnion) {
       return visit((HiveUnion) rn);
     } else if (rn instanceof HiveSortExchange) {
@@ -398,7 +398,7 @@ public class HiveOpConverter {
     return HiveGBOpConvUtil.translateGB(inputOpAf, aggRel, hiveConf);
   }
 
-  OpAttr visit(HiveSort sortRel) throws SemanticException {
+  OpAttr visit(HiveSortLimit sortRel) throws SemanticException {
     OpAttr inputOpAf = dispatch(sortRel.getInput());
 
     if (LOG.isDebugEnabled()) {

http://git-wip-us.apache.org/repos/asf/hive/blob/b1fffd5a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/PlanModifierForASTConv.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/PlanModifierForASTConv.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/PlanModifierForASTConv.java
index 5cd3a06..67f17c2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/PlanModifierForASTConv.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/PlanModifierForASTConv.java
@@ -45,7 +45,7 @@ import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException;
 import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveAggregate;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveProject;
-import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveSort;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveSortLimit;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableScan;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
 
@@ -141,12 +141,12 @@ public class PlanModifierForASTConv {
         if (!validFilterParent(rel, parent)) {
           introduceDerivedTable(rel, parent);
         }
-      } else if (rel instanceof HiveSort) {
+      } else if (rel instanceof HiveSortLimit) {
         if (!validSortParent(rel, parent)) {
           introduceDerivedTable(rel, parent);
         }
-        if (!validSortChild((HiveSort) rel)) {
-          introduceDerivedTable(((HiveSort) rel).getInput(), rel);
+        if (!validSortChild((HiveSortLimit) rel)) {
+          introduceDerivedTable(((HiveSortLimit) rel).getInput(), rel);
         }
       } else if (rel instanceof HiveAggregate) {
         RelNode newParent = parent;
@@ -297,7 +297,7 @@ public class PlanModifierForASTConv {
     return validParent;
   }
 
-  private static boolean validSortChild(HiveSort sortNode) {
+  private static boolean validSortChild(HiveSortLimit sortNode) {
     boolean validChild = true;
     RelNode child = sortNode.getInput();
 

http://git-wip-us.apache.org/repos/asf/hive/blob/b1fffd5a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/PlanModifierUtil.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/PlanModifierUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/PlanModifierUtil.java
index 3e2fae9..988d6d3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/PlanModifierUtil.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/PlanModifierUtil.java
@@ -37,7 +37,7 @@ import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException;
 import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveProject;
-import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveSort;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveSortLimit;
 
 import com.google.common.collect.ImmutableMap;
 
@@ -53,7 +53,7 @@ public class PlanModifierUtil {
         || !HiveCalciteUtil.orderRelNode(topSelparentPair.getKey())) {
       return;
     }
-    HiveSort obRel = (HiveSort) topSelparentPair.getKey();
+    HiveSortLimit obRel = (HiveSortLimit) topSelparentPair.getKey();
     Project obChild = (Project) topSelparentPair.getValue();
     if (obChild.getRowType().getFieldCount() <= resultSchema.size()) {
       return;

http://git-wip-us.apache.org/repos/asf/hive/blob/b1fffd5a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
index 73ae7c4..86bdf7e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
@@ -131,7 +131,7 @@ import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveJoin;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveProject;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveRelNode;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveSemiJoin;
-import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveSort;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveSortLimit;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableScan;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveUnion;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveExpandDistinctAggregatesRule;
@@ -922,7 +922,7 @@ public class CalcitePlanner extends SemanticAnalyzer {
         // thus we run the field trimmer again to push them back down
         HiveRelFieldTrimmer fieldTrimmer = new HiveRelFieldTrimmer(null, HiveProject.DEFAULT_PROJECT_FACTORY,
             HiveFilter.DEFAULT_FILTER_FACTORY, HiveJoin.HIVE_JOIN_FACTORY,
-            HiveSemiJoin.HIVE_SEMIJOIN_FACTORY, HiveSort.HIVE_SORT_REL_FACTORY,
+            HiveSemiJoin.HIVE_SEMIJOIN_FACTORY, HiveSortLimit.HIVE_SORT_REL_FACTORY,
             HiveAggregate.HIVE_AGGR_REL_FACTORY, HiveUnion.UNION_REL_FACTORY);
         calciteOptimizedPlan = fieldTrimmer.trim(calciteOptimizedPlan);
         calciteOptimizedPlan = hepPlan(calciteOptimizedPlan, false, mdProvider.getMetadataProvider(),
@@ -1003,7 +1003,7 @@ public class CalcitePlanner extends SemanticAnalyzer {
       // 5. Projection Pruning
       HiveRelFieldTrimmer fieldTrimmer = new HiveRelFieldTrimmer(null, HiveProject.DEFAULT_PROJECT_FACTORY,
           HiveFilter.DEFAULT_FILTER_FACTORY, HiveJoin.HIVE_JOIN_FACTORY,
-          HiveSemiJoin.HIVE_SEMIJOIN_FACTORY, HiveSort.HIVE_SORT_REL_FACTORY,
+          HiveSemiJoin.HIVE_SEMIJOIN_FACTORY, HiveSortLimit.HIVE_SORT_REL_FACTORY,
           HiveAggregate.HIVE_AGGR_REL_FACTORY, HiveUnion.UNION_REL_FACTORY);
       basePlan = fieldTrimmer.trim(basePlan);
 
@@ -2266,7 +2266,7 @@ public class CalcitePlanner extends SemanticAnalyzer {
         // 4. Construct SortRel
         RelTraitSet traitSet = cluster.traitSetOf(HiveRelNode.CONVENTION);
         RelCollation canonizedCollation = traitSet.canonize(RelCollationImpl.of(fieldCollations));
-        sortRel = new HiveSort(cluster, traitSet, obInputRel, canonizedCollation, null, null);
+        sortRel = new HiveSortLimit(cluster, traitSet, obInputRel, canonizedCollation, null, null);
 
         // 5. Update the maps
         // NOTE: Output RR for SortRel is considered same as its input; we may
@@ -2292,7 +2292,7 @@ public class CalcitePlanner extends SemanticAnalyzer {
         RexNode fetch = cluster.getRexBuilder().makeExactLiteral(BigDecimal.valueOf(limit));
         RelTraitSet traitSet = cluster.traitSetOf(HiveRelNode.CONVENTION);
         RelCollation canonizedCollation = traitSet.canonize(RelCollations.EMPTY);
-        sortRel = new HiveSort(cluster, traitSet, srcRel, canonizedCollation, null, fetch);
+        sortRel = new HiveSortLimit(cluster, traitSet, srcRel, canonizedCollation, null, fetch);
 
         RowResolver outputRR = new RowResolver();
         if (!RowResolver.add(outputRR, relToHiveRR.get(srcRel))) {
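
To summarize the refactor in one place: after this commit, ORDER BY and LIMIT are both expressed through HiveSortLimit, with a bare LIMIT encoded as a fetch literal on an empty collation. The sketch below is illustrative only — the class name SortLimitSketch and the methods orderBy and limit are hypothetical, while the constructor and builder calls mirror the CalcitePlanner hunks above and assume a live RelOptCluster and input RelNode:

import java.math.BigDecimal;
import org.apache.calcite.plan.RelOptCluster;
import org.apache.calcite.plan.RelTraitSet;
import org.apache.calcite.rel.RelCollation;
import org.apache.calcite.rel.RelCollations;
import org.apache.calcite.rel.RelNode;
import org.apache.calcite.rex.RexNode;
import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveRelNode;
import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveSortLimit;

class SortLimitSketch {
  // ORDER BY only: a real collation, no fetch expression.
  static HiveSortLimit orderBy(RelOptCluster cluster, RelNode input, RelCollation collation) {
    RelTraitSet traits = cluster.traitSetOf(HiveRelNode.CONVENTION);
    return new HiveSortLimit(cluster, traits, input, traits.canonize(collation), null, null);
  }

  // LIMIT only: empty collation, fetch carries the row count --
  // the case HiveLimit used to model before this commit.
  static HiveSortLimit limit(RelOptCluster cluster, RelNode input, long n) {
    RexNode fetch = cluster.getRexBuilder().makeExactLiteral(BigDecimal.valueOf(n));
    RelTraitSet traits = cluster.traitSetOf(HiveRelNode.CONVENTION);
    return new HiveSortLimit(cluster, traits, input, traits.canonize(RelCollations.EMPTY), null, fetch);
  }
}

Collapsing HiveLimit into the sort operator leaves a single cost/memory/parallelism code path, which is why HiveRelMdMemory.memory(HiveLimit) could be deleted outright above.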


[13/24] hive git commit: HIVE-11771: Parquet timestamp conversion errors (Jimmy, reviewed by Szehon)

Posted by pr...@apache.org.
HIVE-11771: Parquet timestamp conversion errors (Jimmy, reviewed by Szehon)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/1e97b161
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/1e97b161
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/1e97b161

Branch: refs/heads/llap
Commit: 1e97b16181941f8c21684f4b7a4958b890ef7738
Parents: b1fffd5
Author: Jimmy Xiang <jx...@cloudera.com>
Authored: Wed Sep 9 13:26:06 2015 -0700
Committer: Jimmy Xiang <jx...@cloudera.com>
Committed: Sat Sep 12 14:43:14 2015 -0700

----------------------------------------------------------------------
 .../ql/io/parquet/timestamp/NanoTimeUtils.java  | 23 +++++++++---
 .../serde/TestParquetTimestampUtils.java        | 38 +++++++++++++++++++-
 2 files changed, 56 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/1e97b161/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/timestamp/NanoTimeUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/timestamp/NanoTimeUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/timestamp/NanoTimeUtils.java
index 59c9b4a..aace48e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/timestamp/NanoTimeUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/timestamp/NanoTimeUtils.java
@@ -15,6 +15,7 @@ package org.apache.hadoop.hive.ql.io.parquet.timestamp;
 
 import java.sql.Timestamp;
 import java.util.Calendar;
+import java.util.GregorianCalendar;
 import java.util.TimeZone;
 import java.util.concurrent.TimeUnit;
 
@@ -28,6 +29,7 @@ public class NanoTimeUtils {
    static final long NANOS_PER_HOUR = TimeUnit.HOURS.toNanos(1);
    static final long NANOS_PER_MINUTE = TimeUnit.MINUTES.toNanos(1);
    static final long NANOS_PER_SECOND = TimeUnit.SECONDS.toNanos(1);
+   static final long NANOS_PER_DAY = TimeUnit.DAYS.toNanos(1);
 
    private static final ThreadLocal<Calendar> parquetGMTCalendar = new ThreadLocal<Calendar>();
    private static final ThreadLocal<Calendar> parquetLocalCalendar = new ThreadLocal<Calendar>();
@@ -48,14 +50,20 @@ public class NanoTimeUtils {
    }
 
    private static Calendar getCalendar(boolean skipConversion) {
-     return skipConversion ? getLocalCalendar() : getGMTCalendar();
+     Calendar calendar = skipConversion ? getLocalCalendar() : getGMTCalendar();
+     calendar.clear(); // Reset all fields before reusing this instance
+     return calendar;
    }
 
    public static NanoTime getNanoTime(Timestamp ts, boolean skipConversion) {
 
      Calendar calendar = getCalendar(skipConversion);
      calendar.setTime(ts);
-     JDateTime jDateTime = new JDateTime(calendar.get(Calendar.YEAR),
+     int year = calendar.get(Calendar.YEAR);
+     if (calendar.get(Calendar.ERA) == GregorianCalendar.BC) {
+       year = 1 - year;
+     }
+     JDateTime jDateTime = new JDateTime(year,
        calendar.get(Calendar.MONTH) + 1,  //java calendar index starting at 1.
        calendar.get(Calendar.DAY_OF_MONTH));
      int days = jDateTime.getJulianDayNumber();
@@ -74,13 +82,20 @@ public class NanoTimeUtils {
      int julianDay = nt.getJulianDay();
      long nanosOfDay = nt.getTimeOfDayNanos();
 
+     long remainder = nanosOfDay;
+     julianDay += remainder / NANOS_PER_DAY;
+     remainder %= NANOS_PER_DAY;
+     if (remainder < 0) {
+       remainder += NANOS_PER_DAY;
+       julianDay--;
+     }
+
      JDateTime jDateTime = new JDateTime((double) julianDay);
      Calendar calendar = getCalendar(skipConversion);
      calendar.set(Calendar.YEAR, jDateTime.getYear());
-     calendar.set(Calendar.MONTH, jDateTime.getMonth() - 1); //java calender index starting at 1.
+     calendar.set(Calendar.MONTH, jDateTime.getMonth() - 1); //java calendar index starting at 1.
      calendar.set(Calendar.DAY_OF_MONTH, jDateTime.getDay());
 
-     long remainder = nanosOfDay;
      int hour = (int) (remainder / (NANOS_PER_HOUR));
      remainder = remainder % (NANOS_PER_HOUR);
      int minutes = (int) (remainder / (NANOS_PER_MINUTE));
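
The new prologue in getTimestamp() folds an out-of-range time-of-day value back into [0, NANOS_PER_DAY) and carries whole days into the Julian day number. A minimal standalone sketch of that normalization follows; the class and method names are illustrative, not part of NanoTimeUtils:

import java.util.concurrent.TimeUnit;

public class JulianDayCarrySketch {
  static final long NANOS_PER_DAY = TimeUnit.DAYS.toNanos(1);

  // Fold nanosOfDay into [0, NANOS_PER_DAY), carrying whole days into the
  // Julian day, in the same spirit as the getTimestamp() change above.
  static long[] normalize(long julianDay, long nanosOfDay) {
    julianDay += nanosOfDay / NANOS_PER_DAY;
    nanosOfDay %= NANOS_PER_DAY;          // Java's % keeps the dividend's sign
    if (nanosOfDay < 0) {                 // negative remainder: borrow one day
      nanosOfDay += NANOS_PER_DAY;
      julianDay--;
    }
    return new long[] { julianDay, nanosOfDay };
  }

  public static void main(String[] args) {
    // Same shape as the new testNanos() assertions: day-1 plus a full day of
    // nanos, and day+3 minus three days of nanos, both normalize to (day, n).
    long day = 2440588, n = 42;
    System.out.println(java.util.Arrays.toString(
        normalize(day - 1, n + TimeUnit.DAYS.toNanos(1))));   // [2440588, 42]
    System.out.println(java.util.Arrays.toString(
        normalize(day + 3, n - TimeUnit.DAYS.toNanos(3))));   // [2440588, 42]
  }
}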

http://git-wip-us.apache.org/repos/asf/hive/blob/1e97b161/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/serde/TestParquetTimestampUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/serde/TestParquetTimestampUtils.java b/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/serde/TestParquetTimestampUtils.java
index 510ffd1..ec6def5 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/serde/TestParquetTimestampUtils.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/serde/TestParquetTimestampUtils.java
@@ -16,7 +16,9 @@ package org.apache.hadoop.hive.ql.io.parquet.serde;
 import java.sql.Timestamp;
 import java.util.Calendar;
 import java.util.Date;
+import java.util.GregorianCalendar;
 import java.util.TimeZone;
+import java.util.concurrent.TimeUnit;
 
 import junit.framework.Assert;
 import junit.framework.TestCase;
@@ -74,7 +76,36 @@ public class TestParquetTimestampUtils extends TestCase {
     Timestamp ts2Fetched = NanoTimeUtils.getTimestamp(nt2, false);
     Assert.assertEquals(ts2Fetched, ts2);
     Assert.assertEquals(nt2.getJulianDay() - nt1.getJulianDay(), 30);
-  }
+
+    //check that there are 1464305 Julian days between Jan 1, 2005 BC and Jan 31, 2005.
+    cal1 = Calendar.getInstance();
+    cal1.set(Calendar.ERA,  GregorianCalendar.BC);
+    cal1.set(Calendar.YEAR,  2005);
+    cal1.set(Calendar.MONTH, Calendar.JANUARY);
+    cal1.set(Calendar.DAY_OF_MONTH, 1);
+    cal1.set(Calendar.HOUR_OF_DAY, 0);
+    cal1.setTimeZone(TimeZone.getTimeZone("GMT"));
+
+    ts1 = new Timestamp(cal1.getTimeInMillis());
+    nt1 = NanoTimeUtils.getNanoTime(ts1, false);
+
+    ts1Fetched = NanoTimeUtils.getTimestamp(nt1, false);
+    Assert.assertEquals(ts1Fetched, ts1);
+
+    cal2 = Calendar.getInstance();
+    cal2.set(Calendar.YEAR,  2005);
+    cal2.set(Calendar.MONTH, Calendar.JANUARY);
+    cal2.set(Calendar.DAY_OF_MONTH, 31);
+    cal2.set(Calendar.HOUR_OF_DAY, 0);
+    cal2.setTimeZone(TimeZone.getTimeZone("UTC"));
+
+    ts2 = new Timestamp(cal2.getTimeInMillis());
+    nt2 = NanoTimeUtils.getNanoTime(ts2, false);
+
+    ts2Fetched = NanoTimeUtils.getTimestamp(nt2, false);
+    Assert.assertEquals(ts2Fetched, ts2);
+    Assert.assertEquals(nt2.getJulianDay() - nt1.getJulianDay(), 1464305);
+  }
 
   public void testNanos() {
     //case 1: 01:01:01.0000000001
@@ -136,6 +167,11 @@ public class TestParquetTimestampUtils extends TestCase {
     NanoTime n1 = NanoTimeUtils.getNanoTime(ts1, false);
 
     Assert.assertEquals(n2.getTimeOfDayNanos() - n1.getTimeOfDayNanos(), 600000000009L);
+
+    NanoTime n3 = new NanoTime(n1.getJulianDay() - 1, n1.getTimeOfDayNanos() + TimeUnit.DAYS.toNanos(1));
+    Assert.assertEquals(ts1, NanoTimeUtils.getTimestamp(n3, false));
+    n3 = new NanoTime(n1.getJulianDay() + 3, n1.getTimeOfDayNanos() - TimeUnit.DAYS.toNanos(3));
+    Assert.assertEquals(ts1, NanoTimeUtils.getTimestamp(n3, false));
   }
 
   public void testTimezone() {
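
The new BC-era assertions above lean on the year mapping added to getNanoTime(): java.util.Calendar reports BC dates as a positive YEAR paired with ERA == BC, while the Julian-day conversion wants astronomical year numbering (1 BC is year 0, 2 BC is year -1, and so on), which is what the patch hands to JDateTime. A minimal sketch of that mapping, with an illustrative class name:

import java.util.Calendar;
import java.util.GregorianCalendar;
import java.util.TimeZone;

public class BcYearSketch {
  // Map Calendar's (ERA, YEAR) pair to an astronomical year number, as the
  // getNanoTime() change does before building the JDateTime.
  static int astronomicalYear(Calendar cal) {
    int year = cal.get(Calendar.YEAR);
    if (cal.get(Calendar.ERA) == GregorianCalendar.BC) {
      year = 1 - year;                    // 1 BC -> 0, 2005 BC -> -2004
    }
    return year;
  }

  public static void main(String[] args) {
    Calendar cal = Calendar.getInstance(TimeZone.getTimeZone("GMT"));
    cal.clear();
    cal.set(Calendar.ERA, GregorianCalendar.BC);
    cal.set(Calendar.YEAR, 2005);
    System.out.println(astronomicalYear(cal));  // prints -2004
  }
}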


[11/24] hive git commit: HIVE-11751: hive-exec-log4j2.xml settings cause DEBUG messages to be generated and ignored (Prasanth Jayachandran, reviewed by Sergey Shelukhin)

Posted by pr...@apache.org.
HIVE-11751: hive-exec-log4j2.xml settings cause DEBUG messages to be generated and ignored (Prasanth Jayachandran, reviewed by Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/753fed62
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/753fed62
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/753fed62

Branch: refs/heads/llap
Commit: 753fed62d639179ef30d8db4d58f44ecfa7d2ee4
Parents: f4361bf
Author: Prasanth Jayachandran <j....@gmail.com>
Authored: Fri Sep 11 12:03:12 2015 -0500
Committer: Prasanth Jayachandran <j....@gmail.com>
Committed: Fri Sep 11 12:03:12 2015 -0500

----------------------------------------------------------------------
 beeline/src/main/resources/beeline-log4j2.xml                   | 5 ++---
 common/src/main/resources/hive-log4j2.xml                       | 5 ++---
 common/src/test/resources/hive-exec-log4j2-test.xml             | 5 ++---
 common/src/test/resources/hive-log4j2-test.xml                  | 5 ++---
 data/conf/hive-log4j2.xml                                       | 5 ++---
 .../test/e2e/templeton/deployers/config/hive/hive-log4j2.xml    | 5 ++---
 hcatalog/webhcat/svr/src/main/config/webhcat-log4j2.xml         | 5 ++---
 ql/src/main/resources/hive-exec-log4j2.xml                      | 5 ++---
 ql/src/main/resources/tez-container-log4j2.xml                  | 5 ++---
 testutils/ptest2/src/main/resources/log4j2.xml                  | 5 ++---
 10 files changed, 20 insertions(+), 30 deletions(-)
----------------------------------------------------------------------
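
The mechanics behind the subject line: in Log4j2 the logger level decides whether a log event is created at all, while a level on an AppenderRef only filters events that already exist. With Root held at the DEBUG/ALL threshold and the real level pushed down to the appender reference, isDebugEnabled() returned true for loggers inheriting Root, so DEBUG events and their formatted arguments were built and then thrown away at the appender. A small sketch of the guard this change makes effective again; the class and method here are illustrative:

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

public class GuardedDebugSketch {
  private static final Logger LOG = LogManager.getLogger(GuardedDebugSketch.class);

  static void processRow(byte[] row) {
    // With the level on the Root logger (the fixed configs), a WARN/INFO
    // level makes this guard false and the expensive toString never runs.
    // With Root at DEBUG and only the AppenderRef filtering, the guard was
    // true and the message was built just to be discarded downstream.
    if (LOG.isDebugEnabled()) {
      LOG.debug("row contents: {}", java.util.Arrays.toString(row));
    }
  }
}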


http://git-wip-us.apache.org/repos/asf/hive/blob/753fed62/beeline/src/main/resources/beeline-log4j2.xml
----------------------------------------------------------------------
diff --git a/beeline/src/main/resources/beeline-log4j2.xml b/beeline/src/main/resources/beeline-log4j2.xml
index 2349c5a..a64f55e 100644
--- a/beeline/src/main/resources/beeline-log4j2.xml
+++ b/beeline/src/main/resources/beeline-log4j2.xml
@@ -20,7 +20,6 @@
  packages="org.apache.hadoop.hive.ql.log">
 
   <Properties>
-    <Property name="hive.log.threshold">DEBUG</Property>
     <Property name="hive.log.level">WARN</Property>
     <Property name="hive.root.logger">console</Property>
   </Properties>
@@ -32,8 +31,8 @@
   </Appenders>
 
   <Loggers>
-    <Root level="${sys:hive.log.threshold}">
-      <AppenderRef ref="${sys:hive.root.logger}" level="${sys:hive.log.level}"/>
+    <Root level="${sys:hive.log.level}">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
     </Root>
   </Loggers>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/753fed62/common/src/main/resources/hive-log4j2.xml
----------------------------------------------------------------------
diff --git a/common/src/main/resources/hive-log4j2.xml b/common/src/main/resources/hive-log4j2.xml
index 31b8fcc..3834547 100644
--- a/common/src/main/resources/hive-log4j2.xml
+++ b/common/src/main/resources/hive-log4j2.xml
@@ -20,7 +20,6 @@
  packages="org.apache.hadoop.hive.ql.log">
 
   <Properties>
-    <Property name="hive.log.threshold">ALL</Property>
     <Property name="hive.log.level">INFO</Property>
     <Property name="hive.root.logger">DRFA</Property>
     <Property name="hive.log.dir">${sys:java.io.tmpdir}/${sys:user.name}</Property>
@@ -68,8 +67,8 @@
   </Appenders>
 
   <Loggers>
-    <Root level="${sys:hive.log.threshold}">
-      <AppenderRef ref="${sys:hive.root.logger}" level="${sys:hive.log.level}"/>
+    <Root level="${sys:hive.log.level}">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
       <AppenderRef ref="EventCounter" />
     </Root>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/753fed62/common/src/test/resources/hive-exec-log4j2-test.xml
----------------------------------------------------------------------
diff --git a/common/src/test/resources/hive-exec-log4j2-test.xml b/common/src/test/resources/hive-exec-log4j2-test.xml
index 1d91b26..03487de 100644
--- a/common/src/test/resources/hive-exec-log4j2-test.xml
+++ b/common/src/test/resources/hive-exec-log4j2-test.xml
@@ -20,7 +20,6 @@
  packages="org.apache.hadoop.hive.ql.log">
 
   <Properties>
-    <Property name="hive.log.threshold">DEBUG</Property>
     <Property name="hive.log.level">INFO</Property>
     <Property name="hive.root.logger">FA</Property>
     <Property name="hive.log.dir">${sys:test.tmp.dir}/${sys:user.name}-TestHiveLogging</Property>
@@ -41,8 +40,8 @@
   </Appenders>
 
   <Loggers>
-    <Root level="${sys:hive.log.threshold}">
-      <AppenderRef ref="${sys:hive.root.logger}" level="${sys:hive.log.level}"/>
+    <Root level="${sys:hive.log.level}">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
       <AppenderRef ref="EventCounter" />
     </Root>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/753fed62/common/src/test/resources/hive-log4j2-test.xml
----------------------------------------------------------------------
diff --git a/common/src/test/resources/hive-log4j2-test.xml b/common/src/test/resources/hive-log4j2-test.xml
index 98ca6f8..0297e88 100644
--- a/common/src/test/resources/hive-log4j2-test.xml
+++ b/common/src/test/resources/hive-log4j2-test.xml
@@ -20,7 +20,6 @@
  packages="org.apache.hadoop.hive.ql.log">
 
   <Properties>
-    <Property name="hive.log.threshold">DEBUG</Property>
     <Property name="hive.log.level">WARN</Property>
     <Property name="hive.root.logger">DRFA</Property>
     <Property name="hive.log.dir">${sys:test.tmp.dir}/${sys:user.name}-TestHiveLogging</Property>
@@ -50,8 +49,8 @@
   </Appenders>
 
   <Loggers>
-    <Root level="${sys:hive.log.threshold}">
-      <AppenderRef ref="${sys:hive.root.logger}" level="${sys:hive.log.level}"/>
+    <Root level="${sys:hive.log.level}">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
       <AppenderRef ref="EventCounter" />
     </Root>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/753fed62/data/conf/hive-log4j2.xml
----------------------------------------------------------------------
diff --git a/data/conf/hive-log4j2.xml b/data/conf/hive-log4j2.xml
index 452f01f..48a584f 100644
--- a/data/conf/hive-log4j2.xml
+++ b/data/conf/hive-log4j2.xml
@@ -20,7 +20,6 @@
  packages="org.apache.hadoop.hive.ql.log">
 
   <Properties>
-    <Property name="hive.log.threshold">DEBUG</Property>
     <Property name="hive.log.level">DEBUG</Property>
     <Property name="hive.root.logger">DRFA</Property>
     <Property name="hive.log.dir">${sys:test.tmp.dir}/log</Property>
@@ -68,8 +67,8 @@
   </Appenders>
 
   <Loggers>
-    <Root level="${sys:hive.log.threshold}">
-      <AppenderRef ref="${sys:hive.root.logger}" level="${sys:hive.log.level}"/>
+    <Root level="${sys:hive.log.level}">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
       <AppenderRef ref="EventCounter" />
     </Root>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/753fed62/hcatalog/src/test/e2e/templeton/deployers/config/hive/hive-log4j2.xml
----------------------------------------------------------------------
diff --git a/hcatalog/src/test/e2e/templeton/deployers/config/hive/hive-log4j2.xml b/hcatalog/src/test/e2e/templeton/deployers/config/hive/hive-log4j2.xml
index 30f7603..87e18e2 100644
--- a/hcatalog/src/test/e2e/templeton/deployers/config/hive/hive-log4j2.xml
+++ b/hcatalog/src/test/e2e/templeton/deployers/config/hive/hive-log4j2.xml
@@ -20,7 +20,6 @@
  packages="org.apache.hadoop.hive.ql.log">
 
   <Properties>
-    <Property name="hive.log.threshold">ALL</Property>
     <Property name="hive.log.level">DEBUG</Property>
     <Property name="hive.root.logger">DRFA</Property>
     <Property name="hive.log.dir">${sys:java.io.tmpdir}/${sys:user.name}</Property>
@@ -68,8 +67,8 @@
   </Appenders>
 
   <Loggers>
-    <Root level="${sys:hive.log.threshold}">
-      <AppenderRef ref="${sys:hive.root.logger}" level="${sys:hive.log.level}"/>
+    <Root level="${sys:hive.log.level}">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
       <AppenderRef ref="EventCounter" />
     </Root>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/753fed62/hcatalog/webhcat/svr/src/main/config/webhcat-log4j2.xml
----------------------------------------------------------------------
diff --git a/hcatalog/webhcat/svr/src/main/config/webhcat-log4j2.xml b/hcatalog/webhcat/svr/src/main/config/webhcat-log4j2.xml
index 96f0974..ef8e73d 100644
--- a/hcatalog/webhcat/svr/src/main/config/webhcat-log4j2.xml
+++ b/hcatalog/webhcat/svr/src/main/config/webhcat-log4j2.xml
@@ -20,7 +20,6 @@
  packages="org.apache.hadoop.hive.ql.log">
 
   <Properties>
-    <Property name="webhcat.log.threshold">DEBUG</Property>
     <Property name="webhcat.log.level">INFO</Property>
     <Property name="webhcat.root.logger">standard</Property>
     <Property name="webhcat.log.dir">.</Property>
@@ -48,8 +47,8 @@
   </Appenders>
 
   <Loggers>
-    <Root level="${sys:webhcat.log.threshold}">
-      <AppenderRef ref="${sys:webhcat.root.logger}" level="${sys:webhcat.log.level}"/>
+    <Root level="${sys:webhcat.log.level}">
+      <AppenderRef ref="${sys:webhcat.root.logger}"/>
     </Root>
 
     <Logger name="com.sun.jersey" level="DEBUG">

http://git-wip-us.apache.org/repos/asf/hive/blob/753fed62/ql/src/main/resources/hive-exec-log4j2.xml
----------------------------------------------------------------------
diff --git a/ql/src/main/resources/hive-exec-log4j2.xml b/ql/src/main/resources/hive-exec-log4j2.xml
index 8b520a2..8ed8b60 100644
--- a/ql/src/main/resources/hive-exec-log4j2.xml
+++ b/ql/src/main/resources/hive-exec-log4j2.xml
@@ -20,7 +20,6 @@
  packages="org.apache.hadoop.hive.ql.log">
 
   <Properties>
-    <Property name="hive.log.threshold">DEBUG</Property>
     <Property name="hive.log.level">INFO</Property>
     <Property name="hive.root.logger">FA</Property>
     <Property name="hive.log.dir">${sys:java.io.tmpdir}/${sys:user.name}</Property>
@@ -67,8 +66,8 @@
   </Appenders>
 
   <Loggers>
-    <Root level="${sys:hive.log.threshold}">
-      <AppenderRef ref="${sys:hive.root.logger}" level="${sys:hive.log.level}"/>
+    <Root level="${sys:hive.log.level}">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
       <AppenderRef ref="EventCounter" />
     </Root>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/753fed62/ql/src/main/resources/tez-container-log4j2.xml
----------------------------------------------------------------------
diff --git a/ql/src/main/resources/tez-container-log4j2.xml b/ql/src/main/resources/tez-container-log4j2.xml
index be949dc..604e586 100644
--- a/ql/src/main/resources/tez-container-log4j2.xml
+++ b/ql/src/main/resources/tez-container-log4j2.xml
@@ -20,7 +20,6 @@
  packages="org.apache.hadoop.hive.ql.log">
 
   <Properties>
-    <Property name="tez.container.log.threshold">ALL</Property>
     <Property name="tez.container.log.level">INFO</Property>
     <Property name="tez.container.root.logger">CLA</Property>
     <Property name="tez.container.log.dir">${sys:yarn.app.container.log.dir}</Property>
@@ -41,8 +40,8 @@
   </Appenders>
 
   <Loggers>
-    <Root level="${sys:tez.container.log.threshold}">
-      <AppenderRef ref="${sys:tez.container.root.logger}" level="${sys:tez.container.log.level}"/>
+    <Root level="${sys:tez.container.log.level}">
+      <AppenderRef ref="${sys:tez.container.root.logger}"/>
     </Root>
   </Loggers>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/753fed62/testutils/ptest2/src/main/resources/log4j2.xml
----------------------------------------------------------------------
diff --git a/testutils/ptest2/src/main/resources/log4j2.xml b/testutils/ptest2/src/main/resources/log4j2.xml
index 42141b7..8eb3234 100644
--- a/testutils/ptest2/src/main/resources/log4j2.xml
+++ b/testutils/ptest2/src/main/resources/log4j2.xml
@@ -20,7 +20,6 @@
  packages="org.apache.hadoop.hive.ql.log">
 
   <Properties>
-    <Property name="hive.ptest.log.threshold">DEBUG</Property>
     <Property name="hive.ptest.log.level">DEBUG</Property>
     <Property name="hive.ptest.root.logger">FILE</Property>
     <Property name="hive.ptest.log.dir">target</Property>
@@ -45,8 +44,8 @@
   </Appenders>
 
   <Loggers>
-    <Root level="${sys:hive.ptest.log.threshold}">
-      <AppenderRef ref="${sys:hive.ptest.root.logger}" level="${sys:hive.ptest.log.level}"/>
+    <Root level="${sys:hive.ptest.log.level}">
+      <AppenderRef ref="${sys:hive.ptest.root.logger}"/>
     </Root>
 
     <Logger name="org.apache.http" level="INFO">


[16/24] hive git commit: HIVE-11763: Use * instead of sum(hash(*)) in Parquet predicate pushdown (PPD) integration tests (Sergio Pena, reviewed by Ferdinand Xu)

Posted by pr...@apache.org.
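
Why the golden files change shape below: sum(hash(*)) collapses the whole result set into one checksum, which is insensitive to row order but also blind to which rows actually matched, and distinct result sets can sum to the same value. Listing the rows with select * (bounded by the added limit 10) makes the PPD output directly inspectable. A toy illustration of the collision risk, using plain Integer hashes rather than Hive's hash():

import java.util.Arrays;
import java.util.List;

public class HashSumSketch {
  static long hashSum(List<Integer> rows) {
    long sum = 0;
    for (Integer r : rows) {
      sum += r.hashCode();   // Integer.hashCode() is just the value
    }
    return sum;
  }

  public static void main(String[] args) {
    // Different "result sets", identical checksum: a test asserting only
    // the sum cannot tell these apart.
    System.out.println(hashSum(Arrays.asList(1, 4)));  // 5
    System.out.println(hashSum(Arrays.asList(2, 3)));  // 5
  }
}
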
http://git-wip-us.apache.org/repos/asf/hive/blob/66fb9601/ql/src/test/results/clientpositive/parquet_ppd_timestamp.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/parquet_ppd_timestamp.q.out b/ql/src/test/results/clientpositive/parquet_ppd_timestamp.q.out
index 745237d..3693879 100644
--- a/ql/src/test/results/clientpositive/parquet_ppd_timestamp.q.out
+++ b/ql/src/test/results/clientpositive/parquet_ppd_timestamp.q.out
@@ -6,11 +6,11 @@ POSTHOOK: query: create table newtypestbl(c char(10), v varchar(10), d decimal(5
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@newtypestbl
-PREHOOK: query: insert overwrite table newtypestbl select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("2011-01-01 01:01:01" as timestamp) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("2011-01-20 01:01:01" as timestamp) from src src2) uniontbl
+PREHOOK: query: insert overwrite table newtypestbl select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("2011-01-01 01:01:01" as timestamp) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("2011-01-20 01:01:01" as timestamp) from src src2 limit 10) uniontbl
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@newtypestbl
-POSTHOOK: query: insert overwrite table newtypestbl select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("2011-01-01 01:01:01" as timestamp) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("2011-01-20 01:01:01" as timestamp) from src src2) uniontbl
+POSTHOOK: query: insert overwrite table newtypestbl select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("2011-01-01 01:01:01" as timestamp) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("2011-01-20 01:01:01" as timestamp) from src src2 limit 10) uniontbl
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 POSTHOOK: Output: default@newtypestbl
@@ -19,274 +19,404 @@ POSTHOOK: Lineage: newtypestbl.d EXPRESSION []
 POSTHOOK: Lineage: newtypestbl.ts EXPRESSION []
 POSTHOOK: Lineage: newtypestbl.v EXPRESSION []
 PREHOOK: query: -- timestamp data types (EQUAL, NOT_EQUAL, LESS_THAN, LESS_THAN_EQUALS, IN, BETWEEN tests)
-select sum(hash(*)) from newtypestbl where cast(ts as string)='2011-01-01 01:01:01'
+select * from newtypestbl where cast(ts as string)='2011-01-01 01:01:01'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
 POSTHOOK: query: -- timestamp data types (EQUAL, NOT_EQUAL, LESS_THAN, LESS_THAN_EQUALS, IN, BETWEEN tests)
-select sum(hash(*)) from newtypestbl where cast(ts as string)='2011-01-01 01:01:01'
+select * from newtypestbl where cast(ts as string)='2011-01-01 01:01:01'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-445653015500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where cast(ts as string)='2011-01-01 01:01:01'
+apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+PREHOOK: query: select * from newtypestbl where cast(ts as string)='2011-01-01 01:01:01'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where cast(ts as string)='2011-01-01 01:01:01'
+POSTHOOK: query: select * from newtypestbl where cast(ts as string)='2011-01-01 01:01:01'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-445653015500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where ts=cast('2011-01-01 01:01:01' as timestamp)
+apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+PREHOOK: query: select * from newtypestbl where ts=cast('2011-01-01 01:01:01' as timestamp)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where ts=cast('2011-01-01 01:01:01' as timestamp)
+POSTHOOK: query: select * from newtypestbl where ts=cast('2011-01-01 01:01:01' as timestamp)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-445653015500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where ts=cast('2011-01-01 01:01:01' as timestamp)
+apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+PREHOOK: query: select * from newtypestbl where ts=cast('2011-01-01 01:01:01' as timestamp)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where ts=cast('2011-01-01 01:01:01' as timestamp)
+POSTHOOK: query: select * from newtypestbl where ts=cast('2011-01-01 01:01:01' as timestamp)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-445653015500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where ts=cast('2011-01-01 01:01:01' as varchar(20))
+apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+PREHOOK: query: select * from newtypestbl where ts=cast('2011-01-01 01:01:01' as varchar(20))
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where ts=cast('2011-01-01 01:01:01' as varchar(20))
+POSTHOOK: query: select * from newtypestbl where ts=cast('2011-01-01 01:01:01' as varchar(20))
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-445653015500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where ts=cast('2011-01-01 01:01:01' as varchar(20))
+apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+PREHOOK: query: select * from newtypestbl where ts=cast('2011-01-01 01:01:01' as varchar(20))
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where ts=cast('2011-01-01 01:01:01' as varchar(20))
+POSTHOOK: query: select * from newtypestbl where ts=cast('2011-01-01 01:01:01' as varchar(20))
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-445653015500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where ts!=cast('2011-01-01 01:01:01' as timestamp)
+apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+PREHOOK: query: select * from newtypestbl where ts!=cast('2011-01-01 01:01:01' as timestamp)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where ts!=cast('2011-01-01 01:01:01' as timestamp)
+POSTHOOK: query: select * from newtypestbl where ts!=cast('2011-01-01 01:01:01' as timestamp)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-1033237945500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where ts!=cast('2011-01-01 01:01:01' as timestamp)
+hello     	world	11.22	2011-01-20 01:01:01
+hello     	world	11.22	2011-01-20 01:01:01
+hello     	world	11.22	2011-01-20 01:01:01
+hello     	world	11.22	2011-01-20 01:01:01
+hello     	world	11.22	2011-01-20 01:01:01
+PREHOOK: query: select * from newtypestbl where ts!=cast('2011-01-01 01:01:01' as timestamp)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where ts!=cast('2011-01-01 01:01:01' as timestamp)
+POSTHOOK: query: select * from newtypestbl where ts!=cast('2011-01-01 01:01:01' as timestamp)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-1033237945500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where ts<cast('2011-01-20 01:01:01' as timestamp)
+hello     	world	11.22	2011-01-20 01:01:01
+hello     	world	11.22	2011-01-20 01:01:01
+hello     	world	11.22	2011-01-20 01:01:01
+hello     	world	11.22	2011-01-20 01:01:01
+hello     	world	11.22	2011-01-20 01:01:01
+PREHOOK: query: select * from newtypestbl where ts<cast('2011-01-20 01:01:01' as timestamp)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where ts<cast('2011-01-20 01:01:01' as timestamp)
+POSTHOOK: query: select * from newtypestbl where ts<cast('2011-01-20 01:01:01' as timestamp)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-445653015500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where ts<cast('2011-01-20 01:01:01' as timestamp)
+apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+PREHOOK: query: select * from newtypestbl where ts<cast('2011-01-20 01:01:01' as timestamp)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where ts<cast('2011-01-20 01:01:01' as timestamp)
+POSTHOOK: query: select * from newtypestbl where ts<cast('2011-01-20 01:01:01' as timestamp)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-445653015500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where ts<cast('2011-01-22 01:01:01' as timestamp)
+apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+PREHOOK: query: select * from newtypestbl where ts<cast('2011-01-22 01:01:01' as timestamp)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where ts<cast('2011-01-22 01:01:01' as timestamp)
+POSTHOOK: query: select * from newtypestbl where ts<cast('2011-01-22 01:01:01' as timestamp)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-1478890961000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where ts<cast('2011-01-22 01:01:01' as timestamp)
+hello     	world	11.22	2011-01-20 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+hello     	world	11.22	2011-01-20 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+hello     	world	11.22	2011-01-20 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+hello     	world	11.22	2011-01-20 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+hello     	world	11.22	2011-01-20 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+PREHOOK: query: select * from newtypestbl where ts<cast('2011-01-22 01:01:01' as timestamp)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where ts<cast('2011-01-22 01:01:01' as timestamp)
+POSTHOOK: query: select * from newtypestbl where ts<cast('2011-01-22 01:01:01' as timestamp)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-1478890961000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where ts<cast('2010-10-01 01:01:01' as timestamp)
+hello     	world	11.22	2011-01-20 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+hello     	world	11.22	2011-01-20 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+hello     	world	11.22	2011-01-20 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+hello     	world	11.22	2011-01-20 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+hello     	world	11.22	2011-01-20 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+PREHOOK: query: select * from newtypestbl where ts<cast('2010-10-01 01:01:01' as timestamp)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where ts<cast('2010-10-01 01:01:01' as timestamp)
+POSTHOOK: query: select * from newtypestbl where ts<cast('2010-10-01 01:01:01' as timestamp)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-NULL
-PREHOOK: query: select sum(hash(*)) from newtypestbl where ts<cast('2010-10-01 01:01:01' as timestamp)
+PREHOOK: query: select * from newtypestbl where ts<cast('2010-10-01 01:01:01' as timestamp)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where ts<cast('2010-10-01 01:01:01' as timestamp)
+POSTHOOK: query: select * from newtypestbl where ts<cast('2010-10-01 01:01:01' as timestamp)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-NULL
-PREHOOK: query: select sum(hash(*)) from newtypestbl where ts<=cast('2011-01-01 01:01:01' as timestamp)
+PREHOOK: query: select * from newtypestbl where ts<=cast('2011-01-01 01:01:01' as timestamp)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where ts<=cast('2011-01-01 01:01:01' as timestamp)
+POSTHOOK: query: select * from newtypestbl where ts<=cast('2011-01-01 01:01:01' as timestamp)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-445653015500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where ts<=cast('2011-01-01 01:01:01' as timestamp)
+apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+PREHOOK: query: select * from newtypestbl where ts<=cast('2011-01-01 01:01:01' as timestamp)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where ts<=cast('2011-01-01 01:01:01' as timestamp)
+POSTHOOK: query: select * from newtypestbl where ts<=cast('2011-01-01 01:01:01' as timestamp)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-445653015500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where ts<=cast('2011-01-20 01:01:01' as timestamp)
+apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+PREHOOK: query: select * from newtypestbl where ts<=cast('2011-01-20 01:01:01' as timestamp)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where ts<=cast('2011-01-20 01:01:01' as timestamp)
+POSTHOOK: query: select * from newtypestbl where ts<=cast('2011-01-20 01:01:01' as timestamp)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-1478890961000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where ts<=cast('2011-01-20 01:01:01' as timestamp)
+hello     	world	11.22	2011-01-20 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+hello     	world	11.22	2011-01-20 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+hello     	world	11.22	2011-01-20 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+hello     	world	11.22	2011-01-20 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+hello     	world	11.22	2011-01-20 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+PREHOOK: query: select * from newtypestbl where ts<=cast('2011-01-20 01:01:01' as timestamp)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where ts<=cast('2011-01-20 01:01:01' as timestamp)
+POSTHOOK: query: select * from newtypestbl where ts<=cast('2011-01-20 01:01:01' as timestamp)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-1478890961000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where ts in (cast('2011-01-02 01:01:01' as timestamp), cast('2011-01-20 01:01:01' as timestamp))
+hello     	world	11.22	2011-01-20 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+hello     	world	11.22	2011-01-20 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+hello     	world	11.22	2011-01-20 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+hello     	world	11.22	2011-01-20 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+hello     	world	11.22	2011-01-20 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+PREHOOK: query: select * from newtypestbl where ts in (cast('2011-01-02 01:01:01' as timestamp), cast('2011-01-20 01:01:01' as timestamp))
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where ts in (cast('2011-01-02 01:01:01' as timestamp), cast('2011-01-20 01:01:01' as timestamp))
+POSTHOOK: query: select * from newtypestbl where ts in (cast('2011-01-02 01:01:01' as timestamp), cast('2011-01-20 01:01:01' as timestamp))
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-1033237945500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where ts in (cast('2011-01-02 01:01:01' as timestamp), cast('2011-01-20 01:01:01' as timestamp))
+hello     	world	11.22	2011-01-20 01:01:01
+hello     	world	11.22	2011-01-20 01:01:01
+hello     	world	11.22	2011-01-20 01:01:01
+hello     	world	11.22	2011-01-20 01:01:01
+hello     	world	11.22	2011-01-20 01:01:01
+PREHOOK: query: select * from newtypestbl where ts in (cast('2011-01-02 01:01:01' as timestamp), cast('2011-01-20 01:01:01' as timestamp))
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where ts in (cast('2011-01-02 01:01:01' as timestamp), cast('2011-01-20 01:01:01' as timestamp))
+POSTHOOK: query: select * from newtypestbl where ts in (cast('2011-01-02 01:01:01' as timestamp), cast('2011-01-20 01:01:01' as timestamp))
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-1033237945500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where ts in (cast('2011-01-01 01:01:01' as timestamp), cast('2011-01-20 01:01:01' as timestamp))
+hello     	world	11.22	2011-01-20 01:01:01
+hello     	world	11.22	2011-01-20 01:01:01
+hello     	world	11.22	2011-01-20 01:01:01
+hello     	world	11.22	2011-01-20 01:01:01
+hello     	world	11.22	2011-01-20 01:01:01
+PREHOOK: query: select * from newtypestbl where ts in (cast('2011-01-01 01:01:01' as timestamp), cast('2011-01-20 01:01:01' as timestamp))
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where ts in (cast('2011-01-01 01:01:01' as timestamp), cast('2011-01-20 01:01:01' as timestamp))
+POSTHOOK: query: select * from newtypestbl where ts in (cast('2011-01-01 01:01:01' as timestamp), cast('2011-01-20 01:01:01' as timestamp))
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-1478890961000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where ts in (cast('2011-01-01 01:01:01' as timestamp), cast('2011-01-20 01:01:01' as timestamp))
+hello     	world	11.22	2011-01-20 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+hello     	world	11.22	2011-01-20 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+hello     	world	11.22	2011-01-20 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+hello     	world	11.22	2011-01-20 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+hello     	world	11.22	2011-01-20 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+PREHOOK: query: select * from newtypestbl where ts in (cast('2011-01-01 01:01:01' as timestamp), cast('2011-01-20 01:01:01' as timestamp))
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where ts in (cast('2011-01-01 01:01:01' as timestamp), cast('2011-01-20 01:01:01' as timestamp))
+POSTHOOK: query: select * from newtypestbl where ts in (cast('2011-01-01 01:01:01' as timestamp), cast('2011-01-20 01:01:01' as timestamp))
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-1478890961000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where ts in (cast('2011-01-02 01:01:01' as timestamp), cast('2011-01-08 01:01:01' as timestamp))
+hello     	world	11.22	2011-01-20 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+hello     	world	11.22	2011-01-20 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+hello     	world	11.22	2011-01-20 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+hello     	world	11.22	2011-01-20 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+hello     	world	11.22	2011-01-20 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+PREHOOK: query: select * from newtypestbl where ts in (cast('2011-01-02 01:01:01' as timestamp), cast('2011-01-08 01:01:01' as timestamp))
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where ts in (cast('2011-01-02 01:01:01' as timestamp), cast('2011-01-08 01:01:01' as timestamp))
+POSTHOOK: query: select * from newtypestbl where ts in (cast('2011-01-02 01:01:01' as timestamp), cast('2011-01-08 01:01:01' as timestamp))
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-NULL
-PREHOOK: query: select sum(hash(*)) from newtypestbl where ts in (cast('2011-01-02 01:01:01' as timestamp), cast('2011-01-08 01:01:01' as timestamp))
+PREHOOK: query: select * from newtypestbl where ts in (cast('2011-01-02 01:01:01' as timestamp), cast('2011-01-08 01:01:01' as timestamp))
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where ts in (cast('2011-01-02 01:01:01' as timestamp), cast('2011-01-08 01:01:01' as timestamp))
+POSTHOOK: query: select * from newtypestbl where ts in (cast('2011-01-02 01:01:01' as timestamp), cast('2011-01-08 01:01:01' as timestamp))
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-NULL
-PREHOOK: query: select sum(hash(*)) from newtypestbl where ts between cast('2010-10-01 01:01:01' as timestamp) and cast('2011-01-08 01:01:01' as timestamp)
+PREHOOK: query: select * from newtypestbl where ts between cast('2010-10-01 01:01:01' as timestamp) and cast('2011-01-08 01:01:01' as timestamp)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where ts between cast('2010-10-01 01:01:01' as timestamp) and cast('2011-01-08 01:01:01' as timestamp)
+POSTHOOK: query: select * from newtypestbl where ts between cast('2010-10-01 01:01:01' as timestamp) and cast('2011-01-08 01:01:01' as timestamp)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-445653015500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where ts between cast('2010-10-01 01:01:01' as timestamp) and cast('2011-01-08 01:01:01' as timestamp)
+apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+PREHOOK: query: select * from newtypestbl where ts between cast('2010-10-01 01:01:01' as timestamp) and cast('2011-01-08 01:01:01' as timestamp)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where ts between cast('2010-10-01 01:01:01' as timestamp) and cast('2011-01-08 01:01:01' as timestamp)
+POSTHOOK: query: select * from newtypestbl where ts between cast('2010-10-01 01:01:01' as timestamp) and cast('2011-01-08 01:01:01' as timestamp)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-445653015500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where ts between cast('2010-10-01 01:01:01' as timestamp) and cast('2011-01-25 01:01:01' as timestamp)
+apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+PREHOOK: query: select * from newtypestbl where ts between cast('2010-10-01 01:01:01' as timestamp) and cast('2011-01-25 01:01:01' as timestamp)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where ts between cast('2010-10-01 01:01:01' as timestamp) and cast('2011-01-25 01:01:01' as timestamp)
+POSTHOOK: query: select * from newtypestbl where ts between cast('2010-10-01 01:01:01' as timestamp) and cast('2011-01-25 01:01:01' as timestamp)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-1478890961000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where ts between cast('2010-10-01 01:01:01' as timestamp) and cast('2011-01-25 01:01:01' as timestamp)
+hello     	world	11.22	2011-01-20 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+hello     	world	11.22	2011-01-20 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+hello     	world	11.22	2011-01-20 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+hello     	world	11.22	2011-01-20 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+hello     	world	11.22	2011-01-20 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+PREHOOK: query: select * from newtypestbl where ts between cast('2010-10-01 01:01:01' as timestamp) and cast('2011-01-25 01:01:01' as timestamp)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where ts between cast('2010-10-01 01:01:01' as timestamp) and cast('2011-01-25 01:01:01' as timestamp)
+POSTHOOK: query: select * from newtypestbl where ts between cast('2010-10-01 01:01:01' as timestamp) and cast('2011-01-25 01:01:01' as timestamp)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-1478890961000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where ts between cast('2010-10-01 01:01:01' as timestamp) and cast('2010-11-01 01:01:01' as timestamp)
+hello     	world	11.22	2011-01-20 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+hello     	world	11.22	2011-01-20 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+hello     	world	11.22	2011-01-20 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+hello     	world	11.22	2011-01-20 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+hello     	world	11.22	2011-01-20 01:01:01
+apple     	bee	0.22	2011-01-01 01:01:01
+PREHOOK: query: select * from newtypestbl where ts between cast('2010-10-01 01:01:01' as timestamp) and cast('2010-11-01 01:01:01' as timestamp)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where ts between cast('2010-10-01 01:01:01' as timestamp) and cast('2010-11-01 01:01:01' as timestamp)
+POSTHOOK: query: select * from newtypestbl where ts between cast('2010-10-01 01:01:01' as timestamp) and cast('2010-11-01 01:01:01' as timestamp)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-NULL
-PREHOOK: query: select sum(hash(*)) from newtypestbl where ts between cast('2010-10-01 01:01:01' as timestamp) and cast('2010-11-01 01:01:01' as timestamp)
+PREHOOK: query: select * from newtypestbl where ts between cast('2010-10-01 01:01:01' as timestamp) and cast('2010-11-01 01:01:01' as timestamp)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where ts between cast('2010-10-01 01:01:01' as timestamp) and cast('2010-11-01 01:01:01' as timestamp)
+POSTHOOK: query: select * from newtypestbl where ts between cast('2010-10-01 01:01:01' as timestamp) and cast('2010-11-01 01:01:01' as timestamp)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-NULL

http://git-wip-us.apache.org/repos/asf/hive/blob/66fb9601/ql/src/test/results/clientpositive/parquet_ppd_varchar.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/parquet_ppd_varchar.q.out b/ql/src/test/results/clientpositive/parquet_ppd_varchar.q.out
index 23e3cd0..0574e5d 100644
--- a/ql/src/test/results/clientpositive/parquet_ppd_varchar.q.out
+++ b/ql/src/test/results/clientpositive/parquet_ppd_varchar.q.out
@@ -6,11 +6,11 @@ POSTHOOK: query: create table newtypestbl(c char(10), v varchar(10), d decimal(5
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@newtypestbl
-PREHOOK: query: insert overwrite table newtypestbl select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2) uniontbl
+PREHOOK: query: insert overwrite table newtypestbl select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2 limit 10) uniontbl
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@newtypestbl
-POSTHOOK: query: insert overwrite table newtypestbl select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2) uniontbl
+POSTHOOK: query: insert overwrite table newtypestbl select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2 limit 10) uniontbl
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 POSTHOOK: Output: default@newtypestbl
@@ -19,202 +19,290 @@ POSTHOOK: Lineage: newtypestbl.d EXPRESSION []
 POSTHOOK: Lineage: newtypestbl.da EXPRESSION []
 POSTHOOK: Lineage: newtypestbl.v EXPRESSION []
 PREHOOK: query: -- varchar data types (EQUAL, NOT_EQUAL, LESS_THAN, LESS_THAN_EQUALS, IN, BETWEEN tests)
-select sum(hash(*)) from newtypestbl where v="bee"
+select * from newtypestbl where v="bee"
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
 POSTHOOK: query: -- varchar data types (EQUAL, NOT_EQUAL, LESS_THAN, LESS_THAN_EQUALS, IN, BETWEEN tests)
-select sum(hash(*)) from newtypestbl where v="bee"
+select * from newtypestbl where v="bee"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951929000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where v="bee"
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where v="bee"
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where v="bee"
+POSTHOOK: query: select * from newtypestbl where v="bee"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951929000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where v!="bee"
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where v!="bee"
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where v!="bee"
+POSTHOOK: query: select * from newtypestbl where v!="bee"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-334427804500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where v!="bee"
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+PREHOOK: query: select * from newtypestbl where v!="bee"
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where v!="bee"
+POSTHOOK: query: select * from newtypestbl where v!="bee"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-334427804500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where v<"world"
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+hello     	world	11.22	1970-02-27
+PREHOOK: query: select * from newtypestbl where v<"world"
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where v<"world"
+POSTHOOK: query: select * from newtypestbl where v<"world"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951929000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where v<"world"
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where v<"world"
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where v<"world"
+POSTHOOK: query: select * from newtypestbl where v<"world"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951929000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where v<="world"
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where v<="world"
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where v<="world"
+POSTHOOK: query: select * from newtypestbl where v<="world"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-81475875500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where v<="world"
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where v<="world"
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where v<="world"
+POSTHOOK: query: select * from newtypestbl where v<="world"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-81475875500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where v="bee   "
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where v="bee   "
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where v="bee   "
+POSTHOOK: query: select * from newtypestbl where v="bee   "
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-NULL
-PREHOOK: query: select sum(hash(*)) from newtypestbl where v="bee   "
+PREHOOK: query: select * from newtypestbl where v="bee   "
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where v="bee   "
+POSTHOOK: query: select * from newtypestbl where v="bee   "
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-NULL
-PREHOOK: query: select sum(hash(*)) from newtypestbl where v in ("bee", "orange")
+PREHOOK: query: select * from newtypestbl where v in ("bee", "orange")
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where v in ("bee", "orange")
+POSTHOOK: query: select * from newtypestbl where v in ("bee", "orange")
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951929000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where v in ("bee", "orange")
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where v in ("bee", "orange")
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where v in ("bee", "orange")
+POSTHOOK: query: select * from newtypestbl where v in ("bee", "orange")
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951929000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where v in ("bee", "world")
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where v in ("bee", "world")
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where v in ("bee", "world")
+POSTHOOK: query: select * from newtypestbl where v in ("bee", "world")
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-81475875500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where v in ("bee", "world")
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where v in ("bee", "world")
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where v in ("bee", "world")
+POSTHOOK: query: select * from newtypestbl where v in ("bee", "world")
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-81475875500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where v in ("orange")
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where v in ("orange")
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where v in ("orange")
+POSTHOOK: query: select * from newtypestbl where v in ("orange")
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-NULL
-PREHOOK: query: select sum(hash(*)) from newtypestbl where v in ("orange")
+PREHOOK: query: select * from newtypestbl where v in ("orange")
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where v in ("orange")
+POSTHOOK: query: select * from newtypestbl where v in ("orange")
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-NULL
-PREHOOK: query: select sum(hash(*)) from newtypestbl where v between "bee" and "orange"
+PREHOOK: query: select * from newtypestbl where v between "bee" and "orange"
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where v between "bee" and "orange"
+POSTHOOK: query: select * from newtypestbl where v between "bee" and "orange"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951929000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where v between "bee" and "orange"
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where v between "bee" and "orange"
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where v between "bee" and "orange"
+POSTHOOK: query: select * from newtypestbl where v between "bee" and "orange"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
--252951929000
-PREHOOK: query: select sum(hash(*)) from newtypestbl where v between "bee" and "zombie"
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where v between "bee" and "zombie"
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where v between "bee" and "zombie"
+POSTHOOK: query: select * from newtypestbl where v between "bee" and "zombie"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-81475875500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where v between "bee" and "zombie"
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where v between "bee" and "zombie"
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where v between "bee" and "zombie"
+POSTHOOK: query: select * from newtypestbl where v between "bee" and "zombie"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-81475875500
-PREHOOK: query: select sum(hash(*)) from newtypestbl where v between "orange" and "pine"
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+hello     	world	11.22	1970-02-27
+apple     	bee	0.22	1970-02-20
+PREHOOK: query: select * from newtypestbl where v between "orange" and "pine"
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where v between "orange" and "pine"
+POSTHOOK: query: select * from newtypestbl where v between "orange" and "pine"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-NULL
-PREHOOK: query: select sum(hash(*)) from newtypestbl where v between "orange" and "pine"
+PREHOOK: query: select * from newtypestbl where v between "orange" and "pine"
 PREHOOK: type: QUERY
 PREHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from newtypestbl where v between "orange" and "pine"
+POSTHOOK: query: select * from newtypestbl where v between "orange" and "pine"
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@newtypestbl
 #### A masked pattern was here ####
-NULL

http://git-wip-us.apache.org/repos/asf/hive/blob/66fb9601/ql/src/test/results/clientpositive/parquet_predicate_pushdown.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/parquet_predicate_pushdown.q.out b/ql/src/test/results/clientpositive/parquet_predicate_pushdown.q.out
index 1dc2937..aa3b272 100644
--- a/ql/src/test/results/clientpositive/parquet_predicate_pushdown.q.out
+++ b/ql/src/test/results/clientpositive/parquet_predicate_pushdown.q.out
@@ -251,45 +251,65 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@tbl_pred
 #### A masked pattern was here ####
 124	336	65664	4294967435	74.72	42.47	true	bob davidson	2013-03-01 09:11:58.703302	45.4	yard duty
-PREHOOK: query: SELECT SUM(HASH(t)) FROM tbl_pred
+PREHOOK: query: SELECT * FROM tbl_pred
   WHERE t IS NOT NULL
   AND t < 0
   AND t > -2
+  LIMIT 10
 PREHOOK: type: QUERY
 PREHOOK: Input: default@tbl_pred
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT SUM(HASH(t)) FROM tbl_pred
+POSTHOOK: query: SELECT * FROM tbl_pred
   WHERE t IS NOT NULL
   AND t < 0
   AND t > -2
+  LIMIT 10
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@tbl_pred
 #### A masked pattern was here ####
--8
-PREHOOK: query: SELECT SUM(HASH(t)) FROM tbl_pred
+-1	268	65778	4294967418	56.33	44.73	true	calvin falkner	2013-03-01 09:11:58.70322	7.37	history
+-1	281	65643	4294967323	15.1	45.0	false	irene nixon	2013-03-01 09:11:58.703223	80.96	undecided
+-1	300	65663	4294967343	71.26	34.62	true	calvin ovid	2013-03-01 09:11:58.703262	78.56	study skills
+-1	348	65556	4294967413	35.17	9.51	false	bob young	2013-03-01 09:11:58.70328	45.81	quiet hour
+-1	372	65680	4294967490	15.45	18.09	false	ethan laertes	2013-03-01 09:11:58.70311	65.88	opthamology
+-1	417	65685	4294967492	28.89	5.19	true	mike white	2013-03-01 09:11:58.703275	90.69	forestry
+-1	423	65663	4294967380	0.79	21.33	false	bob laertes	2013-03-01 09:11:58.703278	94.16	debate
+-1	433	65581	4294967299	86.92	23.15	false	yuri ellison	2013-03-01 09:11:58.703098	21.29	history
+PREHOOK: query: SELECT * FROM tbl_pred
   WHERE t IS NOT NULL
   AND t < 0
   AND t > -2
+  LIMIT 10
 PREHOOK: type: QUERY
 PREHOOK: Input: default@tbl_pred
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT SUM(HASH(t)) FROM tbl_pred
+POSTHOOK: query: SELECT * FROM tbl_pred
   WHERE t IS NOT NULL
   AND t < 0
   AND t > -2
+  LIMIT 10
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@tbl_pred
 #### A masked pattern was here ####
--8
-PREHOOK: query: EXPLAIN SELECT SUM(HASH(t)) FROM tbl_pred
+-1	268	65778	4294967418	56.33	44.73	true	calvin falkner	2013-03-01 09:11:58.70322	7.37	history
+-1	281	65643	4294967323	15.1	45.0	false	irene nixon	2013-03-01 09:11:58.703223	80.96	undecided
+-1	300	65663	4294967343	71.26	34.62	true	calvin ovid	2013-03-01 09:11:58.703262	78.56	study skills
+-1	348	65556	4294967413	35.17	9.51	false	bob young	2013-03-01 09:11:58.70328	45.81	quiet hour
+-1	372	65680	4294967490	15.45	18.09	false	ethan laertes	2013-03-01 09:11:58.70311	65.88	opthamology
+-1	417	65685	4294967492	28.89	5.19	true	mike white	2013-03-01 09:11:58.703275	90.69	forestry
+-1	423	65663	4294967380	0.79	21.33	false	bob laertes	2013-03-01 09:11:58.703278	94.16	debate
+-1	433	65581	4294967299	86.92	23.15	false	yuri ellison	2013-03-01 09:11:58.703098	21.29	history
+PREHOOK: query: EXPLAIN SELECT * FROM tbl_pred
   WHERE t IS NOT NULL
   AND t < 0
   AND t > -2
+  LIMIT 10
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT SUM(HASH(t)) FROM tbl_pred
+POSTHOOK: query: EXPLAIN SELECT * FROM tbl_pred
   WHERE t IS NOT NULL
   AND t < 0
   AND t > -2
+  LIMIT 10
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -306,47 +326,37 @@ STAGE PLANS:
               predicate: ((t < 0) and (UDFToInteger(t) > -2)) (type: boolean)
               Statistics: Num rows: 116 Data size: 1276 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: hash(t) (type: int)
-                outputColumnNames: _col0
+                expressions: t (type: tinyint), si (type: smallint), i (type: int), b (type: bigint), f (type: float), d (type: double), bo (type: boolean), s (type: string), ts (type: timestamp), dec (type: decimal(4,2)), bin (type: binary)
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
                 Statistics: Num rows: 116 Data size: 1276 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: sum(_col0)
-                  mode: hash
-                  outputColumnNames: _col0
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    sort order: 
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col0 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: sum(VALUE._col0)
-          mode: mergepartial
-          outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                Limit
+                  Number of rows: 10
+                  Statistics: Num rows: 10 Data size: 110 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 10 Data size: 110 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator
-      limit: -1
+      limit: 10
       Processor Tree:
         ListSink
 
-PREHOOK: query: EXPLAIN SELECT SUM(HASH(t)) FROM tbl_pred
+PREHOOK: query: EXPLAIN SELECT * FROM tbl_pred
   WHERE t IS NOT NULL
   AND t < 0
   AND t > -2
+  LIMIT 10
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT SUM(HASH(t)) FROM tbl_pred
+POSTHOOK: query: EXPLAIN SELECT * FROM tbl_pred
   WHERE t IS NOT NULL
   AND t < 0
   AND t > -2
+  LIMIT 10
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -364,35 +374,23 @@ STAGE PLANS:
               predicate: ((t < 0) and (UDFToInteger(t) > -2)) (type: boolean)
               Statistics: Num rows: 116 Data size: 1276 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: hash(t) (type: int)
-                outputColumnNames: _col0
+                expressions: t (type: tinyint), si (type: smallint), i (type: int), b (type: bigint), f (type: float), d (type: double), bo (type: boolean), s (type: string), ts (type: timestamp), dec (type: decimal(4,2)), bin (type: binary)
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
                 Statistics: Num rows: 116 Data size: 1276 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: sum(_col0)
-                  mode: hash
-                  outputColumnNames: _col0
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    sort order: 
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col0 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: sum(VALUE._col0)
-          mode: mergepartial
-          outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                Limit
+                  Number of rows: 10
+                  Statistics: Num rows: 10 Data size: 110 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 10 Data size: 110 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator
-      limit: -1
+      limit: 10
       Processor Tree:
         ListSink
 


[07/24] hive git commit: HIVE-11727: Hive on Tez through Oozie: Some queries fail with fnf exception (Gunther Hagleitner, reviewed by Vikram Dixit K)

Posted by pr...@apache.org.
HIVE-11727: Hive on Tez through Oozie: Some queries fail with fnf exception (Gunther Hagleitner, reviewed by Vikram Dixit K)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/594e25a2
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/594e25a2
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/594e25a2

Branch: refs/heads/llap
Commit: 594e25a230d8fabb7ae4e87faf3ceda689ef657e
Parents: 27bf8f0
Author: Gunther Hagleitner <gu...@apache.org>
Authored: Thu Sep 10 12:57:07 2015 -0700
Committer: Gunther Hagleitner <gu...@apache.org>
Committed: Thu Sep 10 12:57:07 2015 -0700

----------------------------------------------------------------------
 ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java | 3 +++
 1 file changed, 3 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/594e25a2/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
index f773cb9..19da1c3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
@@ -1015,6 +1015,9 @@ public class DagUtils {
     conf.set("mapred.partitioner.class", HiveConf.getVar(conf, HiveConf.ConfVars.HIVEPARTITIONER));
     conf.set("tez.runtime.partitioner.class", MRPartitioner.class.getName());
 
+    // Removing job credential entry; it cannot be set on the tasks
+    conf.unset("mapreduce.job.credentials.binary");
+
     Utilities.stripHivePasswordDetails(conf);
     return conf;
   }
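
The three added lines are the entire fix: the comment suggests the entry
points at a credentials file that only the submitting client can resolve,
so Tez tasks inheriting it go looking for a file that is not there. As a
minimal sketch of the same pattern (ConfSanitizer and sanitizeForTasks are
illustrative names, not Hive's):

import org.apache.hadoop.conf.Configuration;

public class ConfSanitizer {
  /** Drop client-only entries before a configuration is shipped to tasks. */
  public static Configuration sanitizeForTasks(Configuration conf) {
    // The binary credentials path is resolvable only where the job was
    // submitted; tasks that inherit it would fail trying to open it.
    conf.unset("mapreduce.job.credentials.binary");
    return conf;
  }
}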


[03/24] hive git commit: HIVE-11754 : Not reachable code parts in StatsUtils (Navis via Ashutosh Chauhan)

Posted by pr...@apache.org.
HIVE-11754 : Not reachable code parts in StatsUtils (Navis via Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/7a71e50d
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/7a71e50d
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/7a71e50d

Branch: refs/heads/llap
Commit: 7a71e50d456070272d802eedb4a8468a4a1ab4af
Parents: 9b11caf
Author: Navis Ryu <na...@apache.org>
Authored: Tue Sep 8 20:22:00 2015 -0800
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Wed Sep 9 23:48:39 2015 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hive/ql/stats/StatsUtils.java | 84 ++++++--------------
 1 file changed, 25 insertions(+), 59 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/7a71e50d/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java
index 55aea0e..e1f38a3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java
@@ -66,32 +66,30 @@ import org.apache.hadoop.hive.serde2.objectinspector.StandardMapObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.StructField;
 import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.UnionObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils.ObjectInspectorCopyOption;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.BinaryObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.HiveCharObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.HiveVarcharObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.StringObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableBinaryObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableBooleanObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableByteObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableConstantBinaryObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableConstantHiveCharObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableConstantHiveVarcharObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableConstantStringObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableDateObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableDoubleObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableFloatObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableHiveCharObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableHiveDecimalObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableHiveVarcharObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableIntObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableLongObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableShortObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableStringObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableTimestampObjectInspector;
+import org.apache.hadoop.hive.serde2.typeinfo.CharTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.VarcharTypeInfo;
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.tez.mapreduce.hadoop.MRJobConfig;
 
 import java.math.BigDecimal;
 import java.math.BigInteger;
 import java.util.ArrayList;
-import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
@@ -569,7 +567,7 @@ public class StatsUtils {
    *          - hive conf
    * @param parts
    *          - partition list
-   * @return sizes of patitions
+   * @return sizes of partitions
    */
   public static List<Long> getFileSizeForPartitions(HiveConf conf, List<Partition> parts) {
     List<Long> sizes = Lists.newArrayList();
@@ -783,19 +781,9 @@ public class StatsUtils {
         ConstantObjectInspector coi = (ConstantObjectInspector) oi;
 
         // if writable constant is null then return size 0
-        if (coi.getWritableConstantValue() == null) {
-          return 0;
-        }
-
-        return coi.getWritableConstantValue().toString().length();
-      } else if (oi instanceof WritableConstantStringObjectInspector) {
-
-        // some UDFs return writable constant strings (fixed width)
-        // Ex: select upper("hello") from table
-        WritableConstantStringObjectInspector wcsoi = (WritableConstantStringObjectInspector) oi;
-
-        return wcsoi.getWritableConstantValue().toString().length();
-      } else if (oi instanceof WritableStringObjectInspector) {
+        Object constantValue = coi.getWritableConstantValue();
+        return constantValue == null ? 0 : constantValue.toString().length();
+      } else if (oi instanceof StringObjectInspector) {
 
         // some UDFs may emit strings of variable length. like pattern matching
         // UDFs. it's hard to find the length of such UDFs.
@@ -809,18 +797,11 @@ public class StatsUtils {
         ConstantObjectInspector coi = (ConstantObjectInspector) oi;
 
         // if writable constant is null then return size 0
-        if (coi.getWritableConstantValue() == null) {
-          return 0;
-        }
-
-        return coi.getWritableConstantValue().toString().length();
-      } else if (oi instanceof WritableConstantHiveVarcharObjectInspector) {
-
-        WritableConstantHiveVarcharObjectInspector wcsoi =
-            (WritableConstantHiveVarcharObjectInspector) oi;
-        return wcsoi.getWritableConstantValue().toString().length();
-      } else if (oi instanceof WritableHiveVarcharObjectInspector) {
-        return ((WritableHiveVarcharObjectInspector) oi).getMaxLength();
+        Object constantValue = coi.getWritableConstantValue();
+        return constantValue == null ? 0 : constantValue.toString().length();
+      } else if (oi instanceof HiveVarcharObjectInspector) {
+        VarcharTypeInfo type = (VarcharTypeInfo) ((HiveVarcharObjectInspector) oi).getTypeInfo();
+        return type.getLength();
       }
     } else if (colType.startsWith(serdeConstants.CHAR_TYPE_NAME)) {
 
@@ -829,18 +810,11 @@ public class StatsUtils {
         ConstantObjectInspector coi = (ConstantObjectInspector) oi;
 
         // if writable constant is null then return size 0
-        if (coi.getWritableConstantValue() == null) {
-          return 0;
-        }
-
-        return coi.getWritableConstantValue().toString().length();
-      } else if (oi instanceof WritableConstantHiveCharObjectInspector) {
-
-        WritableConstantHiveCharObjectInspector wcsoi =
-            (WritableConstantHiveCharObjectInspector) oi;
-        return wcsoi.getWritableConstantValue().toString().length();
-      } else if (oi instanceof WritableHiveCharObjectInspector) {
-        return ((WritableHiveCharObjectInspector) oi).getMaxLength();
+        Object constantValue = coi.getWritableConstantValue();
+        return constantValue == null ? 0 : constantValue.toString().length();
+      } else if (oi instanceof HiveCharObjectInspector) {
+        CharTypeInfo type = (CharTypeInfo) ((HiveCharObjectInspector) oi).getTypeInfo();
+        return type.getLength();
       }
     } else if (colType.equalsIgnoreCase(serdeConstants.BINARY_TYPE_NAME)) {
 
@@ -849,19 +823,9 @@ public class StatsUtils {
         ConstantObjectInspector coi = (ConstantObjectInspector) oi;
 
         // if writable constant is null then return size 0
-        if (coi.getWritableConstantValue() == null) {
-          return 0;
-        }
-
-        BytesWritable bw = ((BytesWritable) coi.getWritableConstantValue());
-        return bw.getLength();
-      } else if (oi instanceof WritableConstantBinaryObjectInspector) {
-
-        // writable constant byte arrays
-        WritableConstantBinaryObjectInspector wcboi = (WritableConstantBinaryObjectInspector) oi;
-
-        return wcboi.getWritableConstantValue().getLength();
-      } else if (oi instanceof WritableBinaryObjectInspector) {
+        BytesWritable constantValue = (BytesWritable)coi.getWritableConstantValue();
+        return constantValue == null ? 0 : constantValue.getLength();
+      } else if (oi instanceof BinaryObjectInspector) {
 
         // return the variable length from config
         return configVarLen;
@@ -1296,6 +1260,8 @@ public class StatsUtils {
       colType = enfd.getTypeString();
       countDistincts = numRows;
       oi = enfd.getWritableObjectInspector();
+    } else {
+      throw new IllegalArgumentException("not supported expr type " + end.getClass());
     }
 
     if (colType.equalsIgnoreCase(serdeConstants.STRING_TYPE_NAME)
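
Each removed Writable*Constant* branch above repeats the same two moves,
which the patch collapses: a null-safe read of the constant value, then a
dispatch on the inspector interface instead of on each Writable
implementation. A condensed sketch of that shape for the string case
(stringWidth and avgLenFromStats are illustrative; the inspector calls are
the ones the diff uses):

import org.apache.hadoop.hive.serde2.objectinspector.ConstantObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.StringObjectInspector;

final class ColumnWidth {
  static int stringWidth(ObjectInspector oi, int avgLenFromStats) {
    if (oi instanceof ConstantObjectInspector) {
      // one null-safe branch replaces the duplicated null checks
      Object constantValue = ((ConstantObjectInspector) oi).getWritableConstantValue();
      return constantValue == null ? 0 : constantValue.toString().length();
    } else if (oi instanceof StringObjectInspector) {
      // variable-length strings: no better estimate than the stats average
      return avgLenFromStats;
    }
    return 0;
  }
}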


[08/24] hive git commit: HIVE-11605: Incorrect results with bucket map join in tez. (Vikram Dixit K, reviewed by Sergey Shelukhin)

Posted by pr...@apache.org.
HIVE-11605: Incorrect results with bucket map join in tez. (Vikram Dixit K, reviewed by Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/4ea8e296
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/4ea8e296
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/4ea8e296

Branch: refs/heads/llap
Commit: 4ea8e29619eb0bbb02e3f7c09ffc9d44bf4cdfef
Parents: 594e25a
Author: vikram <vi...@hortonworks.com>
Authored: Thu Sep 10 13:13:56 2015 -0700
Committer: vikram <vi...@hortonworks.com>
Committed: Thu Sep 10 13:30:23 2015 -0700

----------------------------------------------------------------------
 .../hive/ql/optimizer/ConvertJoinMapJoin.java   |  18 ++-
 .../ql/optimizer/ReduceSinkMapJoinProc.java     |   8 +-
 .../clientpositive/bucket_map_join_tez1.q       |   9 ++
 .../spark/bucket_map_join_tez1.q.out            | 131 +++++++++++++++++++
 .../tez/bucket_map_join_tez1.q.out              | 123 +++++++++++++++++
 5 files changed, 280 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/4ea8e296/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java
index e3acdfc..8ea1879 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java
@@ -375,13 +375,13 @@ public class ConvertJoinMapJoin implements NodeProcessor {
       }
       ReduceSinkOperator rsOp = (ReduceSinkOperator) parentOp;
       if (checkColEquality(rsOp.getParentOperators().get(0).getOpTraits().getSortCols(), rsOp
-          .getOpTraits().getSortCols(), rsOp.getColumnExprMap(), tezBucketJoinProcCtx) == false) {
+          .getOpTraits().getSortCols(), rsOp.getColumnExprMap(), tezBucketJoinProcCtx, false) == false) {
         LOG.info("We cannot convert to SMB because the sort column names do not match.");
         return false;
       }
 
       if (checkColEquality(rsOp.getParentOperators().get(0).getOpTraits().getBucketColNames(), rsOp
-          .getOpTraits().getBucketColNames(), rsOp.getColumnExprMap(), tezBucketJoinProcCtx)
+          .getOpTraits().getBucketColNames(), rsOp.getColumnExprMap(), tezBucketJoinProcCtx, true)
           == false) {
         LOG.info("We cannot convert to SMB because bucket column names do not match.");
         return false;
@@ -428,7 +428,7 @@ public class ConvertJoinMapJoin implements NodeProcessor {
     int numBuckets = parentOfParent.getOpTraits().getNumBuckets();
     // all keys matched.
     if (checkColEquality(grandParentColNames, parentColNames, rs.getColumnExprMap(),
-        tezBucketJoinProcCtx) == false) {
+        tezBucketJoinProcCtx, true) == false) {
       LOG.info("No info available to check for bucket map join. Cannot convert");
       return false;
     }
@@ -446,7 +446,7 @@ public class ConvertJoinMapJoin implements NodeProcessor {
 
   private boolean checkColEquality(List<List<String>> grandParentColNames,
       List<List<String>> parentColNames, Map<String, ExprNodeDesc> colExprMap,
-      TezBucketJoinProcCtx tezBucketJoinProcCtx) {
+      TezBucketJoinProcCtx tezBucketJoinProcCtx, boolean strict) {
 
     if ((grandParentColNames == null) || (parentColNames == null)) {
       return false;
@@ -479,7 +479,15 @@ public class ConvertJoinMapJoin implements NodeProcessor {
           }
 
           if (colCount == parentColNames.get(0).size()) {
-            return true;
+            if (strict) {
+              if (colCount == listBucketCols.size()) {
+                return true;
+              } else {
+                return false;
+              }
+            } else {
+              return true;
+            }
           }
         }
       }
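
The new strict flag tightens the match for bucket columns: a prefix match
on the key columns is acceptable when checking sort order, but a bucket
map join is only sound when the bucket column list is covered in full.
Stripped of the operator plumbing, the added branch reduces to roughly
this (the flat string lists and the check method are illustrative
simplifications):

import java.util.List;

final class ColEquality {
  // true when every parent column matched; under strict mode the match
  // must also account for every bucket column, not just a prefix
  static boolean check(List<String> matchedCols, List<String> parentCols,
                       List<String> bucketCols, boolean strict) {
    if (!matchedCols.equals(parentCols)) {
      return false;
    }
    return !strict || matchedCols.size() == bucketCols.size();
  }
}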

http://git-wip-us.apache.org/repos/asf/hive/blob/4ea8e296/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ReduceSinkMapJoinProc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ReduceSinkMapJoinProc.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ReduceSinkMapJoinProc.java
index b546838..71c766f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ReduceSinkMapJoinProc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ReduceSinkMapJoinProc.java
@@ -226,10 +226,6 @@ public class ReduceSinkMapJoinProc implements NodeProcessor {
     int numBuckets = -1;
     EdgeType edgeType = EdgeType.BROADCAST_EDGE;
     if (joinConf.isBucketMapJoin()) {
-
-      // disable auto parallelism for bucket map joins
-      parentRS.getConf().setReducerTraits(EnumSet.of(FIXED));
-
       numBuckets = (Integer) joinConf.getBigTableBucketNumMapping().values().toArray()[0];
       /*
        * Here, we can be in one of 4 states.
@@ -273,6 +269,10 @@ public class ReduceSinkMapJoinProc implements NodeProcessor {
     } else if (mapJoinOp.getConf().isDynamicPartitionHashJoin()) {
       edgeType = EdgeType.CUSTOM_SIMPLE_EDGE;
     }
+    if (edgeType == EdgeType.CUSTOM_EDGE) {
+      // disable auto parallelism for bucket map joins
+      parentRS.getConf().setReducerTraits(EnumSet.of(FIXED));
+    }
     TezEdgeProperty edgeProp = new TezEdgeProperty(null, edgeType, numBuckets);
 
     if (mapJoinWork != null) {

http://git-wip-us.apache.org/repos/asf/hive/blob/4ea8e296/ql/src/test/queries/clientpositive/bucket_map_join_tez1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucket_map_join_tez1.q b/ql/src/test/queries/clientpositive/bucket_map_join_tez1.q
index 4a7d63e..0f9dd6d 100644
--- a/ql/src/test/queries/clientpositive/bucket_map_join_tez1.q
+++ b/ql/src/test/queries/clientpositive/bucket_map_join_tez1.q
@@ -30,6 +30,15 @@ explain
 select a.key, a.value, b.value
 from tab a join tab_part b on a.key = b.key;
 
+explain
+select count(*)
+from 
+(select distinct key, value from tab_part) a join tab b on a.key = b.key;
+
+select count(*)
+from 
+(select distinct key, value from tab_part) a join tab b on a.key = b.key;
+
 -- one side is really bucketed. srcbucket_mapjoin is not really a bucketed table.
 -- In this case the sub-query is chosen as the big table.
 explain

http://git-wip-us.apache.org/repos/asf/hive/blob/4ea8e296/ql/src/test/results/clientpositive/spark/bucket_map_join_tez1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/bucket_map_join_tez1.q.out b/ql/src/test/results/clientpositive/spark/bucket_map_join_tez1.q.out
index 65bded2..34ddc90 100644
--- a/ql/src/test/results/clientpositive/spark/bucket_map_join_tez1.q.out
+++ b/ql/src/test/results/clientpositive/spark/bucket_map_join_tez1.q.out
@@ -183,6 +183,137 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
+PREHOOK: query: explain
+select count(*)
+from 
+(select distinct key, value from tab_part) a join tab b on a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(*)
+from 
+(select distinct key, value from tab_part) a join tab b on a.key = b.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-2 is a root stage
+  Stage-1 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-2
+    Spark
+#### A masked pattern was here ####
+      Vertices:
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
+                    Spark HashTable Sink Operator
+                      keys:
+                        0 _col0 (type: int)
+                        1 key (type: int)
+            Local Work:
+              Map Reduce Local Work
+
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP, 2)
+        Reducer 3 <- Reducer 2 (GROUP, 1)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: tab_part
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      keys: key (type: int), value (type: string)
+                      mode: hash
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int), _col1 (type: string)
+                        sort order: ++
+                        Map-reduce partition columns: _col0 (type: int), _col1 (type: string)
+                        Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+        Reducer 2 
+            Local Work:
+              Map Reduce Local Work
+            Reduce Operator Tree:
+              Group By Operator
+                keys: KEY._col0 (type: int), KEY._col1 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+                  Map Join Operator
+                    condition map:
+                         Inner Join 0 to 1
+                    keys:
+                      0 _col0 (type: int)
+                      1 key (type: int)
+                    input vertices:
+                      1 Map 4
+                    Statistics: Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      aggregations: count()
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        sort order: 
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col0 (type: bigint)
+        Reducer 3 
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select count(*)
+from 
+(select distinct key, value from tab_part) a join tab b on a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tab
+PREHOOK: Input: default@tab@ds=2008-04-08
+PREHOOK: Input: default@tab_part
+PREHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*)
+from 
+(select distinct key, value from tab_part) a join tab b on a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tab
+POSTHOOK: Input: default@tab@ds=2008-04-08
+POSTHOOK: Input: default@tab_part
+POSTHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+242
 PREHOOK: query: -- one side is really bucketed. srcbucket_mapjoin is not really a bucketed table.
 -- In this case the sub-query is chosen as the big table.
 explain

http://git-wip-us.apache.org/repos/asf/hive/blob/4ea8e296/ql/src/test/results/clientpositive/tez/bucket_map_join_tez1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/bucket_map_join_tez1.q.out b/ql/src/test/results/clientpositive/tez/bucket_map_join_tez1.q.out
index 61c197f..8338672 100644
--- a/ql/src/test/results/clientpositive/tez/bucket_map_join_tez1.q.out
+++ b/ql/src/test/results/clientpositive/tez/bucket_map_join_tez1.q.out
@@ -178,6 +178,129 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
+PREHOOK: query: explain
+select count(*)
+from 
+(select distinct key, value from tab_part) a join tab b on a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(*)
+from 
+(select distinct key, value from tab_part) a join tab b on a.key = b.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (BROADCAST_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: tab_part
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      keys: key (type: int), value (type: string)
+                      mode: hash
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int), _col1 (type: string)
+                        sort order: ++
+                        Map-reduce partition columns: _col0 (type: int), _col1 (type: string)
+                        Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
+        Reducer 2 
+            Reduce Operator Tree:
+              Group By Operator
+                keys: KEY._col0 (type: int), KEY._col1 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+                  Map Join Operator
+                    condition map:
+                         Inner Join 0 to 1
+                    keys:
+                      0 _col0 (type: int)
+                      1 key (type: int)
+                    input vertices:
+                      1 Map 4
+                    Statistics: Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
+                    HybridGraceHashJoin: true
+                    Group By Operator
+                      aggregations: count()
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        sort order: 
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col0 (type: bigint)
+        Reducer 3 
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select count(*)
+from 
+(select distinct key, value from tab_part) a join tab b on a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tab
+PREHOOK: Input: default@tab@ds=2008-04-08
+PREHOOK: Input: default@tab_part
+PREHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*)
+from 
+(select distinct key, value from tab_part) a join tab b on a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tab
+POSTHOOK: Input: default@tab@ds=2008-04-08
+POSTHOOK: Input: default@tab_part
+POSTHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+242
 PREHOOK: query: -- one side is really bucketed. srcbucket_mapjoin is not really a bucketed table.
 -- In this case the sub-query is chosen as the big table.
 explain


[23/24] hive git commit: HIVE-11792: User explain in tez does not preserve ordering (Prasanth Jayachandran reviewed by Pengcheng Xiong)

Posted by pr...@apache.org.
HIVE-11792: User explain in tez does not preserve ordering (Prasanth Jayachandran reviewed by Pengcheng Xiong)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/da0be3db
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/da0be3db
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/da0be3db

Branch: refs/heads/llap
Commit: da0be3db7741e59813adcc197dc545b57dcc3c0a
Parents: 8bcd07d
Author: Prasanth Jayachandran <j....@gmail.com>
Authored: Mon Sep 14 02:57:19 2015 -0500
Committer: Prasanth Jayachandran <j....@gmail.com>
Committed: Mon Sep 14 02:57:19 2015 -0500

----------------------------------------------------------------------
 .../hadoop/hive/common/jsonexplain/tez/Op.java  |    8 +-
 .../hive/common/jsonexplain/tez/Stage.java      |   14 +-
 .../common/jsonexplain/tez/TezJsonParser.java   |   17 +-
 .../apache/hadoop/hive/ql/exec/ExplainTask.java |   24 +-
 .../apache/hadoop/hive/ql/exec/tez/TezTask.java |    4 +-
 .../apache/hadoop/hive/ql/hooks/ATSHook.java    |    9 +-
 .../authorization_explain.q.java1.7.out         |    2 +-
 .../authorization_explain.q.java1.8.out         |    2 +-
 .../clientpositive/explain_dependency.q.out     |   18 +-
 .../clientpositive/explain_dependency2.q.out    |   16 +-
 .../results/clientpositive/input4.q.java1.7.out |    2 +-
 .../results/clientpositive/input4.q.java1.8.out |    2 +-
 .../results/clientpositive/join0.q.java1.7.out  |    2 +-
 .../results/clientpositive/join0.q.java1.8.out  |    4 +-
 .../results/clientpositive/parallel_join0.q.out |    2 +-
 .../clientpositive/plan_json.q.java1.7.out      |    2 +-
 .../clientpositive/plan_json.q.java1.8.out      |    2 +-
 .../clientpositive/tez/constprog_dpp.q.out      |    4 +-
 .../clientpositive/tez/explainuser_1.q.out      |  496 +--
 .../clientpositive/tez/explainuser_2.q.out      | 3280 +++++++++---------
 .../clientpositive/tez/explainuser_3.q.out      |   10 +-
 21 files changed, 1953 insertions(+), 1967 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/da0be3db/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Op.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Op.java b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Op.java
index 9ecba7c..d0c1037 100644
--- a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Op.java
+++ b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Op.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.hive.common.jsonexplain.tez;
 
 import java.util.ArrayList;
 import java.util.Collections;
-import java.util.HashMap;
+import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 
@@ -62,7 +62,7 @@ public final class Op {
       JSONObject mapjoinObj = opObject.getJSONObject("Map Join Operator");
       // get the map for posToVertex
       JSONObject verticeObj = mapjoinObj.getJSONObject("input vertices:");
-      Map<String, String> posToVertex = new HashMap<>();
+      Map<String, String> posToVertex = new LinkedHashMap<>();
       for (String pos : JSONObject.getNames(verticeObj)) {
         String vertexName = verticeObj.getString(pos);
         posToVertex.put(pos, vertexName);
@@ -83,7 +83,7 @@ public final class Op {
       // update the keys to use vertex name
       JSONObject keys = mapjoinObj.getJSONObject("keys:");
       if (keys.length() != 0) {
-        JSONObject newKeys = new JSONObject();
+        JSONObject newKeys = new JSONObject(new LinkedHashMap<>());
         for (String key : JSONObject.getNames(keys)) {
           String vertexName = posToVertex.get(key);
           if (vertexName != null) {
@@ -116,7 +116,7 @@ public final class Op {
   }
 
   /**
-   * @param out
+   * @param printer
    * @param indentFlag
    * @param branchOfJoinOp
    *          This parameter is used to show if it is a branch of a Join
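
Every HashMap and HashSet swapped for its Linked variant in this commit
serves one goal: explain output should iterate in insertion order so
repeated explains print identically. The new JSONObject(new
LinkedHashMap<>()) idiom depends on the bundled org.json build wrapping
the supplied map rather than copying it into a HashMap (true of older
org.json releases; newer ones copy, so this is version-dependent and
worth verifying). A minimal sketch:

import java.util.LinkedHashMap;

import org.json.JSONObject;

public class OrderedExplain {
  public static void main(String[] args) throws Exception {
    // backing the JSONObject with a LinkedHashMap keeps keys in put order
    JSONObject plan = new JSONObject(new LinkedHashMap<>());
    plan.put("Stage-1", "root stage");
    plan.put("Stage-0", "depends on stages: Stage-1");
    System.out.println(plan.toString(2));
  }
}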

http://git-wip-us.apache.org/repos/asf/hive/blob/da0be3db/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Stage.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Stage.java b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Stage.java
index c5a78b5..455d59f 100644
--- a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Stage.java
+++ b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Stage.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.hive.common.jsonexplain.tez;
 
-import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
@@ -28,8 +27,6 @@ import java.util.Map;
 import java.util.TreeMap;
 
 import org.apache.hadoop.fs.Path;
-import org.codehaus.jackson.JsonParseException;
-import org.codehaus.jackson.map.JsonMappingException;
 import org.json.JSONArray;
 import org.json.JSONException;
 import org.json.JSONObject;
@@ -176,16 +173,11 @@ public final class Stage {
    * @param opName
    * @param opObj
    * @return
-   * @throws JSONException
-   * @throws JsonParseException
-   * @throws JsonMappingException
-   * @throws IOException
    * @throws Exception
     *           This method addresses the create table operator, fetch operator,
     *           etc.
    */
-  Op extractOp(String opName, JSONObject opObj) throws JSONException, JsonParseException,
-      JsonMappingException, IOException, Exception {
+  Op extractOp(String opName, JSONObject opObj) throws Exception {
     List<Attr> attrs = new ArrayList<>();
     Vertex v = null;
     if (opObj.length() > 0) {
@@ -198,7 +190,7 @@ public final class Stage {
           JSONObject attrObj = (JSONObject) o;
           if (attrObj.length() > 0) {
             if (name.equals("Processor Tree:")) {
-              JSONObject object = new JSONObject();
+              JSONObject object = new JSONObject(new LinkedHashMap<>());
               object.put(name, attrObj);
               v = new Vertex(null, object, parser);
               v.extractOpTree();
@@ -232,7 +224,7 @@ public final class Stage {
     return false;
   }
 
-  public void print(Printer printer, List<Boolean> indentFlag) throws JSONException, Exception {
+  public void print(Printer printer, List<Boolean> indentFlag) throws Exception {
     // print stagename
     if (parser.printSet.contains(this)) {
       printer.println(TezJsonParser.prefixString(indentFlag) + " Please refer to the previous "

http://git-wip-us.apache.org/repos/asf/hive/blob/da0be3db/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/TezJsonParser.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/TezJsonParser.java b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/TezJsonParser.java
index c6ee4f6..20ce27b 100644
--- a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/TezJsonParser.java
+++ b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/TezJsonParser.java
@@ -18,11 +18,10 @@
 
 package org.apache.hadoop.hive.common.jsonexplain.tez;
 
-import java.io.IOException;
 import java.io.PrintStream;
 import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.LinkedHashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
@@ -31,27 +30,23 @@ import java.util.Set;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.common.jsonexplain.JsonParser;
-import org.codehaus.jackson.JsonParseException;
-import org.codehaus.jackson.map.JsonMappingException;
-import org.json.JSONException;
 import org.json.JSONObject;
 
 public final class TezJsonParser implements JsonParser {
-  public final Map<String, Stage> stages = new HashMap<String, Stage>();;
+  public final Map<String, Stage> stages = new LinkedHashMap<>();
   protected final Log LOG;
   // the object that has been printed.
-  public final Set<Object> printSet = new HashSet<>();
+  public final Set<Object> printSet = new LinkedHashSet<>();
   // the vertex that should be inlined. <Operator, list of Vertex that is
   // inlined>
-  public final Map<Op, List<Connection>> inlineMap = new HashMap<>();
+  public final Map<Op, List<Connection>> inlineMap = new LinkedHashMap<>();
 
   public TezJsonParser() {
     super();
     LOG = LogFactory.getLog(this.getClass().getName());
   }
 
-  public void extractStagesAndPlans(JSONObject inputObject) throws JSONException,
-      JsonParseException, JsonMappingException, Exception, IOException {
+  public void extractStagesAndPlans(JSONObject inputObject) throws Exception {
     // extract stages
     JSONObject dependency = inputObject.getJSONObject("STAGE DEPENDENCIES");
     if (dependency != null && dependency.length() > 0) {

http://git-wip-us.apache.org/repos/asf/hive/blob/da0be3db/ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java
index 21de3cf..a74a8ad 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java
@@ -97,7 +97,7 @@ public class ExplainTask extends Task<ExplainWork> implements Serializable {
       throws Exception {
     assert(work.getDependency());
 
-    JSONObject outJSONObject = new JSONObject();
+    JSONObject outJSONObject = new JSONObject(new LinkedHashMap<>());
     List<Map<String, String>> inputTableInfo = new ArrayList<Map<String, String>>();
     List<Map<String, String>> inputPartitionInfo = new ArrayList<Map<String, String>>();
     for (ReadEntity input: work.getInputs()) {
@@ -133,7 +133,7 @@ public class ExplainTask extends Task<ExplainWork> implements Serializable {
   public JSONObject getJSONLogicalPlan(PrintStream out, ExplainWork work) throws Exception {
     isLogical = true;
 
-    JSONObject outJSONObject = new JSONObject();
+    JSONObject outJSONObject = new JSONObject(new LinkedHashMap<>());
     boolean jsonOutput = work.isFormatted();
     if (jsonOutput) {
       out = null;
@@ -181,7 +181,7 @@ public class ExplainTask extends Task<ExplainWork> implements Serializable {
 
     // If the user asked for a formatted output, dump the json output
     // in the output stream
-    JSONObject outJSONObject = new JSONObject();
+    JSONObject outJSONObject = new JSONObject(new LinkedHashMap<>());
 
     if (jsonOutput) {
       out = null;
@@ -339,7 +339,7 @@ public class ExplainTask extends Task<ExplainWork> implements Serializable {
     BaseSemanticAnalyzer analyzer = work.getAnalyzer();
     HiveOperation operation = SessionState.get().getHiveOperation();
 
-    JSONObject object = new JSONObject();
+    JSONObject object = new JSONObject(new LinkedHashMap<>());
     Object jsonInput = toJson("INPUTS", toString(analyzer.getInputs()), out, work);
     if (work.isFormatted()) {
       object.put("INPUTS", jsonInput);
@@ -402,7 +402,7 @@ public class ExplainTask extends Task<ExplainWork> implements Serializable {
 
     TreeMap<Object, Object> tree = new TreeMap<Object, Object>();
     tree.putAll(mp);
-    JSONObject json = jsonOutput ? new JSONObject() : null;
+    JSONObject json = jsonOutput ? new JSONObject(new LinkedHashMap<>()) : null;
     if (out != null && hasHeader && !mp.isEmpty()) {
       out.println();
     }
@@ -446,7 +446,7 @@ public class ExplainTask extends Task<ExplainWork> implements Serializable {
           }
           if (jsonOutput) {
             for (TezWork.Dependency dep: (List<TezWork.Dependency>)ent.getValue()) {
-              JSONObject jsonDep = new JSONObject();
+              JSONObject jsonDep = new JSONObject(new LinkedHashMap<>());
               jsonDep.put("parent", dep.getName());
               jsonDep.put("type", dep.getType());
               json.accumulate(ent.getKey().toString(), jsonDep);
@@ -475,7 +475,7 @@ public class ExplainTask extends Task<ExplainWork> implements Serializable {
           }
           if (jsonOutput) {
             for (SparkWork.Dependency dep: (List<SparkWork.Dependency>) ent.getValue()) {
-              JSONObject jsonDep = new JSONObject();
+              JSONObject jsonDep = new JSONObject(new LinkedHashMap<>());
               jsonDep.put("parent", dep.getName());
               jsonDep.put("type", dep.getShuffleType());
               jsonDep.put("partitions", dep.getNumPartitions());
@@ -613,7 +613,7 @@ public class ExplainTask extends Task<ExplainWork> implements Serializable {
       }
     }
 
-    JSONObject json = jsonOutput ? new JSONObject() : null;
+    JSONObject json = jsonOutput ? new JSONObject(new LinkedHashMap<>()) : null;
     // If this is an operator then we need to call the plan generation on the
     // conf and then the children
     if (work instanceof Operator) {
@@ -783,7 +783,7 @@ public class ExplainTask extends Task<ExplainWork> implements Serializable {
 
     if (jsonOutput) {
       if (keyJSONObject != null) {
-        JSONObject ret = new JSONObject();
+        JSONObject ret = new JSONObject(new LinkedHashMap<>());
         ret.put(keyJSONObject, json);
         return ret;
       }
@@ -841,7 +841,7 @@ public class ExplainTask extends Task<ExplainWork> implements Serializable {
       throws Exception {
 
     boolean first = true;
-    JSONObject json = jsonOutput ? new JSONObject() : null;
+    JSONObject json = jsonOutput ? new JSONObject(new LinkedHashMap<>()) : null;
     if (out != null) {
       out.print(indentString(indent));
       out.print(task.getId());
@@ -946,7 +946,7 @@ public class ExplainTask extends Task<ExplainWork> implements Serializable {
       out.println("STAGE DEPENDENCIES:");
     }
 
-    JSONObject json = jsonOutput ? new JSONObject() : null;
+    JSONObject json = jsonOutput ? new JSONObject(new LinkedHashMap<>()) : null;
     for (Task task : tasks) {
       JSONObject jsonOut = outputDependencies(task, out, json, jsonOutput, appendTaskType, 2);
       if (jsonOutput && jsonOut != null) {
@@ -965,7 +965,7 @@ public class ExplainTask extends Task<ExplainWork> implements Serializable {
       out.println("STAGE PLANS:");
     }
 
-    JSONObject json = jsonOutput ? new JSONObject() : null;
+    JSONObject json = jsonOutput ? new JSONObject(new LinkedHashMap<>()) : null;
     for (Task task : tasks) {
       outputPlan(task, out, json, isExtended, jsonOutput, 2);
     }

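Every JSONObject built on the explain path needs the ordered backing; a single HashMap-backed node anywhere in the tree would reintroduce nondeterminism below it, which is why the same expression recurs in each method above. The repeated expression could be collapsed into a small factory, sketched here as a hypothetical helper (the patch inlines the expression instead):

    // Hypothetical convenience factory, not part of the patch:
    private static JSONObject orderedJson() {
      return new JSONObject(new LinkedHashMap<>());
    }
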
http://git-wip-us.apache.org/repos/asf/hive/blob/da0be3db/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java
index 73263ee..3a6ec1a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java
@@ -22,6 +22,7 @@ import java.util.Collection;
 import java.util.Collections;
 import java.util.EnumSet;
 import java.util.HashMap;
+import java.util.LinkedHashMap;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
@@ -289,7 +290,8 @@ public class TezTask extends Task<TezWork> {
     DAG dag = DAG.create(work.getName());
 
     // set some info for the query
-    JSONObject json = new JSONObject().put("context", "Hive").put("description", ctx.getCmd());
+    JSONObject json = new JSONObject(new LinkedHashMap()).put("context", "Hive")
+        .put("description", ctx.getCmd());
     String dagInfo = json.toString();
 
     if (LOG.isDebugEnabled()) {

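The DAG info string benefits the same way: with an ordered backing map, the chained puts always serialize in the order they were written. A sketch of the fragment above as it would run inside the method, with a stand-in for ctx.getCmd():

    JSONObject json = new JSONObject(new LinkedHashMap())
        .put("context", "Hive")
        .put("description", "select count(1) from src");  // stand-in query string
    String dagInfo = json.toString();
    // dagInfo is now always {"context":"Hive","description":"select count(1) from src"}
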
http://git-wip-us.apache.org/repos/asf/hive/blob/da0be3db/ql/src/java/org/apache/hadoop/hive/ql/hooks/ATSHook.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/ATSHook.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/ATSHook.java
index 87638da..2caa7ae 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/ATSHook.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/ATSHook.java
@@ -17,8 +17,7 @@
  */
 package org.apache.hadoop.hive.ql.hooks;
 
-import java.io.Serializable;
-import java.util.List;
+import java.util.LinkedHashMap;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
@@ -26,15 +25,11 @@ import java.util.concurrent.TimeUnit;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.QueryPlan;
 import org.apache.hadoop.hive.ql.exec.ExplainTask;
-import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.TaskFactory;
 import org.apache.hadoop.hive.ql.exec.Utilities;
-import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
-import org.apache.hadoop.hive.ql.parse.ParseContext;
 import org.apache.hadoop.hive.ql.plan.ExplainWork;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
@@ -161,7 +156,7 @@ public class ATSHook implements ExecuteWithHookContext {
   TimelineEntity createPreHookEvent(String queryId, String query, JSONObject explainPlan,
       long startTime, String user, String requestuser, int numMrJobs, int numTezJobs, String opId) throws Exception {
 
-    JSONObject queryObj = new JSONObject();
+    JSONObject queryObj = new JSONObject(new LinkedHashMap<>());
     queryObj.put("queryText", query);
     queryObj.put("queryPlan", explainPlan);
 

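For ATS this makes the timeline entity payload reproducible: the same query stores identically ordered JSON across runs. A minimal fragment-level sketch under the same adopting-constructor assumption, with a stand-in query:

    JSONObject queryObj = new JSONObject(new LinkedHashMap<>());
    queryObj.put("queryText", "select 1");  // stand-in for the real query text
    queryObj.put("queryPlan", new JSONObject(new LinkedHashMap<>()));
    // toString() now always begins with "queryText", never with "queryPlan"
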
http://git-wip-us.apache.org/repos/asf/hive/blob/da0be3db/ql/src/test/results/clientpositive/authorization_explain.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/authorization_explain.q.java1.7.out b/ql/src/test/results/clientpositive/authorization_explain.q.java1.7.out
index 8209c6a..b7ec209 100644
--- a/ql/src/test/results/clientpositive/authorization_explain.q.java1.7.out
+++ b/ql/src/test/results/clientpositive/authorization_explain.q.java1.7.out
@@ -44,4 +44,4 @@ PREHOOK: query: explain formatted authorization use default
 PREHOOK: type: SWITCHDATABASE
 POSTHOOK: query: explain formatted authorization use default
 POSTHOOK: type: SWITCHDATABASE
-{"OUTPUTS":[],"INPUTS":["database:default"],"OPERATION":"SWITCHDATABASE","CURRENT_USER":"hive_test_user"}
+{"INPUTS":["database:default"],"OUTPUTS":[],"CURRENT_USER":"hive_test_user","OPERATION":"SWITCHDATABASE"}

http://git-wip-us.apache.org/repos/asf/hive/blob/da0be3db/ql/src/test/results/clientpositive/authorization_explain.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/authorization_explain.q.java1.8.out b/ql/src/test/results/clientpositive/authorization_explain.q.java1.8.out
index bb2bee9..b7ec209 100644
--- a/ql/src/test/results/clientpositive/authorization_explain.q.java1.8.out
+++ b/ql/src/test/results/clientpositive/authorization_explain.q.java1.8.out
@@ -44,4 +44,4 @@ PREHOOK: query: explain formatted authorization use default
 PREHOOK: type: SWITCHDATABASE
 POSTHOOK: query: explain formatted authorization use default
 POSTHOOK: type: SWITCHDATABASE
-{"CURRENT_USER":"hive_test_user","OPERATION":"SWITCHDATABASE","INPUTS":["database:default"],"OUTPUTS":[]}
+{"INPUTS":["database:default"],"OUTPUTS":[],"CURRENT_USER":"hive_test_user","OPERATION":"SWITCHDATABASE"}

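The two golden files converge here because the old key order was an artifact of HashMap iteration, which changed between Java 7 and Java 8; that instability is why separate .java1.7.out and .java1.8.out files existed at all. A JDK-only illustration (hypothetical values) of the underlying problem:

    import java.util.HashMap;
    import java.util.Map;

    public class HashOrderSketch {
      public static void main(String[] args) {
        Map<String, String> m = new HashMap<>();
        m.put("INPUTS", "[database:default]");
        m.put("OUTPUTS", "[]");
        m.put("CURRENT_USER", "hive_test_user");
        m.put("OPERATION", "SWITCHDATABASE");
        // prints bucket order, not put order; the order differed between JDK 7 and 8
        System.out.println(m.keySet());
      }
    }
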
http://git-wip-us.apache.org/repos/asf/hive/blob/da0be3db/ql/src/test/results/clientpositive/explain_dependency.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/explain_dependency.q.out b/ql/src/test/results/clientpositive/explain_dependency.q.out
index fa4dc59..dbfc482 100644
--- a/ql/src/test/results/clientpositive/explain_dependency.q.out
+++ b/ql/src/test/results/clientpositive/explain_dependency.q.out
@@ -70,14 +70,14 @@ POSTHOOK: query: -- Simple select queries, union queries and join queries
 EXPLAIN DEPENDENCY 
   SELECT key, count(1) FROM srcpart WHERE ds IS NOT NULL GROUP BY key
 POSTHOOK: type: QUERY
-{"input_partitions":[{"partitionName":"default@srcpart@ds=2008-04-08/hr=11"},{"partitionName":"default@srcpart@ds=2008-04-08/hr=12"},{"partitionName":"default@srcpart@ds=2008-04-09/hr=11"},{"partitionName":"default@srcpart@ds=2008-04-09/hr=12"}],"input_tables":[{"tablename":"default@srcpart","tabletype":"MANAGED_TABLE"}]}
+{"input_tables":[{"tablename":"default@srcpart","tabletype":"MANAGED_TABLE"}],"input_partitions":[{"partitionName":"default@srcpart@ds=2008-04-08/hr=11"},{"partitionName":"default@srcpart@ds=2008-04-08/hr=12"},{"partitionName":"default@srcpart@ds=2008-04-09/hr=11"},{"partitionName":"default@srcpart@ds=2008-04-09/hr=12"}]}
 PREHOOK: query: EXPLAIN DEPENDENCY 
   SELECT key, count(1) FROM (SELECT key, value FROM src) subq1 GROUP BY key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN DEPENDENCY 
   SELECT key, count(1) FROM (SELECT key, value FROM src) subq1 GROUP BY key
 POSTHOOK: type: QUERY
-{"input_partitions":[],"input_tables":[{"tablename":"default@src","tabletype":"MANAGED_TABLE"}]}
+{"input_tables":[{"tablename":"default@src","tabletype":"MANAGED_TABLE"}],"input_partitions":[]}
 PREHOOK: query: EXPLAIN DEPENDENCY 
   SELECT * FROM (
     SELECT key, value FROM src UNION ALL SELECT key, value FROM srcpart WHERE ds IS NOT NULL
@@ -88,36 +88,36 @@ POSTHOOK: query: EXPLAIN DEPENDENCY
     SELECT key, value FROM src UNION ALL SELECT key, value FROM srcpart WHERE ds IS NOT NULL
   ) S1
 POSTHOOK: type: QUERY
-{"input_partitions":[{"partitionName":"default@srcpart@ds=2008-04-08/hr=11"},{"partitionName":"default@srcpart@ds=2008-04-08/hr=12"},{"partitionName":"default@srcpart@ds=2008-04-09/hr=11"},{"partitionName":"default@srcpart@ds=2008-04-09/hr=12"}],"input_tables":[{"tablename":"default@src","tabletype":"MANAGED_TABLE"},{"tablename":"default@srcpart","tabletype":"MANAGED_TABLE"}]}
+{"input_tables":[{"tablename":"default@src","tabletype":"MANAGED_TABLE"},{"tablename":"default@srcpart","tabletype":"MANAGED_TABLE"}],"input_partitions":[{"partitionName":"default@srcpart@ds=2008-04-08/hr=11"},{"partitionName":"default@srcpart@ds=2008-04-08/hr=12"},{"partitionName":"default@srcpart@ds=2008-04-09/hr=11"},{"partitionName":"default@srcpart@ds=2008-04-09/hr=12"}]}
 PREHOOK: query: EXPLAIN DEPENDENCY 
   SELECT S1.key, S2.value FROM src S1 JOIN srcpart S2 ON S1.key = S2.key WHERE ds IS NOT NULL
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN DEPENDENCY 
   SELECT S1.key, S2.value FROM src S1 JOIN srcpart S2 ON S1.key = S2.key WHERE ds IS NOT NULL
 POSTHOOK: type: QUERY
-{"input_partitions":[{"partitionName":"default@srcpart@ds=2008-04-08/hr=11"},{"partitionName":"default@srcpart@ds=2008-04-08/hr=12"},{"partitionName":"default@srcpart@ds=2008-04-09/hr=11"},{"partitionName":"default@srcpart@ds=2008-04-09/hr=12"}],"input_tables":[{"tablename":"default@src","tabletype":"MANAGED_TABLE"},{"tablename":"default@srcpart","tabletype":"MANAGED_TABLE"}]}
+{"input_tables":[{"tablename":"default@src","tabletype":"MANAGED_TABLE"},{"tablename":"default@srcpart","tabletype":"MANAGED_TABLE"}],"input_partitions":[{"partitionName":"default@srcpart@ds=2008-04-08/hr=11"},{"partitionName":"default@srcpart@ds=2008-04-08/hr=12"},{"partitionName":"default@srcpart@ds=2008-04-09/hr=11"},{"partitionName":"default@srcpart@ds=2008-04-09/hr=12"}]}
 PREHOOK: query: -- With views
 EXPLAIN DEPENDENCY SELECT * FROM V1
 PREHOOK: type: QUERY
 POSTHOOK: query: -- With views
 EXPLAIN DEPENDENCY SELECT * FROM V1
 POSTHOOK: type: QUERY
-{"input_partitions":[],"input_tables":[{"tablename":"default@v1","tabletype":"VIRTUAL_VIEW"},{"tablename":"default@src","tabletype":"MANAGED_TABLE","tableParents":"[default@v1]"}]}
+{"input_tables":[{"tablename":"default@v1","tabletype":"VIRTUAL_VIEW"},{"tablename":"default@src","tabletype":"MANAGED_TABLE","tableParents":"[default@v1]"}],"input_partitions":[]}
 PREHOOK: query: EXPLAIN DEPENDENCY SELECT * FROM V2
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN DEPENDENCY SELECT * FROM V2
 POSTHOOK: type: QUERY
-{"input_partitions":[{"partitionName":"default@srcpart@ds=2008-04-08/hr=11"},{"partitionName":"default@srcpart@ds=2008-04-08/hr=12"},{"partitionName":"default@srcpart@ds=2008-04-09/hr=11"},{"partitionName":"default@srcpart@ds=2008-04-09/hr=12"}],"input_tables":[{"tablename":"default@v2","tabletype":"VIRTUAL_VIEW"},{"tablename":"default@srcpart","tabletype":"MANAGED_TABLE","tableParents":"[default@v2]"}]}
+{"input_tables":[{"tablename":"default@v2","tabletype":"VIRTUAL_VIEW"},{"tablename":"default@srcpart","tabletype":"MANAGED_TABLE","tableParents":"[default@v2]"}],"input_partitions":[{"partitionName":"default@srcpart@ds=2008-04-08/hr=11"},{"partitionName":"default@srcpart@ds=2008-04-08/hr=12"},{"partitionName":"default@srcpart@ds=2008-04-09/hr=11"},{"partitionName":"default@srcpart@ds=2008-04-09/hr=12"}]}
 PREHOOK: query: EXPLAIN DEPENDENCY SELECT * FROM V3
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN DEPENDENCY SELECT * FROM V3
 POSTHOOK: type: QUERY
-{"input_partitions":[{"partitionName":"default@srcpart@ds=2008-04-08/hr=11"},{"partitionName":"default@srcpart@ds=2008-04-08/hr=12"},{"partitionName":"default@srcpart@ds=2008-04-09/hr=11"},{"partitionName":"default@srcpart@ds=2008-04-09/hr=12"}],"input_tables":[{"tablename":"default@v3","tabletype":"VIRTUAL_VIEW"},{"tablename":"default@v2","tabletype":"VIRTUAL_VIEW","tableParents":"[default@v3]"},{"tablename":"default@src","tabletype":"MANAGED_TABLE","tableParents":"[default@v3]"},{"tablename":"default@srcpart","tabletype":"MANAGED_TABLE","tableParents":"[default@v2]"}]}
+{"input_tables":[{"tablename":"default@v3","tabletype":"VIRTUAL_VIEW"},{"tablename":"default@v2","tabletype":"VIRTUAL_VIEW","tableParents":"[default@v3]"},{"tablename":"default@src","tabletype":"MANAGED_TABLE","tableParents":"[default@v3]"},{"tablename":"default@srcpart","tabletype":"MANAGED_TABLE","tableParents":"[default@v2]"}],"input_partitions":[{"partitionName":"default@srcpart@ds=2008-04-08/hr=11"},{"partitionName":"default@srcpart@ds=2008-04-08/hr=12"},{"partitionName":"default@srcpart@ds=2008-04-09/hr=11"},{"partitionName":"default@srcpart@ds=2008-04-09/hr=12"}]}
 PREHOOK: query: EXPLAIN DEPENDENCY SELECT * FROM V4
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN DEPENDENCY SELECT * FROM V4
 POSTHOOK: type: QUERY
-{"input_partitions":[{"partitionName":"default@srcpart@ds=2008-04-08/hr=11"},{"partitionName":"default@srcpart@ds=2008-04-08/hr=12"},{"partitionName":"default@srcpart@ds=2008-04-09/hr=11"},{"partitionName":"default@srcpart@ds=2008-04-09/hr=12"}],"input_tables":[{"tablename":"default@v4","tabletype":"VIRTUAL_VIEW"},{"tablename":"default@v1","tabletype":"VIRTUAL_VIEW","tableParents":"[default@v4]"},{"tablename":"default@v2","tabletype":"VIRTUAL_VIEW","tableParents":"[default@v4]"},{"tablename":"default@src","tabletype":"MANAGED_TABLE","tableParents":"[default@v4, default@v1]"},{"tablename":"default@srcpart","tabletype":"MANAGED_TABLE","tableParents":"[default@v2]"}]}
+{"input_tables":[{"tablename":"default@v4","tabletype":"VIRTUAL_VIEW"},{"tablename":"default@v1","tabletype":"VIRTUAL_VIEW","tableParents":"[default@v4]"},{"tablename":"default@v2","tabletype":"VIRTUAL_VIEW","tableParents":"[default@v4]"},{"tablename":"default@src","tabletype":"MANAGED_TABLE","tableParents":"[default@v4, default@v1]"},{"tablename":"default@srcpart","tabletype":"MANAGED_TABLE","tableParents":"[default@v2]"}],"input_partitions":[{"partitionName":"default@srcpart@ds=2008-04-08/hr=11"},{"partitionName":"default@srcpart@ds=2008-04-08/hr=12"},{"partitionName":"default@srcpart@ds=2008-04-09/hr=11"},{"partitionName":"default@srcpart@ds=2008-04-09/hr=12"}]}
 PREHOOK: query: -- The table should show up in the explain dependency even if none
 -- of the partitions are selected.
 CREATE VIEW V5 as SELECT * FROM srcpart where ds = '10'
@@ -136,4 +136,4 @@ PREHOOK: query: EXPLAIN DEPENDENCY SELECT * FROM V5
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN DEPENDENCY SELECT * FROM V5
 POSTHOOK: type: QUERY
-{"input_partitions":[],"input_tables":[{"tablename":"default@v5","tabletype":"VIRTUAL_VIEW"},{"tablename":"default@srcpart","tabletype":"MANAGED_TABLE","tableParents":"[default@v5]"}]}
+{"input_tables":[{"tablename":"default@v5","tabletype":"VIRTUAL_VIEW"},{"tablename":"default@srcpart","tabletype":"MANAGED_TABLE","tableParents":"[default@v5]"}],"input_partitions":[]}

http://git-wip-us.apache.org/repos/asf/hive/blob/da0be3db/ql/src/test/results/clientpositive/explain_dependency2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/explain_dependency2.q.out b/ql/src/test/results/clientpositive/explain_dependency2.q.out
index cb4e6c3..7973a60 100644
--- a/ql/src/test/results/clientpositive/explain_dependency2.q.out
+++ b/ql/src/test/results/clientpositive/explain_dependency2.q.out
@@ -8,14 +8,14 @@ POSTHOOK: query: -- This test is used for testing EXPLAIN DEPENDENCY command
 -- select from a table which does not involve a map-reduce job
 EXPLAIN DEPENDENCY SELECT * FROM src
 POSTHOOK: type: QUERY
-{"input_partitions":[],"input_tables":[{"tablename":"default@src","tabletype":"MANAGED_TABLE"}]}
+{"input_tables":[{"tablename":"default@src","tabletype":"MANAGED_TABLE"}],"input_partitions":[]}
 PREHOOK: query: -- select from a table which involves a map-reduce job
 EXPLAIN DEPENDENCY SELECT count(*) FROM src
 PREHOOK: type: QUERY
 POSTHOOK: query: -- select from a table which involves a map-reduce job
 EXPLAIN DEPENDENCY SELECT count(*) FROM src
 POSTHOOK: type: QUERY
-{"input_partitions":[],"input_tables":[{"tablename":"default@src","tabletype":"MANAGED_TABLE"}]}
+{"input_tables":[{"tablename":"default@src","tabletype":"MANAGED_TABLE"}],"input_partitions":[]}
 PREHOOK: query: -- select from a partitioned table which does not involve a map-reduce job
 -- and some partitions are being selected
 EXPLAIN DEPENDENCY SELECT * FROM srcpart where ds is not null
@@ -24,7 +24,7 @@ POSTHOOK: query: -- select from a partitioned table which does not involve a map
 -- and some partitions are being selected
 EXPLAIN DEPENDENCY SELECT * FROM srcpart where ds is not null
 POSTHOOK: type: QUERY
-{"input_partitions":[{"partitionName":"default@srcpart@ds=2008-04-08/hr=11"},{"partitionName":"default@srcpart@ds=2008-04-08/hr=12"},{"partitionName":"default@srcpart@ds=2008-04-09/hr=11"},{"partitionName":"default@srcpart@ds=2008-04-09/hr=12"}],"input_tables":[{"tablename":"default@srcpart","tabletype":"MANAGED_TABLE"}]}
+{"input_tables":[{"tablename":"default@srcpart","tabletype":"MANAGED_TABLE"}],"input_partitions":[{"partitionName":"default@srcpart@ds=2008-04-08/hr=11"},{"partitionName":"default@srcpart@ds=2008-04-08/hr=12"},{"partitionName":"default@srcpart@ds=2008-04-09/hr=11"},{"partitionName":"default@srcpart@ds=2008-04-09/hr=12"}]}
 PREHOOK: query: -- select from a partitioned table which does not involve a map-reduce job
 -- and none of the partitions are being selected
 EXPLAIN DEPENDENCY SELECT * FROM srcpart where ds = '1'
@@ -33,7 +33,7 @@ POSTHOOK: query: -- select from a partitioned table which does not involve a map
 -- and none of the partitions are being selected
 EXPLAIN DEPENDENCY SELECT * FROM srcpart where ds = '1'
 POSTHOOK: type: QUERY
-{"input_partitions":[],"input_tables":[{"tablename":"default@srcpart","tabletype":"MANAGED_TABLE"}]}
+{"input_tables":[{"tablename":"default@srcpart","tabletype":"MANAGED_TABLE"}],"input_partitions":[]}
 PREHOOK: query: -- select from a partitioned table which involves a map-reduce job
 -- and some partitions are being selected
 EXPLAIN DEPENDENCY SELECT count(*) FROM srcpart where ds is not null
@@ -42,7 +42,7 @@ POSTHOOK: query: -- select from a partitioned table which involves a map-reduce
 -- and some partitions are being selected
 EXPLAIN DEPENDENCY SELECT count(*) FROM srcpart where ds is not null
 POSTHOOK: type: QUERY
-{"input_partitions":[{"partitionName":"default@srcpart@ds=2008-04-08/hr=11"},{"partitionName":"default@srcpart@ds=2008-04-08/hr=12"},{"partitionName":"default@srcpart@ds=2008-04-09/hr=11"},{"partitionName":"default@srcpart@ds=2008-04-09/hr=12"}],"input_tables":[{"tablename":"default@srcpart","tabletype":"MANAGED_TABLE"}]}
+{"input_tables":[{"tablename":"default@srcpart","tabletype":"MANAGED_TABLE"}],"input_partitions":[{"partitionName":"default@srcpart@ds=2008-04-08/hr=11"},{"partitionName":"default@srcpart@ds=2008-04-08/hr=12"},{"partitionName":"default@srcpart@ds=2008-04-09/hr=11"},{"partitionName":"default@srcpart@ds=2008-04-09/hr=12"}]}
 PREHOOK: query: -- select from a partitioned table which involves a map-reduce job
 -- and none of the partitions are being selected
 EXPLAIN DEPENDENCY SELECT count(*) FROM srcpart where ds = '1'
@@ -51,7 +51,7 @@ POSTHOOK: query: -- select from a partitioned table which involves a map-reduce
 -- and none of the partitions are being selected
 EXPLAIN DEPENDENCY SELECT count(*) FROM srcpart where ds = '1'
 POSTHOOK: type: QUERY
-{"input_partitions":[],"input_tables":[{"tablename":"default@srcpart","tabletype":"MANAGED_TABLE"}]}
+{"input_tables":[{"tablename":"default@srcpart","tabletype":"MANAGED_TABLE"}],"input_partitions":[]}
 PREHOOK: query: create table tstsrcpart like srcpart
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
@@ -66,11 +66,11 @@ PREHOOK: type: QUERY
 POSTHOOK: query: -- select from a partitioned table with no partitions which does not involve a map-reduce job
 EXPLAIN DEPENDENCY SELECT * FROM tstsrcpart where ds is not null
 POSTHOOK: type: QUERY
-{"input_partitions":[],"input_tables":[{"tablename":"default@tstsrcpart","tabletype":"MANAGED_TABLE"}]}
+{"input_tables":[{"tablename":"default@tstsrcpart","tabletype":"MANAGED_TABLE"}],"input_partitions":[]}
 PREHOOK: query: -- select from a partitioned table with no partitions which involves a map-reduce job
 EXPLAIN DEPENDENCY SELECT count(*) FROM tstsrcpart where ds is not null
 PREHOOK: type: QUERY
 POSTHOOK: query: -- select from a partitioned table with no partitions which involves a map-reduce job
 EXPLAIN DEPENDENCY SELECT count(*) FROM tstsrcpart where ds is not null
 POSTHOOK: type: QUERY
-{"input_partitions":[],"input_tables":[{"tablename":"default@tstsrcpart","tabletype":"MANAGED_TABLE"}]}
+{"input_tables":[{"tablename":"default@tstsrcpart","tabletype":"MANAGED_TABLE"}],"input_partitions":[]}

http://git-wip-us.apache.org/repos/asf/hive/blob/da0be3db/ql/src/test/results/clientpositive/input4.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input4.q.java1.7.out b/ql/src/test/results/clientpositive/input4.q.java1.7.out
index dccf625..eaeedcb 100644
--- a/ql/src/test/results/clientpositive/input4.q.java1.7.out
+++ b/ql/src/test/results/clientpositive/input4.q.java1.7.out
@@ -48,7 +48,7 @@ PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN FORMATTED
 SELECT Input4Alias.VALUE, Input4Alias.KEY FROM INPUT4 AS Input4Alias
 POSTHOOK: type: QUERY
-{"STAGE PLANS":{"Stage-0":{"Fetch Operator":{"limit:":"-1","Processor Tree:":{"TableScan":{"alias:":"input4alias","children":{"Select Operator":{"expressions:":"value (type: string), key (type: string)","outputColumnNames:":["_col0","_col1"],"children":{"ListSink":{}},"Statistics:":"Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE"}},"Statistics:":"Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE"}}}}},"STAGE DEPENDENCIES":{"Stage-0":{"ROOT STAGE":"TRUE"}}}
+{"STAGE DEPENDENCIES":{"Stage-0":{"ROOT STAGE":"TRUE"}},"STAGE PLANS":{"Stage-0":{"Fetch Operator":{"limit:":"-1","Processor Tree:":{"TableScan":{"alias:":"input4alias","Statistics:":"Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE","children":{"Select Operator":{"expressions:":"value (type: string), key (type: string)","outputColumnNames:":["_col0","_col1"],"Statistics:":"Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE","children":{"ListSink":{}}}}}}}}}}
 PREHOOK: query: SELECT Input4Alias.VALUE, Input4Alias.KEY FROM INPUT4 AS Input4Alias
 PREHOOK: type: QUERY
 PREHOOK: Input: default@input4

http://git-wip-us.apache.org/repos/asf/hive/blob/da0be3db/ql/src/test/results/clientpositive/input4.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input4.q.java1.8.out b/ql/src/test/results/clientpositive/input4.q.java1.8.out
index bc408de..eaeedcb 100644
--- a/ql/src/test/results/clientpositive/input4.q.java1.8.out
+++ b/ql/src/test/results/clientpositive/input4.q.java1.8.out
@@ -48,7 +48,7 @@ PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN FORMATTED
 SELECT Input4Alias.VALUE, Input4Alias.KEY FROM INPUT4 AS Input4Alias
 POSTHOOK: type: QUERY
-{"STAGE DEPENDENCIES":{"Stage-0":{"ROOT STAGE":"TRUE"}},"STAGE PLANS":{"Stage-0":{"Fetch Operator":{"Processor Tree:":{"TableScan":{"alias:":"input4alias","Statistics:":"Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE","children":{"Select Operator":{"Statistics:":"Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE","children":{"ListSink":{}},"outputColumnNames:":["_col0","_col1"],"expressions:":"value (type: string), key (type: string)"}}}},"limit:":"-1"}}}}
+{"STAGE DEPENDENCIES":{"Stage-0":{"ROOT STAGE":"TRUE"}},"STAGE PLANS":{"Stage-0":{"Fetch Operator":{"limit:":"-1","Processor Tree:":{"TableScan":{"alias:":"input4alias","Statistics:":"Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE","children":{"Select Operator":{"expressions:":"value (type: string), key (type: string)","outputColumnNames:":["_col0","_col1"],"Statistics:":"Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE","children":{"ListSink":{}}}}}}}}}}
 PREHOOK: query: SELECT Input4Alias.VALUE, Input4Alias.KEY FROM INPUT4 AS Input4Alias
 PREHOOK: type: QUERY
 PREHOOK: Input: default@input4

http://git-wip-us.apache.org/repos/asf/hive/blob/da0be3db/ql/src/test/results/clientpositive/join0.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join0.q.java1.7.out b/ql/src/test/results/clientpositive/join0.q.java1.7.out
index fa55ffb..f7e52f6 100644
--- a/ql/src/test/results/clientpositive/join0.q.java1.7.out
+++ b/ql/src/test/results/clientpositive/join0.q.java1.7.out
@@ -118,7 +118,7 @@ SELECT src1.key as k1, src1.value as v1,
   (SELECT * FROM src WHERE src.key < 10) src2
   SORT BY k1, v1, k2, v2
 POSTHOOK: type: QUERY
-{"STAGE PLANS":{"Stage-2":{"Map Reduce":{"Reduce Operator Tree:":{"Select Operator":{"expressions:":"KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string), KEY.reducesinkkey3 (type: string)","outputColumnNames:":["_col0","_col1","_col2","_col3"],"children":{"File Output Operator":{"Statistics:":"Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE","compressed:":"false","table:":{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}}},"Statistics:":"Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE"}},"Map Operator Tree:":[{"TableScan":{"children":{"Reduce Output Operator":{"sort order:":"++++","Statistics:":"Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE","key expressions:":"_col0 (type: string), _col1 (type: string), _col2 (ty
 pe: string), _col3 (type: string)"}}}}]}},"Stage-1":{"Map Reduce":{"Reduce Operator Tree:":{"Join Operator":{"keys:":{},"outputColumnNames:":["_col0","_col1","_col2","_col3"],"children":{"File Output Operator":{"compressed:":"false","table:":{"serde:":"org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe","input format:":"org.apache.hadoop.mapred.SequenceFileInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat"}}},"Statistics:":"Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE","condition map:":[{"":"Inner Join 0 to 1"}]}},"Map Operator Tree:":[{"TableScan":{"alias:":"src","children":{"Filter Operator":{"predicate:":"(key < 10) (type: boolean)","children":{"Select Operator":{"expressions:":"key (type: string), value (type: string)","outputColumnNames:":["_col0","_col1"],"children":{"Reduce Output Operator":{"sort order:":"","value expressions:":"_col0 (type: string), _col1 (type: string)","Statistics:":"Num rows: 166 Data
  size: 1763 Basic stats: COMPLETE Column stats: NONE"}},"Statistics:":"Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE"}},"Statistics:":"Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE"}},"Statistics:":"Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE"}},{"TableScan":{"alias:":"src","children":{"Filter Operator":{"predicate:":"(key < 10) (type: boolean)","children":{"Select Operator":{"expressions:":"key (type: string), value (type: string)","outputColumnNames:":["_col0","_col1"],"children":{"Reduce Output Operator":{"sort order:":"","value expressions:":"_col0 (type: string), _col1 (type: string)","Statistics:":"Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE"}},"Statistics:":"Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE"}},"Statistics:":"Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE"}},"Statistics:":"Num rows: 500 Data size: 5312 Basic stats
 : COMPLETE Column stats: NONE"}}]}},"Stage-0":{"Fetch Operator":{"limit:":"-1","Processor Tree:":{"ListSink":{}}}}},"STAGE DEPENDENCIES":{"Stage-2":{"DEPENDENT STAGES":"Stage-1"},"Stage-1":{"ROOT STAGE":"TRUE"},"Stage-0":{"DEPENDENT STAGES":"Stage-2"}}}
+{"STAGE DEPENDENCIES":{"Stage-1":{"ROOT STAGE":"TRUE"},"Stage-2":{"DEPENDENT STAGES":"Stage-1"},"Stage-0":{"DEPENDENT STAGES":"Stage-2"}},"STAGE PLANS":{"Stage-1":{"Map Reduce":{"Map Operator Tree:":[{"TableScan":{"alias:":"src","Statistics:":"Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE","children":{"Filter Operator":{"predicate:":"(key < 10) (type: boolean)","Statistics:":"Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE","children":{"Select Operator":{"expressions:":"key (type: string), value (type: string)","outputColumnNames:":["_col0","_col1"],"Statistics:":"Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE","children":{"Reduce Output Operator":{"sort order:":"","Statistics:":"Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE","value expressions:":"_col0 (type: string), _col1 (type: string)"}}}}}}}},{"TableScan":{"alias:":"src","Statistics:":"Num rows: 500 Data size: 5312 Basic stats: COM
 PLETE Column stats: NONE","children":{"Filter Operator":{"predicate:":"(key < 10) (type: boolean)","Statistics:":"Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE","children":{"Select Operator":{"expressions:":"key (type: string), value (type: string)","outputColumnNames:":["_col0","_col1"],"Statistics:":"Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE","children":{"Reduce Output Operator":{"sort order:":"","Statistics:":"Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE","value expressions:":"_col0 (type: string), _col1 (type: string)"}}}}}}}}],"Reduce Operator Tree:":{"Join Operator":{"condition map:":[{"":"Inner Join 0 to 1"}],"keys:":{},"outputColumnNames:":["_col0","_col1","_col2","_col3"],"Statistics:":"Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE","children":{"File Output Operator":{"compressed:":"false","table:":{"input format:":"org.apache.hadoop.mapred.SequenceFileInputFormat","outp
 ut format:":"org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe"}}}}}}},"Stage-2":{"Map Reduce":{"Map Operator Tree:":[{"TableScan":{"children":{"Reduce Output Operator":{"key expressions:":"_col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)","sort order:":"++++","Statistics:":"Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE"}}}}],"Reduce Operator Tree:":{"Select Operator":{"expressions:":"KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string), KEY.reducesinkkey3 (type: string)","outputColumnNames:":["_col0","_col1","_col2","_col3"],"Statistics:":"Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE","children":{"File Output Operator":{"compressed:":"false","Statistics:":"Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE","table:":{"input format:":"org.apache.hadoo
 p.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}}}}}}},"Stage-0":{"Fetch Operator":{"limit:":"-1","Processor Tree:":{"ListSink":{}}}}}}
 Warning: Shuffle Join JOIN[8][tables = [src1, src2]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: SELECT src1.key as k1, src1.value as v1, 
        src2.key as k2, src2.value as v2 FROM 

http://git-wip-us.apache.org/repos/asf/hive/blob/da0be3db/ql/src/test/results/clientpositive/join0.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join0.q.java1.8.out b/ql/src/test/results/clientpositive/join0.q.java1.8.out
index 443f6a3..f7e52f6 100644
--- a/ql/src/test/results/clientpositive/join0.q.java1.8.out
+++ b/ql/src/test/results/clientpositive/join0.q.java1.8.out
@@ -1,5 +1,6 @@
 Warning: Shuffle Join JOIN[8][tables = [src1, src2]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
+-- SORT_QUERY_RESULTS
 
 EXPLAIN
 SELECT src1.key as k1, src1.value as v1, 
@@ -10,6 +11,7 @@ SELECT src1.key as k1, src1.value as v1,
   SORT BY k1, v1, k2, v2
 PREHOOK: type: QUERY
 POSTHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
+-- SORT_QUERY_RESULTS
 
 EXPLAIN
 SELECT src1.key as k1, src1.value as v1, 
@@ -116,7 +118,7 @@ SELECT src1.key as k1, src1.value as v1,
   (SELECT * FROM src WHERE src.key < 10) src2
   SORT BY k1, v1, k2, v2
 POSTHOOK: type: QUERY
-{"STAGE DEPENDENCIES":{"Stage-1":{"ROOT STAGE":"TRUE"},"Stage-2":{"DEPENDENT STAGES":"Stage-1"},"Stage-0":{"DEPENDENT STAGES":"Stage-2"}},"STAGE PLANS":{"Stage-1":{"Map Reduce":{"Reduce Operator Tree:":{"Join Operator":{"Statistics:":"Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE","keys:":{},"children":{"File Output Operator":{"compressed:":"false","table:":{"input format:":"org.apache.hadoop.mapred.SequenceFileInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe"}}},"condition map:":[{"":"Inner Join 0 to 1"}],"outputColumnNames:":["_col0","_col1","_col2","_col3"]}},"Map Operator Tree:":[{"TableScan":{"alias:":"src","Statistics:":"Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE","children":{"Filter Operator":{"Statistics:":"Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE","children":{"Select Operator":{"Statist
 ics:":"Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE","children":{"Reduce Output Operator":{"value expressions:":"_col0 (type: string), _col1 (type: string)","sort order:":"","Statistics:":"Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE"}},"outputColumnNames:":["_col0","_col1"],"expressions:":"key (type: string), value (type: string)"}},"predicate:":"(key < 10) (type: boolean)"}}}},{"TableScan":{"alias:":"src","Statistics:":"Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE","children":{"Filter Operator":{"Statistics:":"Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE","children":{"Select Operator":{"Statistics:":"Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE","children":{"Reduce Output Operator":{"value expressions:":"_col0 (type: string), _col1 (type: string)","sort order:":"","Statistics:":"Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE"}},"
 outputColumnNames:":["_col0","_col1"],"expressions:":"key (type: string), value (type: string)"}},"predicate:":"(key < 10) (type: boolean)"}}}}]}},"Stage-2":{"Map Reduce":{"Reduce Operator Tree:":{"Select Operator":{"Statistics:":"Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE","children":{"File Output Operator":{"compressed:":"false","Statistics:":"Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE","table:":{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}}},"outputColumnNames:":["_col0","_col1","_col2","_col3"],"expressions:":"KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string), KEY.reducesinkkey3 (type: string)"}},"Map Operator Tree:":[{"TableScan":{"children":{"Reduce Output Operator":{"sort order:":"++++","Statistics:":"Num rows: 182 
 Data size: 1939 Basic stats: COMPLETE Column stats: NONE","key expressions:":"_col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)"}}}}]}},"Stage-0":{"Fetch Operator":{"Processor Tree:":{"ListSink":{}},"limit:":"-1"}}}}
+{"STAGE DEPENDENCIES":{"Stage-1":{"ROOT STAGE":"TRUE"},"Stage-2":{"DEPENDENT STAGES":"Stage-1"},"Stage-0":{"DEPENDENT STAGES":"Stage-2"}},"STAGE PLANS":{"Stage-1":{"Map Reduce":{"Map Operator Tree:":[{"TableScan":{"alias:":"src","Statistics:":"Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE","children":{"Filter Operator":{"predicate:":"(key < 10) (type: boolean)","Statistics:":"Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE","children":{"Select Operator":{"expressions:":"key (type: string), value (type: string)","outputColumnNames:":["_col0","_col1"],"Statistics:":"Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE","children":{"Reduce Output Operator":{"sort order:":"","Statistics:":"Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE","value expressions:":"_col0 (type: string), _col1 (type: string)"}}}}}}}},{"TableScan":{"alias:":"src","Statistics:":"Num rows: 500 Data size: 5312 Basic stats: COM
 PLETE Column stats: NONE","children":{"Filter Operator":{"predicate:":"(key < 10) (type: boolean)","Statistics:":"Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE","children":{"Select Operator":{"expressions:":"key (type: string), value (type: string)","outputColumnNames:":["_col0","_col1"],"Statistics:":"Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE","children":{"Reduce Output Operator":{"sort order:":"","Statistics:":"Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE","value expressions:":"_col0 (type: string), _col1 (type: string)"}}}}}}}}],"Reduce Operator Tree:":{"Join Operator":{"condition map:":[{"":"Inner Join 0 to 1"}],"keys:":{},"outputColumnNames:":["_col0","_col1","_col2","_col3"],"Statistics:":"Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE","children":{"File Output Operator":{"compressed:":"false","table:":{"input format:":"org.apache.hadoop.mapred.SequenceFileInputFormat","outp
 ut format:":"org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe"}}}}}}},"Stage-2":{"Map Reduce":{"Map Operator Tree:":[{"TableScan":{"children":{"Reduce Output Operator":{"key expressions:":"_col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)","sort order:":"++++","Statistics:":"Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE"}}}}],"Reduce Operator Tree:":{"Select Operator":{"expressions:":"KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string), KEY.reducesinkkey3 (type: string)","outputColumnNames:":["_col0","_col1","_col2","_col3"],"Statistics:":"Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE","children":{"File Output Operator":{"compressed:":"false","Statistics:":"Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE","table:":{"input format:":"org.apache.hadoo
 p.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}}}}}}},"Stage-0":{"Fetch Operator":{"limit:":"-1","Processor Tree:":{"ListSink":{}}}}}}
 Warning: Shuffle Join JOIN[8][tables = [src1, src2]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: SELECT src1.key as k1, src1.value as v1, 
        src2.key as k2, src2.value as v2 FROM 

http://git-wip-us.apache.org/repos/asf/hive/blob/da0be3db/ql/src/test/results/clientpositive/parallel_join0.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/parallel_join0.q.out b/ql/src/test/results/clientpositive/parallel_join0.q.out
index 0f57a51..3aef936 100644
--- a/ql/src/test/results/clientpositive/parallel_join0.q.out
+++ b/ql/src/test/results/clientpositive/parallel_join0.q.out
@@ -116,7 +116,7 @@ SELECT src1.key as k1, src1.value as v1,
   (SELECT * FROM src WHERE src.key < 10) src2
   SORT BY k1, v1, k2, v2
 POSTHOOK: type: QUERY
-{"STAGE PLANS":{"Stage-2":{"Map Reduce":{"Reduce Operator Tree:":{"Select Operator":{"expressions:":"KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string), KEY.reducesinkkey3 (type: string)","outputColumnNames:":["_col0","_col1","_col2","_col3"],"children":{"File Output Operator":{"Statistics:":"Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE","compressed:":"false","table:":{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}}},"Statistics:":"Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE"}},"Map Operator Tree:":[{"TableScan":{"children":{"Reduce Output Operator":{"sort order:":"++++","Statistics:":"Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE","key expressions:":"_col0 (type: string), _col1 (type: string), _col2 (ty
 pe: string), _col3 (type: string)"}}}}]}},"Stage-1":{"Map Reduce":{"Reduce Operator Tree:":{"Join Operator":{"keys:":{},"outputColumnNames:":["_col0","_col1","_col2","_col3"],"children":{"File Output Operator":{"compressed:":"false","table:":{"serde:":"org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe","input format:":"org.apache.hadoop.mapred.SequenceFileInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat"}}},"Statistics:":"Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE","condition map:":[{"":"Inner Join 0 to 1"}]}},"Map Operator Tree:":[{"TableScan":{"alias:":"src","children":{"Filter Operator":{"predicate:":"(key < 10) (type: boolean)","children":{"Select Operator":{"expressions:":"key (type: string), value (type: string)","outputColumnNames:":["_col0","_col1"],"children":{"Reduce Output Operator":{"sort order:":"","value expressions:":"_col0 (type: string), _col1 (type: string)","Statistics:":"Num rows: 166 Data
  size: 1763 Basic stats: COMPLETE Column stats: NONE"}},"Statistics:":"Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE"}},"Statistics:":"Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE"}},"Statistics:":"Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE"}},{"TableScan":{"alias:":"src","children":{"Filter Operator":{"predicate:":"(key < 10) (type: boolean)","children":{"Select Operator":{"expressions:":"key (type: string), value (type: string)","outputColumnNames:":["_col0","_col1"],"children":{"Reduce Output Operator":{"sort order:":"","value expressions:":"_col0 (type: string), _col1 (type: string)","Statistics:":"Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE"}},"Statistics:":"Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE"}},"Statistics:":"Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE"}},"Statistics:":"Num rows: 500 Data size: 5312 Basic stats
 : COMPLETE Column stats: NONE"}}]}},"Stage-0":{"Fetch Operator":{"limit:":"-1","Processor Tree:":{"ListSink":{}}}}},"STAGE DEPENDENCIES":{"Stage-2":{"DEPENDENT STAGES":"Stage-1"},"Stage-1":{"ROOT STAGE":"TRUE"},"Stage-0":{"DEPENDENT STAGES":"Stage-2"}}}
+{"STAGE DEPENDENCIES":{"Stage-1":{"ROOT STAGE":"TRUE"},"Stage-2":{"DEPENDENT STAGES":"Stage-1"},"Stage-0":{"DEPENDENT STAGES":"Stage-2"}},"STAGE PLANS":{"Stage-1":{"Map Reduce":{"Map Operator Tree:":[{"TableScan":{"alias:":"src","Statistics:":"Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE","children":{"Filter Operator":{"predicate:":"(key < 10) (type: boolean)","Statistics:":"Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE","children":{"Select Operator":{"expressions:":"key (type: string), value (type: string)","outputColumnNames:":["_col0","_col1"],"Statistics:":"Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE","children":{"Reduce Output Operator":{"sort order:":"","Statistics:":"Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE","value expressions:":"_col0 (type: string), _col1 (type: string)"}}}}}}}},{"TableScan":{"alias:":"src","Statistics:":"Num rows: 500 Data size: 5312 Basic stats: COM
 PLETE Column stats: NONE","children":{"Filter Operator":{"predicate:":"(key < 10) (type: boolean)","Statistics:":"Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE","children":{"Select Operator":{"expressions:":"key (type: string), value (type: string)","outputColumnNames:":["_col0","_col1"],"Statistics:":"Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE","children":{"Reduce Output Operator":{"sort order:":"","Statistics:":"Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE","value expressions:":"_col0 (type: string), _col1 (type: string)"}}}}}}}}],"Reduce Operator Tree:":{"Join Operator":{"condition map:":[{"":"Inner Join 0 to 1"}],"keys:":{},"outputColumnNames:":["_col0","_col1","_col2","_col3"],"Statistics:":"Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE","children":{"File Output Operator":{"compressed:":"false","table:":{"input format:":"org.apache.hadoop.mapred.SequenceFileInputFormat","outp
 ut format:":"org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe"}}}}}}},"Stage-2":{"Map Reduce":{"Map Operator Tree:":[{"TableScan":{"children":{"Reduce Output Operator":{"key expressions:":"_col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)","sort order:":"++++","Statistics:":"Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE"}}}}],"Reduce Operator Tree:":{"Select Operator":{"expressions:":"KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string), KEY.reducesinkkey3 (type: string)","outputColumnNames:":["_col0","_col1","_col2","_col3"],"Statistics:":"Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE","children":{"File Output Operator":{"compressed:":"false","Statistics:":"Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE","table:":{"input format:":"org.apache.hadoo
 p.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}}}}}}},"Stage-0":{"Fetch Operator":{"limit:":"-1","Processor Tree:":{"ListSink":{}}}}}}
 Warning: Shuffle Join JOIN[8][tables = [src1, src2]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: SELECT src1.key as k1, src1.value as v1, 
        src2.key as k2, src2.value as v2 FROM 

http://git-wip-us.apache.org/repos/asf/hive/blob/da0be3db/ql/src/test/results/clientpositive/plan_json.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/plan_json.q.java1.7.out b/ql/src/test/results/clientpositive/plan_json.q.java1.7.out
index 0979c70..2faa08a 100644
--- a/ql/src/test/results/clientpositive/plan_json.q.java1.7.out
+++ b/ql/src/test/results/clientpositive/plan_json.q.java1.7.out
@@ -10,4 +10,4 @@ POSTHOOK: query: -- explain plan json:  the query gets the formatted json output
 
 EXPLAIN FORMATTED SELECT count(1) FROM src
 POSTHOOK: type: QUERY
-{"STAGE PLANS":{"Stage-1":{"Map Reduce":{"Reduce Operator Tree:":{"Group By Operator":{"mode:":"mergepartial","aggregations:":["count(VALUE._col0)"],"outputColumnNames:":["_col0"],"children":{"File Output Operator":{"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE","compressed:":"false","table:":{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}}},"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE"}},"Map Operator Tree:":[{"TableScan":{"alias:":"src","children":{"Select Operator":{"children":{"Group By Operator":{"mode:":"hash","aggregations:":["count(1)"],"outputColumnNames:":["_col0"],"children":{"Reduce Output Operator":{"sort order:":"","value expressions:":"_col0 (type: bigint)","Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
 "}},"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE"}},"Statistics:":"Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE"}},"Statistics:":"Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE"}}]}},"Stage-0":{"Fetch Operator":{"limit:":"-1","Processor Tree:":{"ListSink":{}}}}},"STAGE DEPENDENCIES":{"Stage-1":{"ROOT STAGE":"TRUE"},"Stage-0":{"DEPENDENT STAGES":"Stage-1"}}}
+{"STAGE DEPENDENCIES":{"Stage-1":{"ROOT STAGE":"TRUE"},"Stage-0":{"DEPENDENT STAGES":"Stage-1"}},"STAGE PLANS":{"Stage-1":{"Map Reduce":{"Map Operator Tree:":[{"TableScan":{"alias:":"src","Statistics:":"Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE","children":{"Select Operator":{"Statistics:":"Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE","children":{"Group By Operator":{"aggregations:":["count(1)"],"mode:":"hash","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE","children":{"Reduce Output Operator":{"sort order:":"","Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE","value expressions:":"_col0 (type: bigint)"}}}}}}}}],"Reduce Operator Tree:":{"Group By Operator":{"aggregations:":["count(VALUE._col0)"],"mode:":"mergepartial","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column 
 stats: COMPLETE","children":{"File Output Operator":{"compressed:":"false","Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE","table:":{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}}}}}}},"Stage-0":{"Fetch Operator":{"limit:":"-1","Processor Tree:":{"ListSink":{}}}}}}

http://git-wip-us.apache.org/repos/asf/hive/blob/da0be3db/ql/src/test/results/clientpositive/plan_json.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/plan_json.q.java1.8.out b/ql/src/test/results/clientpositive/plan_json.q.java1.8.out
index 13842b4..2faa08a 100644
--- a/ql/src/test/results/clientpositive/plan_json.q.java1.8.out
+++ b/ql/src/test/results/clientpositive/plan_json.q.java1.8.out
@@ -10,4 +10,4 @@ POSTHOOK: query: -- explain plan json:  the query gets the formatted json output
 
 EXPLAIN FORMATTED SELECT count(1) FROM src
 POSTHOOK: type: QUERY
-{"STAGE DEPENDENCIES":{"Stage-1":{"ROOT STAGE":"TRUE"},"Stage-0":{"DEPENDENT STAGES":"Stage-1"}},"STAGE PLANS":{"Stage-1":{"Map Reduce":{"Reduce Operator Tree:":{"Group By Operator":{"aggregations:":["count(VALUE._col0)"],"mode:":"mergepartial","Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE","children":{"Select Operator":{"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE","children":{"File Output Operator":{"compressed:":"false","Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE","table:":{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}}},"outputColumnNames:":["_col0"],"expressions:":"_col0 (type: bigint)"}},"outputColumnNames:":["_col0"]}},"Map Operator Tree:":[{"TableScan":{"alias:":"src","Statistics:":"Num rows: 500 Data size
 : 5312 Basic stats: COMPLETE Column stats: COMPLETE","children":{"Select Operator":{"Statistics:":"Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE","children":{"Group By Operator":{"aggregations:":["count(_col0)"],"mode:":"hash","Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE","children":{"Reduce Output Operator":{"value expressions:":"_col0 (type: bigint)","sort order:":"","Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE"}},"outputColumnNames:":["_col0"]}},"outputColumnNames:":["_col0"],"expressions:":"1 (type: int)"}}}}]}},"Stage-0":{"Fetch Operator":{"Processor Tree:":{"ListSink":{}},"limit:":"-1"}}}}
+{"STAGE DEPENDENCIES":{"Stage-1":{"ROOT STAGE":"TRUE"},"Stage-0":{"DEPENDENT STAGES":"Stage-1"}},"STAGE PLANS":{"Stage-1":{"Map Reduce":{"Map Operator Tree:":[{"TableScan":{"alias:":"src","Statistics:":"Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE","children":{"Select Operator":{"Statistics:":"Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE","children":{"Group By Operator":{"aggregations:":["count(1)"],"mode:":"hash","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE","children":{"Reduce Output Operator":{"sort order:":"","Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE","value expressions:":"_col0 (type: bigint)"}}}}}}}}],"Reduce Operator Tree:":{"Group By Operator":{"aggregations:":["count(VALUE._col0)"],"mode:":"mergepartial","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column 
 stats: COMPLETE","children":{"File Output Operator":{"compressed:":"false","Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE","table:":{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}}}}}}},"Stage-0":{"Fetch Operator":{"limit:":"-1","Processor Tree:":{"ListSink":{}}}}}}

http://git-wip-us.apache.org/repos/asf/hive/blob/da0be3db/ql/src/test/results/clientpositive/tez/constprog_dpp.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/constprog_dpp.q.out b/ql/src/test/results/clientpositive/tez/constprog_dpp.q.out
index b2b2371..0bc964b 100644
--- a/ql/src/test/results/clientpositive/tez/constprog_dpp.q.out
+++ b/ql/src/test/results/clientpositive/tez/constprog_dpp.q.out
@@ -54,10 +54,10 @@ Stage-0
          File Output Operator [FS_16]
             compressed:false
             Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-            table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+            table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
             Merge Join Operator [MERGEJOIN_20]
             |  condition map:[{"":"Left Outer Join0 to 1"}]
-            |  keys:{"1":"_col0 (type: int)","0":"id (type: int)"}
+            |  keys:{"0":"id (type: int)","1":"_col0 (type: int)"}
             |  outputColumnNames:["_col0"]
             |  Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             |<-Map 6 [SIMPLE_EDGE]