Posted to commits@hive.apache.org by om...@apache.org on 2015/11/24 21:10:31 UTC

[1/7] hive git commit: HIVE-12411: Remove counter based stats collection mechanism (Pengcheng Xiong, reviewed by Ashutosh Chauhan)

Repository: hive
Updated Branches:
  refs/heads/master-fixed 5f726d58e -> adbc0ab6a
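
The patch below removes the golden files for the counter-based stats tests and renumbers the synthetic -mr-100NN placeholder paths in the affected Tez plans. For orientation, a minimal sketch of the workflow the deleted stats_counter_partitioned.q.out covered, assuming the test selected the counter stats publisher via hive.stats.dbclass=counter (the mechanism this change removes; the set command is inferred from the test name, not shown in the diff):

    -- sketch only: hive.stats.dbclass=counter is inferred from the test name
    set hive.stats.dbclass=counter;
    create table dummy (key string, value string) partitioned by (ds string, hr string);
    load data local inpath '../../data/files/kv1.txt' into table dummy partition (ds='2008',hr='12');
    analyze table dummy partition (ds,hr) compute statistics;
    -- Partition Parameters in the describe output then carry numFiles, numRows,
    -- rawDataSize and totalSize for each analyzed partition
    describe formatted dummy partition (ds='2008', hr='12');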


http://git-wip-us.apache.org/repos/asf/hive/blob/884ff9ca/ql/src/test/results/clientpositive/stats_counter_partitioned.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats_counter_partitioned.q.out b/ql/src/test/results/clientpositive/stats_counter_partitioned.q.out
deleted file mode 100644
index 626dcff..0000000
--- a/ql/src/test/results/clientpositive/stats_counter_partitioned.q.out
+++ /dev/null
@@ -1,465 +0,0 @@
-PREHOOK: query: -- partitioned table analyze 
-
-create table dummy (key string, value string) partitioned by (ds string, hr string)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@dummy
-POSTHOOK: query: -- partitioned table analyze 
-
-create table dummy (key string, value string) partitioned by (ds string, hr string)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dummy
-PREHOOK: query: load data local inpath '../../data/files/kv1.txt' into table dummy partition (ds='2008',hr='12')
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@dummy
-POSTHOOK: query: load data local inpath '../../data/files/kv1.txt' into table dummy partition (ds='2008',hr='12')
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@dummy
-POSTHOOK: Output: default@dummy@ds=2008/hr=12
-PREHOOK: query: load data local inpath '../../data/files/kv1.txt' into table dummy partition (ds='2008',hr='11')
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@dummy
-POSTHOOK: query: load data local inpath '../../data/files/kv1.txt' into table dummy partition (ds='2008',hr='11')
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@dummy
-POSTHOOK: Output: default@dummy@ds=2008/hr=11
-PREHOOK: query: analyze table dummy partition (ds,hr) compute statistics
-PREHOOK: type: QUERY
-PREHOOK: Input: default@dummy
-PREHOOK: Input: default@dummy@ds=2008/hr=11
-PREHOOK: Input: default@dummy@ds=2008/hr=12
-PREHOOK: Output: default@dummy
-PREHOOK: Output: default@dummy@ds=2008/hr=11
-PREHOOK: Output: default@dummy@ds=2008/hr=12
-POSTHOOK: query: analyze table dummy partition (ds,hr) compute statistics
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dummy
-POSTHOOK: Input: default@dummy@ds=2008/hr=11
-POSTHOOK: Input: default@dummy@ds=2008/hr=12
-POSTHOOK: Output: default@dummy
-POSTHOOK: Output: default@dummy@ds=2008/hr=11
-POSTHOOK: Output: default@dummy@ds=2008/hr=12
-PREHOOK: query: describe formatted dummy partition (ds='2008', hr='11')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@dummy
-POSTHOOK: query: describe formatted dummy partition (ds='2008', hr='11')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@dummy
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[2008, 11]          	 
-Database:           	default             	 
-Table:              	dummy               	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	500                 
-	rawDataSize         	5312                
-	totalSize           	5812                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: describe formatted dummy partition (ds='2008', hr='12')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@dummy
-POSTHOOK: query: describe formatted dummy partition (ds='2008', hr='12')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@dummy
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[2008, 12]          	 
-Database:           	default             	 
-Table:              	dummy               	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	500                 
-	rawDataSize         	5312                
-	totalSize           	5812                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: drop table dummy
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@dummy
-PREHOOK: Output: default@dummy
-POSTHOOK: query: drop table dummy
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@dummy
-POSTHOOK: Output: default@dummy
-PREHOOK: query: -- static partitioned table on insert
-
-create table dummy (key string, value string) partitioned by (ds string, hr string)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@dummy
-POSTHOOK: query: -- static partitioned table on insert
-
-create table dummy (key string, value string) partitioned by (ds string, hr string)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dummy
-PREHOOK: query: insert overwrite table dummy partition (ds='10',hr='11') select * from src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@dummy@ds=10/hr=11
-POSTHOOK: query: insert overwrite table dummy partition (ds='10',hr='11') select * from src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dummy@ds=10/hr=11
-POSTHOOK: Lineage: dummy PARTITION(ds=10,hr=11).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dummy PARTITION(ds=10,hr=11).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: insert overwrite table dummy partition (ds='10',hr='12') select * from src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@dummy@ds=10/hr=12
-POSTHOOK: query: insert overwrite table dummy partition (ds='10',hr='12') select * from src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dummy@ds=10/hr=12
-POSTHOOK: Lineage: dummy PARTITION(ds=10,hr=12).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dummy PARTITION(ds=10,hr=12).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: describe formatted dummy partition (ds='10', hr='11')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@dummy
-POSTHOOK: query: describe formatted dummy partition (ds='10', hr='11')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@dummy
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[10, 11]            	 
-Database:           	default             	 
-Table:              	dummy               	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	500                 
-	rawDataSize         	5312                
-	totalSize           	5812                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: describe formatted dummy partition (ds='10', hr='12')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@dummy
-POSTHOOK: query: describe formatted dummy partition (ds='10', hr='12')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@dummy
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[10, 12]            	 
-Database:           	default             	 
-Table:              	dummy               	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	500                 
-	rawDataSize         	5312                
-	totalSize           	5812                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: drop table dummy
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@dummy
-PREHOOK: Output: default@dummy
-POSTHOOK: query: drop table dummy
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@dummy
-POSTHOOK: Output: default@dummy
-PREHOOK: query: -- dynamic partitioned table on insert
-
-create table dummy (key int) partitioned by (hr int)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@dummy
-POSTHOOK: query: -- dynamic partitioned table on insert
-
-create table dummy (key int) partitioned by (hr int)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dummy
-PREHOOK: query: CREATE TABLE tbl(key int, value int) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@tbl
-POSTHOOK: query: CREATE TABLE tbl(key int, value int) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@tbl
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/tbl.txt' OVERWRITE INTO TABLE tbl
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@tbl
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/tbl.txt' OVERWRITE INTO TABLE tbl
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@tbl
-PREHOOK: query: insert overwrite table dummy partition (hr) select * from tbl
-PREHOOK: type: QUERY
-PREHOOK: Input: default@tbl
-PREHOOK: Output: default@dummy
-POSTHOOK: query: insert overwrite table dummy partition (hr) select * from tbl
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tbl
-POSTHOOK: Output: default@dummy@hr=1994
-POSTHOOK: Output: default@dummy@hr=1996
-POSTHOOK: Output: default@dummy@hr=1997
-POSTHOOK: Output: default@dummy@hr=1998
-POSTHOOK: Lineage: dummy PARTITION(hr=1994).key SIMPLE [(tbl)tbl.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: dummy PARTITION(hr=1996).key SIMPLE [(tbl)tbl.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: dummy PARTITION(hr=1997).key SIMPLE [(tbl)tbl.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: dummy PARTITION(hr=1998).key SIMPLE [(tbl)tbl.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: describe formatted dummy partition (hr=1997)
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@dummy
-POSTHOOK: query: describe formatted dummy partition (hr=1997)
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@dummy
-# col_name            	data_type           	comment             
-	 	 
-key                 	int                 	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-hr                  	int                 	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[1997]              	 
-Database:           	default             	 
-Table:              	dummy               	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	6                   
-	rawDataSize         	6                   
-	totalSize           	12                  
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: describe formatted dummy partition (hr=1994)
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@dummy
-POSTHOOK: query: describe formatted dummy partition (hr=1994)
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@dummy
-# col_name            	data_type           	comment             
-	 	 
-key                 	int                 	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-hr                  	int                 	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[1994]              	 
-Database:           	default             	 
-Table:              	dummy               	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	1                   
-	rawDataSize         	1                   
-	totalSize           	2                   
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: describe formatted dummy partition (hr=1998)
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@dummy
-POSTHOOK: query: describe formatted dummy partition (hr=1998)
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@dummy
-# col_name            	data_type           	comment             
-	 	 
-key                 	int                 	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-hr                  	int                 	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[1998]              	 
-Database:           	default             	 
-Table:              	dummy               	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	2                   
-	rawDataSize         	2                   
-	totalSize           	4                   
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: describe formatted dummy partition (hr=1996)
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@dummy
-POSTHOOK: query: describe formatted dummy partition (hr=1996)
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@dummy
-# col_name            	data_type           	comment             
-	 	 
-key                 	int                 	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-hr                  	int                 	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[1996]              	 
-Database:           	default             	 
-Table:              	dummy               	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	1                   
-	rawDataSize         	1                   
-	totalSize           	2                   
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: drop table tbl
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@tbl
-PREHOOK: Output: default@tbl
-POSTHOOK: query: drop table tbl
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@tbl
-POSTHOOK: Output: default@tbl
-PREHOOK: query: drop table dummy
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@dummy
-PREHOOK: Output: default@dummy
-POSTHOOK: query: drop table dummy
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@dummy
-POSTHOOK: Output: default@dummy

http://git-wip-us.apache.org/repos/asf/hive/blob/884ff9ca/ql/src/test/results/clientpositive/tez/metadataonly1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/metadataonly1.q.out b/ql/src/test/results/clientpositive/tez/metadataonly1.q.out
index 1fb166b..4ef71f8 100644
--- a/ql/src/test/results/clientpositive/tez/metadataonly1.q.out
+++ b/ql/src/test/results/clientpositive/tez/metadataonly1.q.out
@@ -170,9 +170,9 @@ STAGE PLANS:
                         value expressions: _col0 (type: string)
                         auto parallelism: false
             Path -> Alias:
-              -mr-10003default.test1{ds=1} [test1]
+              -mr-10004default.test1{ds=1} [test1]
             Path -> Partition:
-              -mr-10003default.test1{ds=1} 
+              -mr-10004default.test1{ds=1} 
                 Partition
                   input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -212,7 +212,7 @@ STAGE PLANS:
                     name: default.test1
                   name: default.test1
             Truncated Path -> Alias:
-              -mr-10003default.test1{ds=1} [test1]
+              -mr-10004default.test1{ds=1} [test1]
         Reducer 2 
             Needs Tagging: false
             Reduce Operator Tree:
@@ -318,9 +318,9 @@ STAGE PLANS:
                         tag: -1
                         auto parallelism: false
             Path -> Alias:
-              -mr-10003default.test1{ds=1} [test1]
+              -mr-10004default.test1{ds=1} [test1]
             Path -> Partition:
-              -mr-10003default.test1{ds=1} 
+              -mr-10004default.test1{ds=1} 
                 Partition
                   input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -360,7 +360,7 @@ STAGE PLANS:
                     name: default.test1
                   name: default.test1
             Truncated Path -> Alias:
-              -mr-10003default.test1{ds=1} [test1]
+              -mr-10004default.test1{ds=1} [test1]
         Reducer 2 
             Needs Tagging: false
             Reduce Operator Tree:
@@ -1038,11 +1038,11 @@ STAGE PLANS:
                         tag: -1
                         auto parallelism: true
             Path -> Alias:
-              -mr-10003default.test2{ds=1, hr=1} [test2]
-              -mr-10004default.test2{ds=1, hr=2} [test2]
-              -mr-10005default.test2{ds=1, hr=3} [test2]
+              -mr-10004default.test2{ds=1, hr=1} [test2]
+              -mr-10005default.test2{ds=1, hr=2} [test2]
+              -mr-10006default.test2{ds=1, hr=3} [test2]
             Path -> Partition:
-              -mr-10003default.test2{ds=1, hr=1} 
+              -mr-10004default.test2{ds=1, hr=1} 
                 Partition
                   input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1082,7 +1082,7 @@ STAGE PLANS:
                     serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                     name: default.test2
                   name: default.test2
-              -mr-10004default.test2{ds=1, hr=2} 
+              -mr-10005default.test2{ds=1, hr=2} 
                 Partition
                   input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1122,7 +1122,7 @@ STAGE PLANS:
                     serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                     name: default.test2
                   name: default.test2
-              -mr-10005default.test2{ds=1, hr=3} 
+              -mr-10006default.test2{ds=1, hr=3} 
                 Partition
                   input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1163,9 +1163,9 @@ STAGE PLANS:
                     name: default.test2
                   name: default.test2
             Truncated Path -> Alias:
-              -mr-10003default.test2{ds=1, hr=1} [test2]
-              -mr-10004default.test2{ds=1, hr=2} [test2]
-              -mr-10005default.test2{ds=1, hr=3} [test2]
+              -mr-10004default.test2{ds=1, hr=1} [test2]
+              -mr-10005default.test2{ds=1, hr=2} [test2]
+              -mr-10006default.test2{ds=1, hr=3} [test2]
         Reducer 2 
             Needs Tagging: false
             Reduce Operator Tree:
@@ -1521,10 +1521,10 @@ STAGE PLANS:
                         value expressions: _col0 (type: string)
                         auto parallelism: false
             Path -> Alias:
-              -mr-10003default.test1{ds=1} [test1]
-              -mr-10004default.test1{ds=2} [test1]
+              -mr-10004default.test1{ds=1} [test1]
+              -mr-10005default.test1{ds=2} [test1]
             Path -> Partition:
-              -mr-10003default.test1{ds=1} 
+              -mr-10004default.test1{ds=1} 
                 Partition
                   input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1563,7 +1563,7 @@ STAGE PLANS:
                     serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                     name: default.test1
                   name: default.test1
-              -mr-10004default.test1{ds=2} 
+              -mr-10005default.test1{ds=2} 
                 Partition
                   input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1603,8 +1603,8 @@ STAGE PLANS:
                     name: default.test1
                   name: default.test1
             Truncated Path -> Alias:
-              -mr-10003default.test1{ds=1} [test1]
-              -mr-10004default.test1{ds=2} [test1]
+              -mr-10004default.test1{ds=1} [test1]
+              -mr-10005default.test1{ds=2} [test1]
         Reducer 2 
             Needs Tagging: false
             Reduce Operator Tree:
@@ -1770,13 +1770,13 @@ STAGE PLANS:
                         tag: -1
                         auto parallelism: true
             Path -> Alias:
-              -mr-10003default.test2{ds=01_10_10, hr=01} [test2]
-              -mr-10004default.test2{ds=01_10_20, hr=02} [test2]
-              -mr-10005default.test2{ds=1, hr=1} [test2]
-              -mr-10006default.test2{ds=1, hr=2} [test2]
-              -mr-10007default.test2{ds=1, hr=3} [test2]
+              -mr-10004default.test2{ds=01_10_10, hr=01} [test2]
+              -mr-10005default.test2{ds=01_10_20, hr=02} [test2]
+              -mr-10006default.test2{ds=1, hr=1} [test2]
+              -mr-10007default.test2{ds=1, hr=2} [test2]
+              -mr-10008default.test2{ds=1, hr=3} [test2]
             Path -> Partition:
-              -mr-10003default.test2{ds=01_10_10, hr=01} 
+              -mr-10004default.test2{ds=01_10_10, hr=01} 
                 Partition
                   input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1816,7 +1816,7 @@ STAGE PLANS:
                     serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                     name: default.test2
                   name: default.test2
-              -mr-10004default.test2{ds=01_10_20, hr=02} 
+              -mr-10005default.test2{ds=01_10_20, hr=02} 
                 Partition
                   input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1856,7 +1856,7 @@ STAGE PLANS:
                     serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                     name: default.test2
                   name: default.test2
-              -mr-10005default.test2{ds=1, hr=1} 
+              -mr-10006default.test2{ds=1, hr=1} 
                 Partition
                   input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1896,7 +1896,7 @@ STAGE PLANS:
                     serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                     name: default.test2
                   name: default.test2
-              -mr-10006default.test2{ds=1, hr=2} 
+              -mr-10007default.test2{ds=1, hr=2} 
                 Partition
                   input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1936,7 +1936,7 @@ STAGE PLANS:
                     serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                     name: default.test2
                   name: default.test2
-              -mr-10007default.test2{ds=1, hr=3} 
+              -mr-10008default.test2{ds=1, hr=3} 
                 Partition
                   input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1977,11 +1977,11 @@ STAGE PLANS:
                     name: default.test2
                   name: default.test2
             Truncated Path -> Alias:
-              -mr-10003default.test2{ds=01_10_10, hr=01} [test2]
-              -mr-10004default.test2{ds=01_10_20, hr=02} [test2]
-              -mr-10005default.test2{ds=1, hr=1} [test2]
-              -mr-10006default.test2{ds=1, hr=2} [test2]
-              -mr-10007default.test2{ds=1, hr=3} [test2]
+              -mr-10004default.test2{ds=01_10_10, hr=01} [test2]
+              -mr-10005default.test2{ds=01_10_20, hr=02} [test2]
+              -mr-10006default.test2{ds=1, hr=1} [test2]
+              -mr-10007default.test2{ds=1, hr=2} [test2]
+              -mr-10008default.test2{ds=1, hr=3} [test2]
         Reducer 2 
             Needs Tagging: false
             Reduce Operator Tree:
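
The metadataonly1.q.out hunks above change no plan structure: each one only shifts a synthetic -mr-100NN placeholder path index by one, leaving partitions, formats, and serdes untouched. The optimize_nullscan.q.out hunks below follow the same pattern. As a hedged sketch, plans of this shape come from explain extended over partition-key-only queries, which Hive answers from metastore metadata via OneNullRowInputFormat; the query shape here is assumed, since the .q file itself is not part of this diff:

    -- assumed query shape: only the partition key ds is referenced, so the
    -- metadata-only optimizer substitutes OneNullRowInputFormat for the scan
    explain extended select max(ds) from test1;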

http://git-wip-us.apache.org/repos/asf/hive/blob/884ff9ca/ql/src/test/results/clientpositive/tez/optimize_nullscan.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/optimize_nullscan.q.out b/ql/src/test/results/clientpositive/tez/optimize_nullscan.q.out
index bf9ba9b..f907ed7 100644
--- a/ql/src/test/results/clientpositive/tez/optimize_nullscan.q.out
+++ b/ql/src/test/results/clientpositive/tez/optimize_nullscan.q.out
@@ -274,9 +274,9 @@ STAGE PLANS:
                         tag: 0
                         auto parallelism: true
             Path -> Alias:
-              -mr-10002default.src{} [src]
+              -mr-10003default.src{} [src]
             Path -> Partition:
-              -mr-10002default.src{} 
+              -mr-10003default.src{} 
                 Partition
                   input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -320,7 +320,7 @@ STAGE PLANS:
                     name: default.src
                   name: default.src
             Truncated Path -> Alias:
-              -mr-10002default.src{} [src]
+              -mr-10003default.src{} [src]
         Map 3 
             Map Operator Tree:
                 TableScan
@@ -341,12 +341,12 @@ STAGE PLANS:
                         value expressions: _col0 (type: string)
                         auto parallelism: false
             Path -> Alias:
-              -mr-10003default.srcpart{ds=2008-04-08, hr=11} [srcpart]
-              -mr-10004default.srcpart{ds=2008-04-08, hr=12} [srcpart]
-              -mr-10005default.srcpart{ds=2008-04-09, hr=11} [srcpart]
-              -mr-10006default.srcpart{ds=2008-04-09, hr=12} [srcpart]
+              -mr-10004default.srcpart{ds=2008-04-08, hr=11} [srcpart]
+              -mr-10005default.srcpart{ds=2008-04-08, hr=12} [srcpart]
+              -mr-10006default.srcpart{ds=2008-04-09, hr=11} [srcpart]
+              -mr-10007default.srcpart{ds=2008-04-09, hr=12} [srcpart]
             Path -> Partition:
-              -mr-10003default.srcpart{ds=2008-04-08, hr=11} 
+              -mr-10004default.srcpart{ds=2008-04-08, hr=11} 
                 Partition
                   input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -391,7 +391,7 @@ STAGE PLANS:
                     serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                     name: default.srcpart
                   name: default.srcpart
-              -mr-10004default.srcpart{ds=2008-04-08, hr=12} 
+              -mr-10005default.srcpart{ds=2008-04-08, hr=12} 
                 Partition
                   input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -436,7 +436,7 @@ STAGE PLANS:
                     serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                     name: default.srcpart
                   name: default.srcpart
-              -mr-10005default.srcpart{ds=2008-04-09, hr=11} 
+              -mr-10006default.srcpart{ds=2008-04-09, hr=11} 
                 Partition
                   input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -481,7 +481,7 @@ STAGE PLANS:
                     serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                     name: default.srcpart
                   name: default.srcpart
-              -mr-10006default.srcpart{ds=2008-04-09, hr=12} 
+              -mr-10007default.srcpart{ds=2008-04-09, hr=12} 
                 Partition
                   input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -527,10 +527,10 @@ STAGE PLANS:
                     name: default.srcpart
                   name: default.srcpart
             Truncated Path -> Alias:
-              -mr-10003default.srcpart{ds=2008-04-08, hr=11} [srcpart]
-              -mr-10004default.srcpart{ds=2008-04-08, hr=12} [srcpart]
-              -mr-10005default.srcpart{ds=2008-04-09, hr=11} [srcpart]
-              -mr-10006default.srcpart{ds=2008-04-09, hr=12} [srcpart]
+              -mr-10004default.srcpart{ds=2008-04-08, hr=11} [srcpart]
+              -mr-10005default.srcpart{ds=2008-04-08, hr=12} [srcpart]
+              -mr-10006default.srcpart{ds=2008-04-09, hr=11} [srcpart]
+              -mr-10007default.srcpart{ds=2008-04-09, hr=12} [srcpart]
         Reducer 2 
             Needs Tagging: false
             Reduce Operator Tree:
@@ -695,9 +695,9 @@ STAGE PLANS:
                         value expressions: _col0 (type: bigint)
                         auto parallelism: false
             Path -> Alias:
-              -mr-10002default.src{} [src]
+              -mr-10003default.src{} [src]
             Path -> Partition:
-              -mr-10002default.src{} 
+              -mr-10003default.src{} 
                 Partition
                   input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -741,7 +741,7 @@ STAGE PLANS:
                     name: default.src
                   name: default.src
             Truncated Path -> Alias:
-              -mr-10002default.src{} [src]
+              -mr-10003default.src{} [src]
         Map 4 
             Map Operator Tree:
                 TableScan
@@ -1132,9 +1132,9 @@ STAGE PLANS:
                         value expressions: _col0 (type: string)
                         auto parallelism: false
             Path -> Alias:
-              -mr-10002default.src{} [src]
+              -mr-10003default.src{} [src]
             Path -> Partition:
-              -mr-10002default.src{} 
+              -mr-10003default.src{} 
                 Partition
                   input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1178,7 +1178,7 @@ STAGE PLANS:
                     name: default.src
                   name: default.src
             Truncated Path -> Alias:
-              -mr-10002default.src{} [src]
+              -mr-10003default.src{} [src]
         Map 3 
             Map Operator Tree:
                 TableScan
@@ -1199,12 +1199,12 @@ STAGE PLANS:
                         value expressions: _col0 (type: string)
                         auto parallelism: false
             Path -> Alias:
-              -mr-10003default.srcpart{ds=2008-04-08, hr=11} [srcpart]
-              -mr-10004default.srcpart{ds=2008-04-08, hr=12} [srcpart]
-              -mr-10005default.srcpart{ds=2008-04-09, hr=11} [srcpart]
-              -mr-10006default.srcpart{ds=2008-04-09, hr=12} [srcpart]
+              -mr-10004default.srcpart{ds=2008-04-08, hr=11} [srcpart]
+              -mr-10005default.srcpart{ds=2008-04-08, hr=12} [srcpart]
+              -mr-10006default.srcpart{ds=2008-04-09, hr=11} [srcpart]
+              -mr-10007default.srcpart{ds=2008-04-09, hr=12} [srcpart]
             Path -> Partition:
-              -mr-10003default.srcpart{ds=2008-04-08, hr=11} 
+              -mr-10004default.srcpart{ds=2008-04-08, hr=11} 
                 Partition
                   input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1249,7 +1249,7 @@ STAGE PLANS:
                     serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                     name: default.srcpart
                   name: default.srcpart
-              -mr-10004default.srcpart{ds=2008-04-08, hr=12} 
+              -mr-10005default.srcpart{ds=2008-04-08, hr=12} 
                 Partition
                   input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1294,7 +1294,7 @@ STAGE PLANS:
                     serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                     name: default.srcpart
                   name: default.srcpart
-              -mr-10005default.srcpart{ds=2008-04-09, hr=11} 
+              -mr-10006default.srcpart{ds=2008-04-09, hr=11} 
                 Partition
                   input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1339,7 +1339,7 @@ STAGE PLANS:
                     serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                     name: default.srcpart
                   name: default.srcpart
-              -mr-10006default.srcpart{ds=2008-04-09, hr=12} 
+              -mr-10007default.srcpart{ds=2008-04-09, hr=12} 
                 Partition
                   input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1385,10 +1385,10 @@ STAGE PLANS:
                     name: default.srcpart
                   name: default.srcpart
             Truncated Path -> Alias:
-              -mr-10003default.srcpart{ds=2008-04-08, hr=11} [srcpart]
-              -mr-10004default.srcpart{ds=2008-04-08, hr=12} [srcpart]
-              -mr-10005default.srcpart{ds=2008-04-09, hr=11} [srcpart]
-              -mr-10006default.srcpart{ds=2008-04-09, hr=12} [srcpart]
+              -mr-10004default.srcpart{ds=2008-04-08, hr=11} [srcpart]
+              -mr-10005default.srcpart{ds=2008-04-08, hr=12} [srcpart]
+              -mr-10006default.srcpart{ds=2008-04-09, hr=11} [srcpart]
+              -mr-10007default.srcpart{ds=2008-04-09, hr=12} [srcpart]
         Reducer 2 
             Needs Tagging: false
             Reduce Operator Tree:
@@ -1645,9 +1645,9 @@ STAGE PLANS:
                       tag: 0
                       auto parallelism: true
             Path -> Alias:
-              -mr-10002default.src{} [src]
+              -mr-10003default.src{} [src]
             Path -> Partition:
-              -mr-10002default.src{} 
+              -mr-10003default.src{} 
                 Partition
                   input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1691,7 +1691,7 @@ STAGE PLANS:
                     name: default.src
                   name: default.src
             Truncated Path -> Alias:
-              -mr-10002default.src{} [src]
+              -mr-10003default.src{} [src]
         Map 5 
             Map Operator Tree:
                 TableScan
@@ -1832,9 +1832,9 @@ STAGE PLANS:
                       value expressions: key (type: string)
                       auto parallelism: true
             Path -> Alias:
-              -mr-10002default.src{} [s1]
+              -mr-10003default.src{} [s1]
             Path -> Partition:
-              -mr-10002default.src{} 
+              -mr-10003default.src{} 
                 Partition
                   input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1878,7 +1878,7 @@ STAGE PLANS:
                     name: default.src
                   name: default.src
             Truncated Path -> Alias:
-              -mr-10002default.src{} [s1]
+              -mr-10003default.src{} [s1]
         Map 3 
             Map Operator Tree:
                 TableScan
@@ -1898,9 +1898,9 @@ STAGE PLANS:
                       value expressions: key (type: string)
                       auto parallelism: true
             Path -> Alias:
-              -mr-10003default.src{} [s2]
+              -mr-10004default.src{} [s2]
             Path -> Partition:
-              -mr-10003default.src{} 
+              -mr-10004default.src{} 
                 Partition
                   input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1944,7 +1944,7 @@ STAGE PLANS:
                     name: default.src
                   name: default.src
             Truncated Path -> Alias:
-              -mr-10003default.src{} [s2]
+              -mr-10004default.src{} [s2]
         Reducer 2 
             Needs Tagging: false
             Reduce Operator Tree:
@@ -2059,9 +2059,9 @@ STAGE PLANS:
                         value expressions: _col0 (type: bigint)
                         auto parallelism: false
             Path -> Alias:
-              -mr-10002default.src{} [src]
+              -mr-10003default.src{} [src]
             Path -> Partition:
-              -mr-10002default.src{} 
+              -mr-10003default.src{} 
                 Partition
                   input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -2105,7 +2105,7 @@ STAGE PLANS:
                     name: default.src
                   name: default.src
             Truncated Path -> Alias:
-              -mr-10002default.src{} [src]
+              -mr-10003default.src{} [src]
         Reducer 2 
             Needs Tagging: false
             Reduce Operator Tree:

http://git-wip-us.apache.org/repos/asf/hive/blob/884ff9ca/ql/src/test/results/clientpositive/tez/stats_counter.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/stats_counter.q.out b/ql/src/test/results/clientpositive/tez/stats_counter.q.out
deleted file mode 100644
index 8b3dcea..0000000
--- a/ql/src/test/results/clientpositive/tez/stats_counter.q.out
+++ /dev/null
@@ -1,102 +0,0 @@
-PREHOOK: query: -- by analyze
-create table dummy1 as select * from src
-PREHOOK: type: CREATETABLE_AS_SELECT
-PREHOOK: Input: default@src
-PREHOOK: Output: database:default
-PREHOOK: Output: default@dummy1
-POSTHOOK: query: -- by analyze
-create table dummy1 as select * from src
-POSTHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: Input: default@src
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dummy1
-PREHOOK: query: analyze table dummy1 compute statistics
-PREHOOK: type: QUERY
-PREHOOK: Input: default@dummy1
-PREHOOK: Output: default@dummy1
-POSTHOOK: query: analyze table dummy1 compute statistics
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dummy1
-POSTHOOK: Output: default@dummy1
-PREHOOK: query: desc formatted dummy1
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@dummy1
-POSTHOOK: query: desc formatted dummy1
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@dummy1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Detailed Table Information	 	 
-Database:           	default             	 
-#### A masked pattern was here ####
-Retention:          	0                   	 
-#### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
-Table Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	500                 
-	rawDataSize         	5312                
-	totalSize           	5812                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: -- by autogather
-create table dummy2 as select * from src
-PREHOOK: type: CREATETABLE_AS_SELECT
-PREHOOK: Input: default@src
-PREHOOK: Output: database:default
-PREHOOK: Output: default@dummy2
-POSTHOOK: query: -- by autogather
-create table dummy2 as select * from src
-POSTHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: Input: default@src
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dummy2
-PREHOOK: query: desc formatted dummy2
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@dummy2
-POSTHOOK: query: desc formatted dummy2
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@dummy2
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Detailed Table Information	 	 
-Database:           	default             	 
-#### A masked pattern was here ####
-Retention:          	0                   	 
-#### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
-Table Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	500                 
-	rawDataSize         	5312                
-	totalSize           	5812                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
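
The deleted tez/stats_counter.q.out above covered both stats paths for unpartitioned tables; a minimal sketch, using only statements that appear in the removed output:

    -- by analyze: explicit stats collection
    create table dummy1 as select * from src;
    analyze table dummy1 compute statistics;
    desc formatted dummy1;   -- Table Parameters: numRows=500, totalSize=5812
    -- by autogather: stats collected as a side effect of the CTAS itself
    create table dummy2 as select * from src;
    desc formatted dummy2;   -- same stats, no explicit analyze needed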

http://git-wip-us.apache.org/repos/asf/hive/blob/884ff9ca/ql/src/test/results/clientpositive/tez/stats_counter_partitioned.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/stats_counter_partitioned.q.out b/ql/src/test/results/clientpositive/tez/stats_counter_partitioned.q.out
deleted file mode 100644
index 626dcff..0000000
--- a/ql/src/test/results/clientpositive/tez/stats_counter_partitioned.q.out
+++ /dev/null
@@ -1,465 +0,0 @@
-PREHOOK: query: -- partitioned table analyze 
-
-create table dummy (key string, value string) partitioned by (ds string, hr string)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@dummy
-POSTHOOK: query: -- partitioned table analyze 
-
-create table dummy (key string, value string) partitioned by (ds string, hr string)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dummy
-PREHOOK: query: load data local inpath '../../data/files/kv1.txt' into table dummy partition (ds='2008',hr='12')
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@dummy
-POSTHOOK: query: load data local inpath '../../data/files/kv1.txt' into table dummy partition (ds='2008',hr='12')
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@dummy
-POSTHOOK: Output: default@dummy@ds=2008/hr=12
-PREHOOK: query: load data local inpath '../../data/files/kv1.txt' into table dummy partition (ds='2008',hr='11')
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@dummy
-POSTHOOK: query: load data local inpath '../../data/files/kv1.txt' into table dummy partition (ds='2008',hr='11')
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@dummy
-POSTHOOK: Output: default@dummy@ds=2008/hr=11
-PREHOOK: query: analyze table dummy partition (ds,hr) compute statistics
-PREHOOK: type: QUERY
-PREHOOK: Input: default@dummy
-PREHOOK: Input: default@dummy@ds=2008/hr=11
-PREHOOK: Input: default@dummy@ds=2008/hr=12
-PREHOOK: Output: default@dummy
-PREHOOK: Output: default@dummy@ds=2008/hr=11
-PREHOOK: Output: default@dummy@ds=2008/hr=12
-POSTHOOK: query: analyze table dummy partition (ds,hr) compute statistics
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dummy
-POSTHOOK: Input: default@dummy@ds=2008/hr=11
-POSTHOOK: Input: default@dummy@ds=2008/hr=12
-POSTHOOK: Output: default@dummy
-POSTHOOK: Output: default@dummy@ds=2008/hr=11
-POSTHOOK: Output: default@dummy@ds=2008/hr=12
-PREHOOK: query: describe formatted dummy partition (ds='2008', hr='11')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@dummy
-POSTHOOK: query: describe formatted dummy partition (ds='2008', hr='11')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@dummy
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[2008, 11]          	 
-Database:           	default             	 
-Table:              	dummy               	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	500                 
-	rawDataSize         	5312                
-	totalSize           	5812                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: describe formatted dummy partition (ds='2008', hr='12')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@dummy
-POSTHOOK: query: describe formatted dummy partition (ds='2008', hr='12')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@dummy
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[2008, 12]          	 
-Database:           	default             	 
-Table:              	dummy               	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	500                 
-	rawDataSize         	5312                
-	totalSize           	5812                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: drop table dummy
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@dummy
-PREHOOK: Output: default@dummy
-POSTHOOK: query: drop table dummy
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@dummy
-POSTHOOK: Output: default@dummy
-PREHOOK: query: -- static partitioned table on insert
-
-create table dummy (key string, value string) partitioned by (ds string, hr string)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@dummy
-POSTHOOK: query: -- static partitioned table on insert
-
-create table dummy (key string, value string) partitioned by (ds string, hr string)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dummy
-PREHOOK: query: insert overwrite table dummy partition (ds='10',hr='11') select * from src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@dummy@ds=10/hr=11
-POSTHOOK: query: insert overwrite table dummy partition (ds='10',hr='11') select * from src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dummy@ds=10/hr=11
-POSTHOOK: Lineage: dummy PARTITION(ds=10,hr=11).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dummy PARTITION(ds=10,hr=11).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: insert overwrite table dummy partition (ds='10',hr='12') select * from src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@dummy@ds=10/hr=12
-POSTHOOK: query: insert overwrite table dummy partition (ds='10',hr='12') select * from src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dummy@ds=10/hr=12
-POSTHOOK: Lineage: dummy PARTITION(ds=10,hr=12).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dummy PARTITION(ds=10,hr=12).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: describe formatted dummy partition (ds='10', hr='11')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@dummy
-POSTHOOK: query: describe formatted dummy partition (ds='10', hr='11')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@dummy
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[10, 11]            	 
-Database:           	default             	 
-Table:              	dummy               	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	500                 
-	rawDataSize         	5312                
-	totalSize           	5812                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: describe formatted dummy partition (ds='10', hr='12')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@dummy
-POSTHOOK: query: describe formatted dummy partition (ds='10', hr='12')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@dummy
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[10, 12]            	 
-Database:           	default             	 
-Table:              	dummy               	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	500                 
-	rawDataSize         	5312                
-	totalSize           	5812                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: drop table dummy
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@dummy
-PREHOOK: Output: default@dummy
-POSTHOOK: query: drop table dummy
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@dummy
-POSTHOOK: Output: default@dummy
-PREHOOK: query: -- dynamic partitioned table on insert
-
-create table dummy (key int) partitioned by (hr int)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@dummy
-POSTHOOK: query: -- dynamic partitioned table on insert
-
-create table dummy (key int) partitioned by (hr int)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dummy
-PREHOOK: query: CREATE TABLE tbl(key int, value int) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@tbl
-POSTHOOK: query: CREATE TABLE tbl(key int, value int) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@tbl
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/tbl.txt' OVERWRITE INTO TABLE tbl
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@tbl
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/tbl.txt' OVERWRITE INTO TABLE tbl
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@tbl
-PREHOOK: query: insert overwrite table dummy partition (hr) select * from tbl
-PREHOOK: type: QUERY
-PREHOOK: Input: default@tbl
-PREHOOK: Output: default@dummy
-POSTHOOK: query: insert overwrite table dummy partition (hr) select * from tbl
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tbl
-POSTHOOK: Output: default@dummy@hr=1994
-POSTHOOK: Output: default@dummy@hr=1996
-POSTHOOK: Output: default@dummy@hr=1997
-POSTHOOK: Output: default@dummy@hr=1998
-POSTHOOK: Lineage: dummy PARTITION(hr=1994).key SIMPLE [(tbl)tbl.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: dummy PARTITION(hr=1996).key SIMPLE [(tbl)tbl.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: dummy PARTITION(hr=1997).key SIMPLE [(tbl)tbl.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: dummy PARTITION(hr=1998).key SIMPLE [(tbl)tbl.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: describe formatted dummy partition (hr=1997)
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@dummy
-POSTHOOK: query: describe formatted dummy partition (hr=1997)
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@dummy
-# col_name            	data_type           	comment             
-	 	 
-key                 	int                 	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-hr                  	int                 	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[1997]              	 
-Database:           	default             	 
-Table:              	dummy               	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	6                   
-	rawDataSize         	6                   
-	totalSize           	12                  
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: describe formatted dummy partition (hr=1994)
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@dummy
-POSTHOOK: query: describe formatted dummy partition (hr=1994)
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@dummy
-# col_name            	data_type           	comment             
-	 	 
-key                 	int                 	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-hr                  	int                 	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[1994]              	 
-Database:           	default             	 
-Table:              	dummy               	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	1                   
-	rawDataSize         	1                   
-	totalSize           	2                   
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: describe formatted dummy partition (hr=1998)
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@dummy
-POSTHOOK: query: describe formatted dummy partition (hr=1998)
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@dummy
-# col_name            	data_type           	comment             
-	 	 
-key                 	int                 	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-hr                  	int                 	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[1998]              	 
-Database:           	default             	 
-Table:              	dummy               	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	2                   
-	rawDataSize         	2                   
-	totalSize           	4                   
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: describe formatted dummy partition (hr=1996)
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@dummy
-POSTHOOK: query: describe formatted dummy partition (hr=1996)
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@dummy
-# col_name            	data_type           	comment             
-	 	 
-key                 	int                 	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-hr                  	int                 	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[1996]              	 
-Database:           	default             	 
-Table:              	dummy               	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	1                   
-	rawDataSize         	1                   
-	totalSize           	2                   
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: drop table tbl
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@tbl
-PREHOOK: Output: default@tbl
-POSTHOOK: query: drop table tbl
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@tbl
-POSTHOOK: Output: default@tbl
-PREHOOK: query: drop table dummy
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@dummy
-PREHOOK: Output: default@dummy
-POSTHOOK: query: drop table dummy
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@dummy
-POSTHOOK: Output: default@dummy


[2/7] hive git commit: HIVE-12411: Remove counter based stats collection mechanism (Pengcheng Xiong, reviewed by Ashutosh Chauhan)

Posted by om...@apache.org.
HIVE-12411: Remove counter based stats collection mechanism (Pengcheng Xiong, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/884ff9ca
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/884ff9ca
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/884ff9ca

Branch: refs/heads/master-fixed
Commit: 884ff9caf4dff634a3501ac6f3daa689d1ce751a
Parents: 6e429d8
Author: Pengcheng Xiong <px...@apache.org>
Authored: Tue Nov 24 17:09:40 2015 +0800
Committer: Owen O'Malley <om...@apache.org>
Committed: Tue Nov 24 12:10:09 2015 -0800

----------------------------------------------------------------------
 .../hadoop/hive/common/StatsSetupConst.java     |  13 -
 .../org/apache/hadoop/hive/conf/HiveConf.java   |   8 +-
 data/conf/llap/hive-site.xml                    |   2 +-
 data/conf/spark/standalone/hive-site.xml        |   2 +-
 data/conf/spark/yarn-client/hive-site.xml       |   2 +-
 data/conf/tez/hive-site.xml                     |   4 +-
 .../hive/ql/stats/CounterStatsAggregator.java   |  82 ----
 .../ql/stats/CounterStatsAggregatorSpark.java   |  58 ---
 .../ql/stats/CounterStatsAggregatorTez.java     |  79 ----
 .../hive/ql/stats/CounterStatsPublisher.java    |  66 ---
 .../hadoop/hive/ql/stats/StatsFactory.java      |  11 -
 .../test/queries/clientpositive/index_bitmap3.q |   1 -
 .../queries/clientpositive/index_bitmap_auto.q  |   1 -
 .../test/queries/clientpositive/stats_counter.q |  16 -
 .../clientpositive/stats_counter_partitioned.q  |  45 --
 .../clientpositive/llap/stats_counter.q.out     | 102 ----
 .../llap/stats_counter_partitioned.q.out        | 465 -------------------
 .../clientpositive/spark/stats_counter.q.out    | 102 ----
 .../spark/stats_counter_partitioned.q.out       | 465 -------------------
 .../results/clientpositive/stats_counter.q.out  | 102 ----
 .../stats_counter_partitioned.q.out             | 465 -------------------
 .../clientpositive/tez/metadataonly1.q.out      |  72 +--
 .../clientpositive/tez/optimize_nullscan.q.out  |  90 ++--
 .../clientpositive/tez/stats_counter.q.out      | 102 ----
 .../tez/stats_counter_partitioned.q.out         | 465 -------------------
 25 files changed, 88 insertions(+), 2732 deletions(-)
----------------------------------------------------------------------
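For clusters that previously ran with hive.stats.dbclass=counter, the
filesystem collector (or a user-supplied custom one) is the replacement once
this lands. A minimal sketch of selecting the fs collector programmatically;
the wrapper class and main() are hypothetical, and only the ConfVars usage
comes from the patch below:

import org.apache.hadoop.hive.conf.HiveConf;

public class StatsDbClassMigration {
  public static void main(String[] args) {
    HiveConf conf = new HiveConf();
    // "counter" is no longer accepted by the hive.stats.dbclass PatternSet;
    // "fs" (filesystem) and "custom" are the remaining options.
    conf.setVar(HiveConf.ConfVars.HIVESTATSDBCLASS, "fs");
    System.out.println(HiveConf.getVar(conf, HiveConf.ConfVars.HIVESTATSDBCLASS));
  }
}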


http://git-wip-us.apache.org/repos/asf/hive/blob/884ff9ca/common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java b/common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java
index 0a44bde..2ff76ee 100644
--- a/common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java
+++ b/common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java
@@ -31,19 +31,6 @@ import java.util.Map;
 public class StatsSetupConst {
 
   public enum StatDB {
-    counter {
-      @Override
-      public String getPublisher(Configuration conf) {
-        return "org.apache.hadoop.hive.ql.stats.CounterStatsPublisher"; }
-      @Override
-      public String getAggregator(Configuration conf) {
-        if (HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("tez")) {
-          return "org.apache.hadoop.hive.ql.stats.CounterStatsAggregatorTez";
-        } else if (HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("spark")) {
-          return "org.apache.hadoop.hive.ql.stats.CounterStatsAggregatorSpark";
-        }
-        return "org.apache.hadoop.hive.ql.stats.CounterStatsAggregator"; }
-    },
     fs {
       @Override
       public String getPublisher(Configuration conf) {
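The surviving StatDB values still hand back publisher/aggregator class names
that get instantiated reflectively. A rough sketch of that lookup (the helper
class is hypothetical; this is not the exact StatsFactory code):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.common.StatsSetupConst.StatDB;
import org.apache.hadoop.util.ReflectionUtils;

public class StatsClassLookup {
  // Ask the StatDB enum for a class name, then instantiate it with the
  // current Configuration, roughly the way StatsFactory does.
  public static Object publisherFor(StatDB db, Configuration conf)
      throws ClassNotFoundException {
    String clsName = db.getPublisher(conf);
    return ReflectionUtils.newInstance(Class.forName(clsName), conf);
  }
}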

http://git-wip-us.apache.org/repos/asf/hive/blob/884ff9ca/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index f48403b..fffedd9 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -1354,10 +1354,10 @@ public class HiveConf extends Configuration {
     // Statistics
     HIVESTATSAUTOGATHER("hive.stats.autogather", true,
         "A flag to gather statistics automatically during the INSERT OVERWRITE command."),
-    HIVESTATSDBCLASS("hive.stats.dbclass", "fs", new PatternSet("counter", "custom", "fs"),
+    HIVESTATSDBCLASS("hive.stats.dbclass", "fs", new PatternSet("custom", "fs"),
         "The storage that stores temporary Hive statistics. In filesystem based statistics collection ('fs'), \n" +
         "each task writes statistics it has collected in a file on the filesystem, which will be aggregated \n" +
-        "after the job has finished. Supported values are fs (filesystem), counter, and custom as defined in StatsSetupConst.java."), // StatsSetupConst.StatDB
+        "after the job has finished. Supported values are fs (filesystem) and custom as defined in StatsSetupConst.java."), // StatsSetupConst.StatDB
     HIVE_STATS_DEFAULT_PUBLISHER("hive.stats.default.publisher", "",
         "The Java class (implementing the StatsPublisher interface) that is used by default if hive.stats.dbclass is custom type."),
     HIVE_STATS_DEFAULT_AGGREGATOR("hive.stats.default.aggregator", "",
@@ -1398,10 +1398,6 @@ public class HiveConf extends Configuration {
     HIVE_STATS_KEY_PREFIX_MAX_LENGTH("hive.stats.key.prefix.max.length", 150,
         "Determines if when the prefix of the key used for intermediate stats collection\n" +
         "exceeds a certain length, a hash of the key is used instead.  If the value < 0 then hashing"),
-    HIVE_STATS_KEY_PREFIX_RESERVE_LENGTH("hive.stats.key.prefix.reserve.length", 24,
-        "Reserved length for postfix of stats key. Currently only meaningful for counter type which should\n" +
-        "keep length of full stats key smaller than max length configured by hive.stats.key.prefix.max.length.\n" +
-        "For counter type, it should be bigger than the length of LB spec if exists."),
     HIVE_STATS_KEY_PREFIX("hive.stats.key.prefix", "", "", true), // internal usage only
     // if length of variable length data type cannot be determined this length will be used.
     HIVE_STATS_MAX_VARIABLE_LENGTH("hive.stats.max.variable.length", 100,
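With counter gone, hive.stats.dbclass=custom is the remaining escape hatch for
non-filesystem collection, and the class names then come from
hive.stats.default.publisher / hive.stats.default.aggregator. A hedged sketch
of that selection, using only the ConfVars visible in this hunk (the helper
class is hypothetical):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.common.StatsSetupConst;
import org.apache.hadoop.hive.conf.HiveConf;

public class StatsPublisherSelection {
  public static String publisherClassName(Configuration conf) {
    String dbclass = HiveConf.getVar(conf, HiveConf.ConfVars.HIVESTATSDBCLASS);
    if ("custom".equalsIgnoreCase(dbclass)) {
      // user-supplied class, named in hive.stats.default.publisher
      return HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_STATS_DEFAULT_PUBLISHER);
    }
    // "fs" is the only built-in collector left after this change
    return StatsSetupConst.StatDB.fs.getPublisher(conf);
  }
}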

http://git-wip-us.apache.org/repos/asf/hive/blob/884ff9ca/data/conf/llap/hive-site.xml
----------------------------------------------------------------------
diff --git a/data/conf/llap/hive-site.xml b/data/conf/llap/hive-site.xml
index 9e15eda..4bf034a 100644
--- a/data/conf/llap/hive-site.xml
+++ b/data/conf/llap/hive-site.xml
@@ -219,7 +219,7 @@
 <property>
   <name>hive.stats.dbclass</name>
   <value>fs</value>
-  <description>The default storatge that stores temporary hive statistics. Currently, jdbc, hbase and counter type is supported</description>
+  <description>The default storage that stores temporary hive statistics. Currently, only the fs type is supported</description>
 </property>
 
 <property>

http://git-wip-us.apache.org/repos/asf/hive/blob/884ff9ca/data/conf/spark/standalone/hive-site.xml
----------------------------------------------------------------------
diff --git a/data/conf/spark/standalone/hive-site.xml b/data/conf/spark/standalone/hive-site.xml
index 1a45274..459b52e 100644
--- a/data/conf/spark/standalone/hive-site.xml
+++ b/data/conf/spark/standalone/hive-site.xml
@@ -195,7 +195,7 @@
 <property>
   <name>hive.stats.dbclass</name>
   <value>fs</value>
-  <description>The default storatge that stores temporary hive statistics. Currently, jdbc, hbase and counter type is supported</description>
+  <description>The default storage that stores temporary hive statistics. Currently, only the fs type is supported</description>
 </property>
 
 <property>

http://git-wip-us.apache.org/repos/asf/hive/blob/884ff9ca/data/conf/spark/yarn-client/hive-site.xml
----------------------------------------------------------------------
diff --git a/data/conf/spark/yarn-client/hive-site.xml b/data/conf/spark/yarn-client/hive-site.xml
index 346c248..bdab294 100644
--- a/data/conf/spark/yarn-client/hive-site.xml
+++ b/data/conf/spark/yarn-client/hive-site.xml
@@ -195,7 +195,7 @@
 <property>
   <name>hive.stats.dbclass</name>
   <value>fs</value>
-  <description>The default storatge that stores temporary hive statistics. Currently, jdbc, hbase and counter type is supported</description>
+  <description>The default storage that stores temporary hive statistics. Currently, only the fs type is supported</description>
 </property>
 
 <property>

http://git-wip-us.apache.org/repos/asf/hive/blob/884ff9ca/data/conf/tez/hive-site.xml
----------------------------------------------------------------------
diff --git a/data/conf/tez/hive-site.xml b/data/conf/tez/hive-site.xml
index 9e8b04f..ef48d82 100644
--- a/data/conf/tez/hive-site.xml
+++ b/data/conf/tez/hive-site.xml
@@ -218,8 +218,8 @@
 
 <property>
   <name>hive.stats.dbclass</name>
-  <value>counter</value>
-  <description>The default storatge that stores temporary hive statistics. Currently, jdbc, hbase and counter type is supported</description>
+  <value>fs</value>
+  <description>The default storage that stores temporary hive statistics. Currently, only the fs type is supported</description>
 </property>
 
 <property>

http://git-wip-us.apache.org/repos/asf/hive/blob/884ff9ca/ql/src/java/org/apache/hadoop/hive/ql/stats/CounterStatsAggregator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/CounterStatsAggregator.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/CounterStatsAggregator.java
deleted file mode 100644
index 9b66024..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/stats/CounterStatsAggregator.java
+++ /dev/null
@@ -1,82 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.stats;
-
-import java.io.IOException;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.ql.exec.Task;
-import org.apache.hadoop.hive.ql.exec.mr.ExecDriver;
-import org.apache.hadoop.hive.ql.exec.mr.MapRedTask;
-import org.apache.hadoop.mapred.Counters;
-import org.apache.hadoop.mapred.JobClient;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.RunningJob;
-
-public class CounterStatsAggregator implements StatsAggregator {
-
-  private static final Logger LOG = LoggerFactory.getLogger(CounterStatsAggregator.class.getName());
-
-  private Counters counters;
-  private JobClient jc;
-
-  @Override
-  public boolean connect(StatsCollectionContext scc) {
-    Task<?> sourceTask = scc.getTask();
-    if (sourceTask instanceof MapRedTask) {
-      try {
-        jc = new JobClient(toJobConf(scc.getHiveConf()));
-        RunningJob job = jc.getJob(((MapRedTask)sourceTask).getJobID());
-        if (job != null) {
-          counters = job.getCounters();
-        }
-      } catch (Exception e) {
-        LOG.error("Failed to get Job instance for " + sourceTask.getJobID(),e);
-      }
-    }
-    return counters != null;
-  }
-
-  private JobConf toJobConf(Configuration hconf) {
-    return hconf instanceof JobConf ? (JobConf)hconf : new JobConf(hconf, ExecDriver.class);
-  }
-
-  @Override
-  public String aggregateStats(String counterGrpName, String statType) {
-    long value = 0;
-    if (counters != null) {
-      // In case of counters, aggregation is done by JobTracker / MR AM itself
-      // so no need to aggregate, simply return the counter value for requested stat.
-      value = counters.getGroup(counterGrpName).getCounter(statType);
-    }
-    return String.valueOf(value);
-  }
-
-  @Override
-  public boolean closeConnection(StatsCollectionContext scc) {
-    try {
-      jc.close();
-    } catch (IOException e) {
-      LOG.error("Error closing job client for stats aggregator.", e);
-    }
-    return true;
-  }
-}
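Anyone carrying a custom aggregator forward still has to satisfy the contract
the deleted class implemented. A minimal no-op sketch, assuming the
StatsAggregator interface exactly as it is used above (the class itself is
hypothetical):

import org.apache.hadoop.hive.ql.stats.StatsAggregator;
import org.apache.hadoop.hive.ql.stats.StatsCollectionContext;

public class NoOpStatsAggregator implements StatsAggregator {
  @Override
  public boolean connect(StatsCollectionContext scc) {
    return true; // nothing to connect to in this sketch
  }

  @Override
  public String aggregateStats(String keyPrefix, String statType) {
    // a real implementation returns the aggregated value for (keyPrefix, statType)
    return "0";
  }

  @Override
  public boolean closeConnection(StatsCollectionContext scc) {
    return true;
  }
}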

http://git-wip-us.apache.org/repos/asf/hive/blob/884ff9ca/ql/src/java/org/apache/hadoop/hive/ql/stats/CounterStatsAggregatorSpark.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/CounterStatsAggregatorSpark.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/CounterStatsAggregatorSpark.java
deleted file mode 100644
index 7ac01a7..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/stats/CounterStatsAggregatorSpark.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p/>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p/>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.stats;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.ql.exec.Task;
-import org.apache.hadoop.hive.ql.exec.spark.SparkTask;
-import org.apache.hive.spark.counter.SparkCounters;
-
-public class CounterStatsAggregatorSpark
-  implements StatsAggregator {
-
-  private static final Logger LOG = LoggerFactory.getLogger(CounterStatsAggregatorSpark.class);
-
-  private SparkCounters sparkCounters;
-
-  @SuppressWarnings("rawtypes")
-  @Override
-  public boolean connect(StatsCollectionContext scc) {
-    SparkTask task = (SparkTask) scc.getTask();
-    sparkCounters = task.getSparkCounters();
-    if (sparkCounters == null) {
-      return false;
-    }
-    return true;
-  }
-
-  @Override
-  public String aggregateStats(String keyPrefix, String statType) {
-    long value = sparkCounters.getValue(keyPrefix, statType);
-    String result = String.valueOf(value);
-    LOG.info(
-      String.format("Counter based stats for (%s, %s) are: %s", keyPrefix, statType, result));
-    return result;
-  }
-
-  @Override
-  public boolean closeConnection(StatsCollectionContext scc) {
-    return true;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/884ff9ca/ql/src/java/org/apache/hadoop/hive/ql/stats/CounterStatsAggregatorTez.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/CounterStatsAggregatorTez.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/CounterStatsAggregatorTez.java
deleted file mode 100644
index bb51fea..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/stats/CounterStatsAggregatorTez.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.stats;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.hadoop.hive.ql.exec.Task;
-import org.apache.hadoop.hive.ql.exec.tez.TezTask;
-import org.apache.tez.common.counters.TezCounters;
-
-/**
- * This class aggregates stats via counters and does so for Tez Tasks.
- * With dbclass=counters this class will compute table/partition statistics
- * using hadoop counters. They will be published using special keys and
- * then retrieved on the client after the insert/ctas statement ran.
- */
-public class CounterStatsAggregatorTez implements StatsAggregator {
-
-  private static final Logger LOG = LoggerFactory.getLogger(CounterStatsAggregatorTez.class.getName());
-
-  private TezCounters counters;
-  private final CounterStatsAggregator mrAggregator;
-  private boolean delegate;
-
-  public CounterStatsAggregatorTez() {
-    mrAggregator = new CounterStatsAggregator();
-  }
-
-  @Override
-  public boolean connect(StatsCollectionContext scc) {
-    Task sourceTask = scc.getTask();
-    if (!(sourceTask instanceof TezTask)) {
-      delegate = true;
-      return mrAggregator.connect(scc);
-    }
-    counters = ((TezTask) sourceTask).getTezCounters();
-    return counters != null;
-  }
-
-  @Override
-  public String aggregateStats(String keyPrefix, String statType) {
-    String result;
-
-    if (delegate) {
-      result = mrAggregator.aggregateStats(keyPrefix, statType);
-    } else {
-      long value = 0;
-      for (String groupName : counters.getGroupNames()) {
-        if (groupName.startsWith(keyPrefix)) {
-          value += counters.getGroup(groupName).findCounter(statType).getValue();
-        }
-      }
-      result = String.valueOf(value);
-    }
-    LOG.info("Counter based stats for ("+keyPrefix+") are: "+result);
-    return result;
-  }
-
-  @Override
-  public boolean closeConnection(StatsCollectionContext scc) {
-    return true;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/884ff9ca/ql/src/java/org/apache/hadoop/hive/ql/stats/CounterStatsPublisher.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/CounterStatsPublisher.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/CounterStatsPublisher.java
deleted file mode 100644
index ab3d3cf..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/stats/CounterStatsPublisher.java
+++ /dev/null
@@ -1,66 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.stats;
-
-import java.util.Map;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.hadoop.hive.ql.exec.MapredContext;
-import org.apache.hadoop.mapred.Reporter;
-
-public class CounterStatsPublisher implements StatsPublisher {
-
-  private static final Logger LOG = LoggerFactory.getLogger(CounterStatsPublisher.class.getName());
-
-  private Reporter reporter;
-
-  @Override
-  public boolean init(StatsCollectionContext context) {
-    return true;
-  }
-
-  @Override
-  public boolean connect(StatsCollectionContext statsContext) {
-    MapredContext context = MapredContext.get();
-    if (context == null || context.getReporter() == null) {
-      return false;
-    }
-    reporter = context.getReporter();
-    return true;
-  }
-
-  @Override
-  public boolean publishStat(String fileID, Map<String, String> stats) {
-    for (Map.Entry<String, String> entry : stats.entrySet()) {
-      try {
-        reporter.incrCounter(fileID, entry.getKey(), Long.valueOf(entry.getValue()));
-      } catch (Exception e) {
-        LOG.error("Failed to increment counter value " + entry.getValue() + " for " + entry.getKey()
-          + ": " + e, e);
-        return false;
-      }
-    }
-    return true;
-  }
-  @Override
-  public boolean closeConnection(StatsCollectionContext context) {
-    return true;
-  }
-}
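Likewise for publishers: a custom implementation plugged in through
hive.stats.dbclass=custom and hive.stats.default.publisher needs the same four
methods the deleted class provided. A minimal sketch (the class is
hypothetical; the interface signatures are taken from the file above):

import java.util.Map;

import org.apache.hadoop.hive.ql.stats.StatsCollectionContext;
import org.apache.hadoop.hive.ql.stats.StatsPublisher;

public class LoggingStatsPublisher implements StatsPublisher {
  @Override
  public boolean init(StatsCollectionContext context) {
    return true;
  }

  @Override
  public boolean connect(StatsCollectionContext statsContext) {
    return true;
  }

  @Override
  public boolean publishStat(String fileID, Map<String, String> stats) {
    // a real publisher persists these; here we only log them
    for (Map.Entry<String, String> entry : stats.entrySet()) {
      System.out.println(fileID + ": " + entry.getKey() + " = " + entry.getValue());
    }
    return true;
  }

  @Override
  public boolean closeConnection(StatsCollectionContext context) {
    return true;
  }
}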

http://git-wip-us.apache.org/repos/asf/hive/blob/884ff9ca/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsFactory.java
index a53fcc0..9f4ed67 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsFactory.java
@@ -30,7 +30,6 @@ import org.apache.hadoop.util.ReflectionUtils;
 
 import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVESTATSDBCLASS;
 import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_STATS_KEY_PREFIX_MAX_LENGTH;
-import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_STATS_KEY_PREFIX_RESERVE_LENGTH;
 
 /**
  * A factory of stats publisher and aggregator implementations of the
@@ -51,16 +50,6 @@ public final class StatsFactory {
       return -1;
     }
     int maxPrefixLength = HiveConf.getIntVar(conf, HIVE_STATS_KEY_PREFIX_MAX_LENGTH);
-    if (HiveConf.getVar(conf, HIVESTATSDBCLASS).equalsIgnoreCase(StatDB.counter.name())) {
-      // see org.apache.hadoop.mapred.Counter or org.apache.hadoop.mapreduce.MRJobConfig
-      int groupNameMax = conf.getInt("mapreduce.job.counters.group.name.max", 128);
-      maxPrefixLength = maxPrefixLength < 0 ? groupNameMax :
-          Math.min(maxPrefixLength, groupNameMax);
-    }
-    if (maxPrefixLength > 0) {
-      int reserve = HiveConf.getIntVar(conf, HIVE_STATS_KEY_PREFIX_RESERVE_LENGTH);
-      return reserve < 0 ? maxPrefixLength : maxPrefixLength - reserve;
-    }
     return maxPrefixLength;
   }
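Net effect of this hunk: the key-prefix limit is now just the configured
hive.stats.key.prefix.max.length, with no counter-specific group-name clamp
and no reserve subtraction. Equivalently (a sketch, ignoring the early -1
guard kept in the method):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.conf.HiveConf;

public class PrefixLength {
  public static int maxPrefixLength(Configuration conf) {
    return HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVE_STATS_KEY_PREFIX_MAX_LENGTH);
  }
}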
 

http://git-wip-us.apache.org/repos/asf/hive/blob/884ff9ca/ql/src/test/queries/clientpositive/index_bitmap3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/index_bitmap3.q b/ql/src/test/queries/clientpositive/index_bitmap3.q
index ed29af9..89d363c 100644
--- a/ql/src/test/queries/clientpositive/index_bitmap3.q
+++ b/ql/src/test/queries/clientpositive/index_bitmap3.q
@@ -1,4 +1,3 @@
-set hive.stats.dbclass=counter;
 set hive.stats.autogather=true;
 
 -- SORT_QUERY_RESULTS

http://git-wip-us.apache.org/repos/asf/hive/blob/884ff9ca/ql/src/test/queries/clientpositive/index_bitmap_auto.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/index_bitmap_auto.q b/ql/src/test/queries/clientpositive/index_bitmap_auto.q
index 2d434d1..2824094 100644
--- a/ql/src/test/queries/clientpositive/index_bitmap_auto.q
+++ b/ql/src/test/queries/clientpositive/index_bitmap_auto.q
@@ -1,4 +1,3 @@
-set hive.stats.dbclass=counter;
 set hive.stats.autogather=true;
 
 -- SORT_QUERY_RESULTS

http://git-wip-us.apache.org/repos/asf/hive/blob/884ff9ca/ql/src/test/queries/clientpositive/stats_counter.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/stats_counter.q b/ql/src/test/queries/clientpositive/stats_counter.q
deleted file mode 100644
index 3c1f132..0000000
--- a/ql/src/test/queries/clientpositive/stats_counter.q
+++ /dev/null
@@ -1,16 +0,0 @@
-set hive.stats.dbclass=counter;
-set hive.stats.autogather=false;
-
--- by analyze
-create table dummy1 as select * from src;
-
-analyze table dummy1 compute statistics;
-desc formatted dummy1;
-
-set hive.stats.dbclass=counter;
-set hive.stats.autogather=true;
-
--- by autogather
-create table dummy2 as select * from src;
-
-desc formatted dummy2;

http://git-wip-us.apache.org/repos/asf/hive/blob/884ff9ca/ql/src/test/queries/clientpositive/stats_counter_partitioned.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/stats_counter_partitioned.q b/ql/src/test/queries/clientpositive/stats_counter_partitioned.q
deleted file mode 100644
index e1274c0..0000000
--- a/ql/src/test/queries/clientpositive/stats_counter_partitioned.q
+++ /dev/null
@@ -1,45 +0,0 @@
-set hive.stats.dbclass=counter;
-set hive.stats.autogather=true;
-set hive.exec.dynamic.partition.mode=nonstrict;
-
--- partitioned table analyze 
-
-create table dummy (key string, value string) partitioned by (ds string, hr string);
-
-load data local inpath '../../data/files/kv1.txt' into table dummy partition (ds='2008',hr='12');
-load data local inpath '../../data/files/kv1.txt' into table dummy partition (ds='2008',hr='11');
-
-analyze table dummy partition (ds,hr) compute statistics;
-describe formatted dummy partition (ds='2008', hr='11');
-describe formatted dummy partition (ds='2008', hr='12');
-
-drop table dummy;
-
--- static partitioned table on insert
-
-create table dummy (key string, value string) partitioned by (ds string, hr string);
-
-insert overwrite table dummy partition (ds='10',hr='11') select * from src;
-insert overwrite table dummy partition (ds='10',hr='12') select * from src;
-
-describe formatted dummy partition (ds='10', hr='11');
-describe formatted dummy partition (ds='10', hr='12');
-
-drop table dummy;
-
--- dynamic partitioned table on insert
-
-create table dummy (key int) partitioned by (hr int);
-                                                                                                      
-CREATE TABLE tbl(key int, value int) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|';
-LOAD DATA LOCAL INPATH '../../data/files/tbl.txt' OVERWRITE INTO TABLE tbl;                           
-                                                                                                      
-insert overwrite table dummy partition (hr) select * from tbl;
-
-describe formatted dummy partition (hr=1997);
-describe formatted dummy partition (hr=1994);
-describe formatted dummy partition (hr=1998);
-describe formatted dummy partition (hr=1996);
-
-drop table tbl;
-drop table dummy; 

http://git-wip-us.apache.org/repos/asf/hive/blob/884ff9ca/ql/src/test/results/clientpositive/llap/stats_counter.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/stats_counter.q.out b/ql/src/test/results/clientpositive/llap/stats_counter.q.out
deleted file mode 100644
index 8b3dcea..0000000
--- a/ql/src/test/results/clientpositive/llap/stats_counter.q.out
+++ /dev/null
@@ -1,102 +0,0 @@
-PREHOOK: query: -- by analyze
-create table dummy1 as select * from src
-PREHOOK: type: CREATETABLE_AS_SELECT
-PREHOOK: Input: default@src
-PREHOOK: Output: database:default
-PREHOOK: Output: default@dummy1
-POSTHOOK: query: -- by analyze
-create table dummy1 as select * from src
-POSTHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: Input: default@src
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dummy1
-PREHOOK: query: analyze table dummy1 compute statistics
-PREHOOK: type: QUERY
-PREHOOK: Input: default@dummy1
-PREHOOK: Output: default@dummy1
-POSTHOOK: query: analyze table dummy1 compute statistics
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dummy1
-POSTHOOK: Output: default@dummy1
-PREHOOK: query: desc formatted dummy1
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@dummy1
-POSTHOOK: query: desc formatted dummy1
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@dummy1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Detailed Table Information	 	 
-Database:           	default             	 
-#### A masked pattern was here ####
-Retention:          	0                   	 
-#### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
-Table Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	500                 
-	rawDataSize         	5312                
-	totalSize           	5812                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: -- by autogather
-create table dummy2 as select * from src
-PREHOOK: type: CREATETABLE_AS_SELECT
-PREHOOK: Input: default@src
-PREHOOK: Output: database:default
-PREHOOK: Output: default@dummy2
-POSTHOOK: query: -- by autogather
-create table dummy2 as select * from src
-POSTHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: Input: default@src
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dummy2
-PREHOOK: query: desc formatted dummy2
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@dummy2
-POSTHOOK: query: desc formatted dummy2
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@dummy2
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Detailed Table Information	 	 
-Database:           	default             	 
-#### A masked pattern was here ####
-Retention:          	0                   	 
-#### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
-Table Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	500                 
-	rawDataSize         	5312                
-	totalSize           	5812                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/884ff9ca/ql/src/test/results/clientpositive/llap/stats_counter_partitioned.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/stats_counter_partitioned.q.out b/ql/src/test/results/clientpositive/llap/stats_counter_partitioned.q.out
deleted file mode 100644
index 626dcff..0000000
--- a/ql/src/test/results/clientpositive/llap/stats_counter_partitioned.q.out
+++ /dev/null
@@ -1,465 +0,0 @@
-PREHOOK: query: -- partitioned table analyze 
-
-create table dummy (key string, value string) partitioned by (ds string, hr string)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@dummy
-POSTHOOK: query: -- partitioned table analyze 
-
-create table dummy (key string, value string) partitioned by (ds string, hr string)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dummy
-PREHOOK: query: load data local inpath '../../data/files/kv1.txt' into table dummy partition (ds='2008',hr='12')
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@dummy
-POSTHOOK: query: load data local inpath '../../data/files/kv1.txt' into table dummy partition (ds='2008',hr='12')
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@dummy
-POSTHOOK: Output: default@dummy@ds=2008/hr=12
-PREHOOK: query: load data local inpath '../../data/files/kv1.txt' into table dummy partition (ds='2008',hr='11')
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@dummy
-POSTHOOK: query: load data local inpath '../../data/files/kv1.txt' into table dummy partition (ds='2008',hr='11')
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@dummy
-POSTHOOK: Output: default@dummy@ds=2008/hr=11
-PREHOOK: query: analyze table dummy partition (ds,hr) compute statistics
-PREHOOK: type: QUERY
-PREHOOK: Input: default@dummy
-PREHOOK: Input: default@dummy@ds=2008/hr=11
-PREHOOK: Input: default@dummy@ds=2008/hr=12
-PREHOOK: Output: default@dummy
-PREHOOK: Output: default@dummy@ds=2008/hr=11
-PREHOOK: Output: default@dummy@ds=2008/hr=12
-POSTHOOK: query: analyze table dummy partition (ds,hr) compute statistics
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dummy
-POSTHOOK: Input: default@dummy@ds=2008/hr=11
-POSTHOOK: Input: default@dummy@ds=2008/hr=12
-POSTHOOK: Output: default@dummy
-POSTHOOK: Output: default@dummy@ds=2008/hr=11
-POSTHOOK: Output: default@dummy@ds=2008/hr=12
-PREHOOK: query: describe formatted dummy partition (ds='2008', hr='11')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@dummy
-POSTHOOK: query: describe formatted dummy partition (ds='2008', hr='11')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@dummy
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[2008, 11]          	 
-Database:           	default             	 
-Table:              	dummy               	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	500                 
-	rawDataSize         	5312                
-	totalSize           	5812                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: describe formatted dummy partition (ds='2008', hr='12')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@dummy
-POSTHOOK: query: describe formatted dummy partition (ds='2008', hr='12')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@dummy
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[2008, 12]          	 
-Database:           	default             	 
-Table:              	dummy               	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	500                 
-	rawDataSize         	5312                
-	totalSize           	5812                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: drop table dummy
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@dummy
-PREHOOK: Output: default@dummy
-POSTHOOK: query: drop table dummy
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@dummy
-POSTHOOK: Output: default@dummy
-PREHOOK: query: -- static partitioned table on insert
-
-create table dummy (key string, value string) partitioned by (ds string, hr string)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@dummy
-POSTHOOK: query: -- static partitioned table on insert
-
-create table dummy (key string, value string) partitioned by (ds string, hr string)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dummy
-PREHOOK: query: insert overwrite table dummy partition (ds='10',hr='11') select * from src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@dummy@ds=10/hr=11
-POSTHOOK: query: insert overwrite table dummy partition (ds='10',hr='11') select * from src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dummy@ds=10/hr=11
-POSTHOOK: Lineage: dummy PARTITION(ds=10,hr=11).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dummy PARTITION(ds=10,hr=11).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: insert overwrite table dummy partition (ds='10',hr='12') select * from src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@dummy@ds=10/hr=12
-POSTHOOK: query: insert overwrite table dummy partition (ds='10',hr='12') select * from src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dummy@ds=10/hr=12
-POSTHOOK: Lineage: dummy PARTITION(ds=10,hr=12).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dummy PARTITION(ds=10,hr=12).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: describe formatted dummy partition (ds='10', hr='11')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@dummy
-POSTHOOK: query: describe formatted dummy partition (ds='10', hr='11')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@dummy
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[10, 11]            	 
-Database:           	default             	 
-Table:              	dummy               	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	500                 
-	rawDataSize         	5312                
-	totalSize           	5812                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: describe formatted dummy partition (ds='10', hr='12')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@dummy
-POSTHOOK: query: describe formatted dummy partition (ds='10', hr='12')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@dummy
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[10, 12]            	 
-Database:           	default             	 
-Table:              	dummy               	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	500                 
-	rawDataSize         	5312                
-	totalSize           	5812                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: drop table dummy
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@dummy
-PREHOOK: Output: default@dummy
-POSTHOOK: query: drop table dummy
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@dummy
-POSTHOOK: Output: default@dummy
-PREHOOK: query: -- dynamic partitioned table on insert
-
-create table dummy (key int) partitioned by (hr int)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@dummy
-POSTHOOK: query: -- dynamic partitioned table on insert
-
-create table dummy (key int) partitioned by (hr int)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dummy
-PREHOOK: query: CREATE TABLE tbl(key int, value int) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@tbl
-POSTHOOK: query: CREATE TABLE tbl(key int, value int) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@tbl
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/tbl.txt' OVERWRITE INTO TABLE tbl
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@tbl
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/tbl.txt' OVERWRITE INTO TABLE tbl
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@tbl
-PREHOOK: query: insert overwrite table dummy partition (hr) select * from tbl
-PREHOOK: type: QUERY
-PREHOOK: Input: default@tbl
-PREHOOK: Output: default@dummy
-POSTHOOK: query: insert overwrite table dummy partition (hr) select * from tbl
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tbl
-POSTHOOK: Output: default@dummy@hr=1994
-POSTHOOK: Output: default@dummy@hr=1996
-POSTHOOK: Output: default@dummy@hr=1997
-POSTHOOK: Output: default@dummy@hr=1998
-POSTHOOK: Lineage: dummy PARTITION(hr=1994).key SIMPLE [(tbl)tbl.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: dummy PARTITION(hr=1996).key SIMPLE [(tbl)tbl.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: dummy PARTITION(hr=1997).key SIMPLE [(tbl)tbl.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: dummy PARTITION(hr=1998).key SIMPLE [(tbl)tbl.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: describe formatted dummy partition (hr=1997)
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@dummy
-POSTHOOK: query: describe formatted dummy partition (hr=1997)
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@dummy
-# col_name            	data_type           	comment             
-	 	 
-key                 	int                 	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-hr                  	int                 	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[1997]              	 
-Database:           	default             	 
-Table:              	dummy               	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	6                   
-	rawDataSize         	6                   
-	totalSize           	12                  
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: describe formatted dummy partition (hr=1994)
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@dummy
-POSTHOOK: query: describe formatted dummy partition (hr=1994)
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@dummy
-# col_name            	data_type           	comment             
-	 	 
-key                 	int                 	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-hr                  	int                 	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[1994]              	 
-Database:           	default             	 
-Table:              	dummy               	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	1                   
-	rawDataSize         	1                   
-	totalSize           	2                   
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: describe formatted dummy partition (hr=1998)
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@dummy
-POSTHOOK: query: describe formatted dummy partition (hr=1998)
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@dummy
-# col_name            	data_type           	comment             
-	 	 
-key                 	int                 	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-hr                  	int                 	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[1998]              	 
-Database:           	default             	 
-Table:              	dummy               	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	2                   
-	rawDataSize         	2                   
-	totalSize           	4                   
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: describe formatted dummy partition (hr=1996)
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@dummy
-POSTHOOK: query: describe formatted dummy partition (hr=1996)
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@dummy
-# col_name            	data_type           	comment             
-	 	 
-key                 	int                 	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-hr                  	int                 	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[1996]              	 
-Database:           	default             	 
-Table:              	dummy               	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	1                   
-	rawDataSize         	1                   
-	totalSize           	2                   
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: drop table tbl
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@tbl
-PREHOOK: Output: default@tbl
-POSTHOOK: query: drop table tbl
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@tbl
-POSTHOOK: Output: default@tbl
-PREHOOK: query: drop table dummy
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@dummy
-PREHOOK: Output: default@dummy
-POSTHOOK: query: drop table dummy
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@dummy
-POSTHOOK: Output: default@dummy

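For context on what this deleted golden file covered: stats_counter_partitioned.q.out walked a partitioned table through LOAD, ANALYZE, and a dynamic-partition INSERT, checking that the basic stats (numFiles, numRows, rawDataSize, totalSize) landed in the partition parameters. A minimal sketch of the dynamic-partition leg follows; hive.exec.dynamic.partition.mode and hive.stats.autogather are standard Hive settings supplied here for completeness, not shown in the diff.

    set hive.exec.dynamic.partition.mode=nonstrict;
    set hive.stats.autogather=true;
    create table dummy (key int) partitioned by (hr int);
    create table tbl (key int, value int) row format delimited fields terminated by '|';
    load data local inpath '../../data/files/tbl.txt' overwrite into table tbl;
    -- The insert creates hr=1994/1996/1997/1998 and, with autogather on,
    -- publishes per-partition basic stats as part of the write:
    insert overwrite table dummy partition (hr) select * from tbl;
    -- The gathered stats appear under "Partition Parameters":
    describe formatted dummy partition (hr=1997);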
http://git-wip-us.apache.org/repos/asf/hive/blob/884ff9ca/ql/src/test/results/clientpositive/spark/stats_counter.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/stats_counter.q.out b/ql/src/test/results/clientpositive/spark/stats_counter.q.out
deleted file mode 100644
index 8b3dcea..0000000
--- a/ql/src/test/results/clientpositive/spark/stats_counter.q.out
+++ /dev/null
@@ -1,102 +0,0 @@
-PREHOOK: query: -- by analyze
-create table dummy1 as select * from src
-PREHOOK: type: CREATETABLE_AS_SELECT
-PREHOOK: Input: default@src
-PREHOOK: Output: database:default
-PREHOOK: Output: default@dummy1
-POSTHOOK: query: -- by analyze
-create table dummy1 as select * from src
-POSTHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: Input: default@src
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dummy1
-PREHOOK: query: analyze table dummy1 compute statistics
-PREHOOK: type: QUERY
-PREHOOK: Input: default@dummy1
-PREHOOK: Output: default@dummy1
-POSTHOOK: query: analyze table dummy1 compute statistics
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dummy1
-POSTHOOK: Output: default@dummy1
-PREHOOK: query: desc formatted dummy1
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@dummy1
-POSTHOOK: query: desc formatted dummy1
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@dummy1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Detailed Table Information	 	 
-Database:           	default             	 
-#### A masked pattern was here ####
-Retention:          	0                   	 
-#### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
-Table Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	500                 
-	rawDataSize         	5312                
-	totalSize           	5812                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: -- by autogather
-create table dummy2 as select * from src
-PREHOOK: type: CREATETABLE_AS_SELECT
-PREHOOK: Input: default@src
-PREHOOK: Output: database:default
-PREHOOK: Output: default@dummy2
-POSTHOOK: query: -- by autogather
-create table dummy2 as select * from src
-POSTHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: Input: default@src
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dummy2
-PREHOOK: query: desc formatted dummy2
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@dummy2
-POSTHOOK: query: desc formatted dummy2
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@dummy2
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Detailed Table Information	 	 
-Database:           	default             	 
-#### A masked pattern was here ####
-Retention:          	0                   	 
-#### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
-Table Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	500                 
-	rawDataSize         	5312                
-	totalSize           	5812                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   

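The deleted spark/stats_counter.q.out above verified the two ways basic table stats get populated. A short sketch; the assumption (not visible in this hunk) is that the corresponding .q file pinned hive.stats.dbclass=counter, the collector this patch removes, and that after the change the filesystem-based publisher (hive.stats.dbclass=fs) covers the same ground.

    -- "by analyze": gather stats explicitly once the table exists
    create table dummy1 as select * from src;
    analyze table dummy1 compute statistics;
    desc formatted dummy1;    -- numRows, rawDataSize, totalSize under Table Parameters
    -- "by autogather": the CTAS itself publishes the stats when
    -- hive.stats.autogather=true (the default)
    create table dummy2 as select * from src;
    desc formatted dummy2;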
http://git-wip-us.apache.org/repos/asf/hive/blob/884ff9ca/ql/src/test/results/clientpositive/spark/stats_counter_partitioned.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/stats_counter_partitioned.q.out b/ql/src/test/results/clientpositive/spark/stats_counter_partitioned.q.out
deleted file mode 100644
index 12e1fbe..0000000
--- a/ql/src/test/results/clientpositive/spark/stats_counter_partitioned.q.out
+++ /dev/null
@@ -1,465 +0,0 @@
-PREHOOK: query: -- partitioned table analyze 
-
-create table dummy (key string, value string) partitioned by (ds string, hr string)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@dummy
-POSTHOOK: query: -- partitioned table analyze 
-
-create table dummy (key string, value string) partitioned by (ds string, hr string)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dummy
-PREHOOK: query: load data local inpath '../../data/files/kv1.txt' into table dummy partition (ds='2008',hr='12')
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@dummy
-POSTHOOK: query: load data local inpath '../../data/files/kv1.txt' into table dummy partition (ds='2008',hr='12')
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@dummy
-POSTHOOK: Output: default@dummy@ds=2008/hr=12
-PREHOOK: query: load data local inpath '../../data/files/kv1.txt' into table dummy partition (ds='2008',hr='11')
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@dummy
-POSTHOOK: query: load data local inpath '../../data/files/kv1.txt' into table dummy partition (ds='2008',hr='11')
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@dummy
-POSTHOOK: Output: default@dummy@ds=2008/hr=11
-PREHOOK: query: analyze table dummy partition (ds,hr) compute statistics
-PREHOOK: type: QUERY
-PREHOOK: Input: default@dummy
-PREHOOK: Input: default@dummy@ds=2008/hr=11
-PREHOOK: Input: default@dummy@ds=2008/hr=12
-PREHOOK: Output: default@dummy
-PREHOOK: Output: default@dummy@ds=2008/hr=11
-PREHOOK: Output: default@dummy@ds=2008/hr=12
-POSTHOOK: query: analyze table dummy partition (ds,hr) compute statistics
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dummy
-POSTHOOK: Input: default@dummy@ds=2008/hr=11
-POSTHOOK: Input: default@dummy@ds=2008/hr=12
-POSTHOOK: Output: default@dummy
-POSTHOOK: Output: default@dummy@ds=2008/hr=11
-POSTHOOK: Output: default@dummy@ds=2008/hr=12
-PREHOOK: query: describe formatted dummy partition (ds='2008', hr='11')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@dummy
-POSTHOOK: query: describe formatted dummy partition (ds='2008', hr='11')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@dummy
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[2008, 11]          	 
-Database:           	default             	 
-Table:              	dummy               	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	500                 
-	rawDataSize         	5312                
-	totalSize           	5812                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: describe formatted dummy partition (ds='2008', hr='12')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@dummy
-POSTHOOK: query: describe formatted dummy partition (ds='2008', hr='12')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@dummy
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[2008, 12]          	 
-Database:           	default             	 
-Table:              	dummy               	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	500                 
-	rawDataSize         	5312                
-	totalSize           	5812                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: drop table dummy
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@dummy
-PREHOOK: Output: default@dummy
-POSTHOOK: query: drop table dummy
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@dummy
-POSTHOOK: Output: default@dummy
-PREHOOK: query: -- static partitioned table on insert
-
-create table dummy (key string, value string) partitioned by (ds string, hr string)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@dummy
-POSTHOOK: query: -- static partitioned table on insert
-
-create table dummy (key string, value string) partitioned by (ds string, hr string)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dummy
-PREHOOK: query: insert overwrite table dummy partition (ds='10',hr='11') select * from src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@dummy@ds=10/hr=11
-POSTHOOK: query: insert overwrite table dummy partition (ds='10',hr='11') select * from src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dummy@ds=10/hr=11
-POSTHOOK: Lineage: dummy PARTITION(ds=10,hr=11).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dummy PARTITION(ds=10,hr=11).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: insert overwrite table dummy partition (ds='10',hr='12') select * from src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@dummy@ds=10/hr=12
-POSTHOOK: query: insert overwrite table dummy partition (ds='10',hr='12') select * from src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dummy@ds=10/hr=12
-POSTHOOK: Lineage: dummy PARTITION(ds=10,hr=12).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dummy PARTITION(ds=10,hr=12).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: describe formatted dummy partition (ds='10', hr='11')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@dummy
-POSTHOOK: query: describe formatted dummy partition (ds='10', hr='11')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@dummy
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[10, 11]            	 
-Database:           	default             	 
-Table:              	dummy               	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	500                 
-	rawDataSize         	5312                
-	totalSize           	5812                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: describe formatted dummy partition (ds='10', hr='12')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@dummy
-POSTHOOK: query: describe formatted dummy partition (ds='10', hr='12')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@dummy
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[10, 12]            	 
-Database:           	default             	 
-Table:              	dummy               	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	500                 
-	rawDataSize         	5312                
-	totalSize           	5812                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: drop table dummy
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@dummy
-PREHOOK: Output: default@dummy
-POSTHOOK: query: drop table dummy
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@dummy
-POSTHOOK: Output: default@dummy
-PREHOOK: query: -- dynamic partitioned table on insert
-
-create table dummy (key int) partitioned by (hr int)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@dummy
-POSTHOOK: query: -- dynamic partitioned table on insert
-
-create table dummy (key int) partitioned by (hr int)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dummy
-PREHOOK: query: CREATE TABLE tbl(key int, value int) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@tbl
-POSTHOOK: query: CREATE TABLE tbl(key int, value int) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@tbl
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/tbl.txt' OVERWRITE INTO TABLE tbl
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@tbl
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/tbl.txt' OVERWRITE INTO TABLE tbl
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@tbl
-PREHOOK: query: insert overwrite table dummy partition (hr) select * from tbl
-PREHOOK: type: QUERY
-PREHOOK: Input: default@tbl
-PREHOOK: Output: default@dummy
-POSTHOOK: query: insert overwrite table dummy partition (hr) select * from tbl
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tbl
-POSTHOOK: Output: default@dummy@hr=1994
-POSTHOOK: Output: default@dummy@hr=1996
-POSTHOOK: Output: default@dummy@hr=1997
-POSTHOOK: Output: default@dummy@hr=1998
-POSTHOOK: Lineage: dummy PARTITION(hr=1994).key SIMPLE [(tbl)tbl.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: dummy PARTITION(hr=1996).key SIMPLE [(tbl)tbl.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: dummy PARTITION(hr=1997).key SIMPLE [(tbl)tbl.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: dummy PARTITION(hr=1998).key SIMPLE [(tbl)tbl.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: describe formatted dummy partition (hr=1997)
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@dummy
-POSTHOOK: query: describe formatted dummy partition (hr=1997)
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@dummy
-# col_name            	data_type           	comment             
-	 	 
-key                 	int                 	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-hr                  	int                 	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[1997]              	 
-Database:           	default             	 
-Table:              	dummy               	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	0                   
-	rawDataSize         	0                   
-	totalSize           	12                  
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: describe formatted dummy partition (hr=1994)
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@dummy
-POSTHOOK: query: describe formatted dummy partition (hr=1994)
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@dummy
-# col_name            	data_type           	comment             
-	 	 
-key                 	int                 	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-hr                  	int                 	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[1994]              	 
-Database:           	default             	 
-Table:              	dummy               	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	0                   
-	rawDataSize         	0                   
-	totalSize           	2                   
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: describe formatted dummy partition (hr=1998)
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@dummy
-POSTHOOK: query: describe formatted dummy partition (hr=1998)
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@dummy
-# col_name            	data_type           	comment             
-	 	 
-key                 	int                 	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-hr                  	int                 	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[1998]              	 
-Database:           	default             	 
-Table:              	dummy               	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	0                   
-	rawDataSize         	0                   
-	totalSize           	4                   
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: describe formatted dummy partition (hr=1996)
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@dummy
-POSTHOOK: query: describe formatted dummy partition (hr=1996)
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@dummy
-# col_name            	data_type           	comment             
-	 	 
-key                 	int                 	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-hr                  	int                 	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[1996]              	 
-Database:           	default             	 
-Table:              	dummy               	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	0                   
-	rawDataSize         	0                   
-	totalSize           	2                   
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: drop table tbl
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@tbl
-PREHOOK: Output: default@tbl
-POSTHOOK: query: drop table tbl
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@tbl
-POSTHOOK: Output: default@tbl
-PREHOOK: query: drop table dummy
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@dummy
-PREHOOK: Output: default@dummy
-POSTHOOK: query: drop table dummy
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@dummy
-POSTHOOK: Output: default@dummy

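One detail visible in the Spark variant above: the dynamically inserted partitions record numRows 0 and rawDataSize 0 where the MapReduce variant records real counts (numRows 6 for hr=1997, for example), consistent with counter-based collection not being wired up on that engine; that reading is inferred from the golden files, not stated in the patch. Backfilling such stats is a one-liner:

    -- Recompute basic stats for every partition of the table in one pass:
    analyze table dummy partition (hr) compute statistics;
    describe formatted dummy partition (hr=1997);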
http://git-wip-us.apache.org/repos/asf/hive/blob/884ff9ca/ql/src/test/results/clientpositive/stats_counter.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats_counter.q.out b/ql/src/test/results/clientpositive/stats_counter.q.out
deleted file mode 100644
index 8b3dcea..0000000
--- a/ql/src/test/results/clientpositive/stats_counter.q.out
+++ /dev/null
@@ -1,102 +0,0 @@
-PREHOOK: query: -- by analyze
-create table dummy1 as select * from src
-PREHOOK: type: CREATETABLE_AS_SELECT
-PREHOOK: Input: default@src
-PREHOOK: Output: database:default
-PREHOOK: Output: default@dummy1
-POSTHOOK: query: -- by analyze
-create table dummy1 as select * from src
-POSTHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: Input: default@src
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dummy1
-PREHOOK: query: analyze table dummy1 compute statistics
-PREHOOK: type: QUERY
-PREHOOK: Input: default@dummy1
-PREHOOK: Output: default@dummy1
-POSTHOOK: query: analyze table dummy1 compute statistics
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dummy1
-POSTHOOK: Output: default@dummy1
-PREHOOK: query: desc formatted dummy1
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@dummy1
-POSTHOOK: query: desc formatted dummy1
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@dummy1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Detailed Table Information	 	 
-Database:           	default             	 
-#### A masked pattern was here ####
-Retention:          	0                   	 
-#### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
-Table Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	500                 
-	rawDataSize         	5312                
-	totalSize           	5812                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: -- by autogather
-create table dummy2 as select * from src
-PREHOOK: type: CREATETABLE_AS_SELECT
-PREHOOK: Input: default@src
-PREHOOK: Output: database:default
-PREHOOK: Output: default@dummy2
-POSTHOOK: query: -- by autogather
-create table dummy2 as select * from src
-POSTHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: Input: default@src
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dummy2
-PREHOOK: query: desc formatted dummy2
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@dummy2
-POSTHOOK: query: desc formatted dummy2
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@dummy2
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Detailed Table Information	 	 
-Database:           	default             	 
-#### A masked pattern was here ####
-Retention:          	0                   	 
-#### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
-Table Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	500                 
-	rawDataSize         	5312                
-	totalSize           	5812                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   


[6/7] hive git commit: HIVE-12509: Regenerate q files after HIVE-12017 went in (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

Posted by om...@apache.org.
HIVE-12509: Regenerate q files after HIVE-12017 went in (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/e34588e1
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/e34588e1
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/e34588e1

Branch: refs/heads/master-fixed
Commit: e34588e1b2c7fca678ec47e4659aca2bbc0a2ce4
Parents: 884ff9c
Author: Jesus Camacho Rodriguez <jc...@apache.org>
Authored: Tue Nov 24 17:08:20 2015 +0100
Committer: Owen O'Malley <om...@apache.org>
Committed: Tue Nov 24 12:10:10 2015 -0800

----------------------------------------------------------------------
 .../results/clientpositive/subquery_notin.q.out | 328 ++++++++++---------
 .../subquery_notin_having.q.java1.7.out         |  94 +++---
 .../clientpositive/tez/explainuser_1.q.out      | 171 +++++-----
 .../tez/tez_dynpart_hashjoin_3.q.out            | 150 ++++-----
 .../clientpositive/tez/tez_smb_empty.q.out      |  38 ++-
 5 files changed, 398 insertions(+), 383 deletions(-)
----------------------------------------------------------------------
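The largest of these regenerations, subquery_notin.q.out, shows how the planner now stages a null-safe NOT IN. A sketch of the query shape involved; the null-semantics explanation is standard SQL, and the operator details are read off the hunks below.

    explain
    select *
    from src
    where src.key not in (select key from src s1 where s1.key > '2');

    -- NOT IN is null-sensitive: a single NULL from the subquery makes the
    -- predicate unknown for every outer row. The regenerated plan therefore
    -- first computes
    --   count(*) from src where (key > '2') and key is null
    -- as a one-row guard (Stage-3), cross-joins it with src (the source of
    -- the "Shuffle Join ... is a cross product" warning, now in Stage-1),
    -- and only then runs the left outer join on key, keeping rows whose
    -- join column came back null.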


http://git-wip-us.apache.org/repos/asf/hive/blob/e34588e1/ql/src/test/results/clientpositive/subquery_notin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/subquery_notin.q.out b/ql/src/test/results/clientpositive/subquery_notin.q.out
index 552999d..56553fd 100644
--- a/ql/src/test/results/clientpositive/subquery_notin.q.out
+++ b/ql/src/test/results/clientpositive/subquery_notin.q.out
@@ -1,4 +1,4 @@
-Warning: Shuffle Join JOIN[23][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[20][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: -- non agg, non corr
 explain
 select * 
@@ -18,70 +18,64 @@ where src.key not in
   )
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1, Stage-4
-  Stage-4 is a root stage
+  Stage-3 is a root stage
+  Stage-1 depends on stages: Stage-3
+  Stage-2 depends on stages: Stage-1
   Stage-0 depends on stages: Stage-2
 
 STAGE PLANS:
-  Stage: Stage-1
+  Stage: Stage-3
     Map Reduce
       Map Operator Tree:
           TableScan
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              Reduce Output Operator
-                key expressions: _col0 (type: string)
-                sort order: +
-                Map-reduce partition columns: _col0 (type: string)
-                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                value expressions: _col1 (type: string)
-          TableScan
-            alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (key > '2') (type: boolean)
-              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((key > '2') and key is null) (type: boolean)
+              Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: key (type: string)
-                outputColumnNames: _col0
-                Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string)
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  aggregations: count()
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    sort order: 
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: _col0 (type: bigint)
       Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Left Outer Join0 to 1
-          keys:
-            0 _col0 (type: string)
-            1 _col0 (type: string)
-          outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+        Group By Operator
+          aggregations: count(VALUE._col0)
+          mode: mergepartial
+          outputColumnNames: _col0
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
-            predicate: _col2 is null (type: boolean)
-            Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+            predicate: (_col0 = 0) (type: boolean)
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                table:
+                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
-  Stage: Stage-2
+  Stage: Stage-1
     Map Reduce
       Map Operator Tree:
           TableScan
-            Reduce Output Operator
-              sort order: 
-              Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col0 (type: string), _col1 (type: string)
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: key (type: string), value (type: string)
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Reduce Output Operator
+                sort order: 
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                value expressions: _col0 (type: string), _col1 (type: string)
           TableScan
             Reduce Output Operator
               sort order: 
@@ -94,52 +88,62 @@ STAGE PLANS:
             0 
             1 
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
             table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
-  Stage: Stage-4
+  Stage: Stage-2
     Map Reduce
       Map Operator Tree:
           TableScan
+            Reduce Output Operator
+              key expressions: _col0 (type: string)
+              sort order: +
+              Map-reduce partition columns: _col0 (type: string)
+              Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+              value expressions: _col1 (type: string)
+          TableScan
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key > '2') and key is null) (type: boolean)
-              Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+              predicate: (key > '2') (type: boolean)
+              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: count()
-                  mode: hash
-                  outputColumnNames: _col0
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    sort order: 
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col0 (type: bigint)
+                expressions: key (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
+                  Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          mode: mergepartial
-          outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+        Join Operator
+          condition map:
+               Left Outer Join0 to 1
+          keys:
+            0 _col0 (type: string)
+            1 _col0 (type: string)
+          outputColumnNames: _col0, _col1, _col3
+          Statistics: Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
-            predicate: (_col0 = 0) (type: boolean)
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+            predicate: _col3 is null (type: boolean)
+            Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+              expressions: _col0 (type: string), _col1 (type: string)
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
+                Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE
                 table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator
@@ -147,7 +151,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[23][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[20][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: select * 
 from src 
 where src.key not in  ( select key from src s1 where s1.key > '2')
@@ -1243,7 +1247,7 @@ Manufacturer#5	almond antique medium spring khaki	6
 Manufacturer#5	almond azure blanched chiffon midnight	23
 Manufacturer#5	almond antique blue firebrick mint	31
 Manufacturer#5	almond aquamarine dodger light gainsboro	46
-Warning: Shuffle Join JOIN[23][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[20][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: -- non agg, non corr, Group By in Parent Query
 select li.l_partkey, count(*) 
 from lineitem li 
@@ -1278,7 +1282,7 @@ POSTHOOK: Input: default@lineitem
 139636	1
 175839	1
 182052	1
-Warning: Shuffle Join JOIN[23][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[20][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: -- alternate not in syntax
 select * 
 from src 
@@ -1442,7 +1446,7 @@ POSTHOOK: Input: default@src
 POSTHOOK: Input: default@t1_v
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@T2_v
-Warning: Shuffle Join JOIN[27][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[24][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: explain
 select * 
 from T1_v where T1_v.key not in (select T2_v.key from T2_v)
@@ -1452,12 +1456,50 @@ select *
 from T1_v where T1_v.key not in (select T2_v.key from T2_v)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1, Stage-4
-  Stage-4 is a root stage
+  Stage-3 is a root stage
+  Stage-1 depends on stages: Stage-3
+  Stage-2 depends on stages: Stage-1
   Stage-0 depends on stages: Stage-2
 
 STAGE PLANS:
+  Stage: Stage-3
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: ((key < '11') and CASE WHEN ((key > '104')) THEN (null) ELSE (key) END is null) (type: boolean)
+              Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  aggregations: count()
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    sort order: 
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: _col0 (type: bigint)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: count(VALUE._col0)
+          mode: mergepartial
+          outputColumnNames: _col0
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+          Filter Operator
+            predicate: (_col0 = 0) (type: boolean)
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                table:
+                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
   Stage: Stage-1
     Map Reduce
       Map Operator Tree:
@@ -1472,52 +1514,9 @@ STAGE PLANS:
                 outputColumnNames: _col0
                 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: _col0 (type: string)
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: string)
+                  sort order: 
                   Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-          TableScan
-            alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: ((key < '11') and (CASE WHEN ((key > '104')) THEN (null) ELSE (key) END < '11')) (type: boolean)
-              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: CASE WHEN ((key > '104')) THEN (null) ELSE (key) END (type: string)
-                outputColumnNames: _col0
-                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string)
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Left Outer Join0 to 1
-          keys:
-            0 _col0 (type: string)
-            1 _col0 (type: string)
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
-          Filter Operator
-            predicate: _col1 is null (type: boolean)
-            Statistics: Num rows: 91 Data size: 969 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              sort order: 
-              Statistics: Num rows: 91 Data size: 969 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col0 (type: string)
+                  value expressions: _col0 (type: string)
           TableScan
             Reduce Output Operator
               sort order: 
@@ -1530,52 +1529,61 @@ STAGE PLANS:
             0 
             1 
           outputColumnNames: _col0
-          Statistics: Num rows: 100 Data size: 1065 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 100 Data size: 1065 Basic stats: COMPLETE Column stats: NONE
             table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
-  Stage: Stage-4
+  Stage: Stage-2
     Map Reduce
       Map Operator Tree:
           TableScan
+            Reduce Output Operator
+              key expressions: _col0 (type: string)
+              sort order: +
+              Map-reduce partition columns: _col0 (type: string)
+              Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
+          TableScan
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key < '11') and CASE WHEN ((key > '104')) THEN (null) ELSE (key) END is null) (type: boolean)
-              Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((key < '11') and (CASE WHEN ((key > '104')) THEN (null) ELSE (key) END < '11')) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: count()
-                  mode: hash
-                  outputColumnNames: _col0
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    sort order: 
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col0 (type: bigint)
+                expressions: CASE WHEN ((key > '104')) THEN (null) ELSE (key) END (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
+                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          mode: mergepartial
-          outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+        Join Operator
+          condition map:
+               Left Outer Join0 to 1
+          keys:
+            0 _col0 (type: string)
+            1 _col0 (type: string)
+          outputColumnNames: _col0, _col2
+          Statistics: Num rows: 200 Data size: 2132 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
-            predicate: (_col0 = 0) (type: boolean)
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+            predicate: _col2 is null (type: boolean)
+            Statistics: Num rows: 100 Data size: 1066 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+              expressions: _col0 (type: string)
+              outputColumnNames: _col0
+              Statistics: Num rows: 100 Data size: 1066 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
+                Statistics: Num rows: 100 Data size: 1066 Basic stats: COMPLETE Column stats: NONE
                 table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator
@@ -1583,7 +1591,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[27][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[24][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: select * 
 from T1_v where T1_v.key not in (select T2_v.key from T2_v)
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/e34588e1/ql/src/test/results/clientpositive/subquery_notin_having.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/subquery_notin_having.q.java1.7.out b/ql/src/test/results/clientpositive/subquery_notin_having.q.java1.7.out
index d9550e4..c08e2b9 100644
--- a/ql/src/test/results/clientpositive/subquery_notin_having.q.java1.7.out
+++ b/ql/src/test/results/clientpositive/subquery_notin_having.q.java1.7.out
@@ -1,4 +1,4 @@
-Warning: Shuffle Join JOIN[27][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-3:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[24][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product
 PREHOOK: query: -- non agg, non corr
 -- JAVA_VERSION_SPECIFIC_OUTPUT
 
@@ -25,9 +25,9 @@ having key not in
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-3 depends on stages: Stage-2, Stage-5
-  Stage-5 is a root stage
+  Stage-2 depends on stages: Stage-1, Stage-4
+  Stage-3 depends on stages: Stage-2
+  Stage-4 is a root stage
   Stage-0 depends on stages: Stage-3
 
 STAGE PLANS:
@@ -72,10 +72,38 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             Reduce Output Operator
+              sort order: 
+              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+              value expressions: _col0 (type: string), _col1 (type: bigint)
+          TableScan
+            Reduce Output Operator
+              sort order: 
+              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Inner Join 0 to 1
+          keys:
+            0 
+            1 
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-3
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
               key expressions: _col0 (type: string)
               sort order: +
               Map-reduce partition columns: _col0 (type: string)
-              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
               value expressions: _col1 (type: bigint)
           TableScan
             alias: src
@@ -99,48 +127,24 @@ STAGE PLANS:
           keys:
             0 _col0 (type: string)
             1 _col0 (type: string)
-          outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+          outputColumnNames: _col0, _col1, _col3
+          Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
-            predicate: _col2 is null (type: boolean)
-            Statistics: Num rows: 137 Data size: 1455 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-3
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              sort order: 
-              Statistics: Num rows: 137 Data size: 1455 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col0 (type: string), _col1 (type: bigint)
-          TableScan
-            Reduce Output Operator
-              sort order: 
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Inner Join 0 to 1
-          keys:
-            0 
-            1 
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 150 Data size: 1600 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 150 Data size: 1600 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            predicate: _col3 is null (type: boolean)
+            Statistics: Num rows: 151 Data size: 1606 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: _col0 (type: string), _col1 (type: bigint)
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 151 Data size: 1606 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                Statistics: Num rows: 151 Data size: 1606 Basic stats: COMPLETE Column stats: NONE
+                table:
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
-  Stage: Stage-5
+  Stage: Stage-4
     Map Reduce
       Map Operator Tree:
           TableScan

http://git-wip-us.apache.org/repos/asf/hive/blob/e34588e1/ql/src/test/results/clientpositive/tez/explainuser_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/explainuser_1.q.out b/ql/src/test/results/clientpositive/tez/explainuser_1.q.out
index ad11df1..a3d1f87 100644
--- a/ql/src/test/results/clientpositive/tez/explainuser_1.q.out
+++ b/ql/src/test/results/clientpositive/tez/explainuser_1.q.out
@@ -3732,105 +3732,108 @@ POSTHOOK: type: QUERY
 Plan optimized by CBO.
 
 Vertex dependency in root stage
-Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE)
-Reducer 3 <- Reducer 2 (SIMPLE_EDGE), Reducer 7 (SIMPLE_EDGE)
+Reducer 2 <- Map 1 (SIMPLE_EDGE), Reducer 6 (SIMPLE_EDGE)
+Reducer 3 <- Map 7 (SIMPLE_EDGE), Reducer 2 (SIMPLE_EDGE)
 Reducer 4 <- Reducer 3 (SIMPLE_EDGE)
-Reducer 7 <- Map 6 (SIMPLE_EDGE)
+Reducer 6 <- Map 5 (SIMPLE_EDGE)
 
 Stage-0
    Fetch Operator
       limit:-1
       Stage-1
          Reducer 4
-         File Output Operator [FS_29]
+         File Output Operator [FS_28]
             compressed:false
-            Statistics:Num rows: 1 Data size: 291 Basic stats: COMPLETE Column stats: NONE
+            Statistics:Num rows: 302 Data size: 53756 Basic stats: COMPLETE Column stats: NONE
             table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
-            Select Operator [SEL_28]
+            Select Operator [SEL_27]
             |  outputColumnNames:["_col0","_col1"]
-            |  Statistics:Num rows: 1 Data size: 291 Basic stats: COMPLETE Column stats: NONE
+            |  Statistics:Num rows: 302 Data size: 53756 Basic stats: COMPLETE Column stats: NONE
             |<-Reducer 3 [SIMPLE_EDGE]
-               Reduce Output Operator [RS_27]
+               Reduce Output Operator [RS_26]
                   key expressions:_col0 (type: string)
                   sort order:+
-                  Statistics:Num rows: 1 Data size: 291 Basic stats: COMPLETE Column stats: NONE
+                  Statistics:Num rows: 302 Data size: 53756 Basic stats: COMPLETE Column stats: NONE
                   value expressions:_col1 (type: string)
-                  Merge Join Operator [MERGEJOIN_37]
-                  |  condition map:[{"":"Inner Join 0 to 1"}]
-                  |  keys:{}
-                  |  outputColumnNames:["_col0","_col1"]
-                  |  Statistics:Num rows: 1 Data size: 291 Basic stats: COMPLETE Column stats: NONE
-                  |<-Reducer 2 [SIMPLE_EDGE]
-                  |  Reduce Output Operator [RS_21]
-                  |     sort order:
-                  |     Statistics:Num rows: 1 Data size: 265 Basic stats: COMPLETE Column stats: COMPLETE
-                  |     value expressions:_col0 (type: string), _col1 (type: string)
-                  |     Filter Operator [FIL_32]
-                  |        predicate:_col2 is null (type: boolean)
-                  |        Statistics:Num rows: 1 Data size: 265 Basic stats: COMPLETE Column stats: COMPLETE
-                  |        Merge Join Operator [MERGEJOIN_36]
-                  |        |  condition map:[{"":"Left Outer Join0 to 1"}]
-                  |        |  keys:{"0":"_col0 (type: string)","1":"_col0 (type: string)"}
-                  |        |  outputColumnNames:["_col0","_col1","_col2"]
-                  |        |  Statistics:Num rows: 404 Data size: 107060 Basic stats: COMPLETE Column stats: COMPLETE
-                  |        |<-Map 1 [SIMPLE_EDGE]
-                  |        |  Reduce Output Operator [RS_18]
-                  |        |     key expressions:_col0 (type: string)
-                  |        |     Map-reduce partition columns:_col0 (type: string)
-                  |        |     sort order:+
-                  |        |     Statistics:Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
-                  |        |     value expressions:_col1 (type: string)
-                  |        |     Select Operator [SEL_2]
-                  |        |        outputColumnNames:["_col0","_col1"]
-                  |        |        Statistics:Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
-                  |        |        TableScan [TS_0]
-                  |        |           alias:src_cbo
-                  |        |           Statistics:Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
-                  |        |<-Map 5 [SIMPLE_EDGE]
-                  |           Reduce Output Operator [RS_19]
-                  |              key expressions:_col0 (type: string)
-                  |              Map-reduce partition columns:_col0 (type: string)
-                  |              sort order:+
-                  |              Statistics:Num rows: 166 Data size: 14442 Basic stats: COMPLETE Column stats: COMPLETE
-                  |              Select Operator [SEL_5]
-                  |                 outputColumnNames:["_col0"]
-                  |                 Statistics:Num rows: 166 Data size: 14442 Basic stats: COMPLETE Column stats: COMPLETE
-                  |                 Filter Operator [FIL_33]
-                  |                    predicate:(key > '2') (type: boolean)
-                  |                    Statistics:Num rows: 166 Data size: 14442 Basic stats: COMPLETE Column stats: COMPLETE
-                  |                    TableScan [TS_3]
-                  |                       alias:src_cbo
-                  |                       Statistics:Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
-                  |<-Reducer 7 [SIMPLE_EDGE]
-                     Reduce Output Operator [RS_22]
-                        sort order:
-                        Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
-                        Select Operator [SEL_14]
-                           Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
-                           Filter Operator [FIL_34]
-                              predicate:(_col0 = 0) (type: boolean)
-                              Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                              Group By Operator [GBY_13]
-                              |  aggregations:["count(VALUE._col0)"]
-                              |  outputColumnNames:["_col0"]
-                              |  Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                              |<-Map 6 [SIMPLE_EDGE]
-                                 Reduce Output Operator [RS_12]
+                  Select Operator [SEL_25]
+                     outputColumnNames:["_col0","_col1"]
+                     Statistics:Num rows: 302 Data size: 53756 Basic stats: COMPLETE Column stats: NONE
+                     Filter Operator [FIL_31]
+                        predicate:_col3 is null (type: boolean)
+                        Statistics:Num rows: 302 Data size: 53756 Basic stats: COMPLETE Column stats: NONE
+                        Merge Join Operator [MERGEJOIN_36]
+                        |  condition map:[{"":"Left Outer Join0 to 1"}]
+                        |  keys:{"0":"_col0 (type: string)","1":"_col0 (type: string)"}
+                        |  outputColumnNames:["_col0","_col1","_col3"]
+                        |  Statistics:Num rows: 605 Data size: 107690 Basic stats: COMPLETE Column stats: NONE
+                        |<-Map 7 [SIMPLE_EDGE]
+                        |  Reduce Output Operator [RS_22]
+                        |     key expressions:_col0 (type: string)
+                        |     Map-reduce partition columns:_col0 (type: string)
+                        |     sort order:+
+                        |     Statistics:Num rows: 166 Data size: 14442 Basic stats: COMPLETE Column stats: COMPLETE
+                        |     Select Operator [SEL_16]
+                        |        outputColumnNames:["_col0"]
+                        |        Statistics:Num rows: 166 Data size: 14442 Basic stats: COMPLETE Column stats: COMPLETE
+                        |        Filter Operator [FIL_34]
+                        |           predicate:(key > '2') (type: boolean)
+                        |           Statistics:Num rows: 166 Data size: 14442 Basic stats: COMPLETE Column stats: COMPLETE
+                        |           TableScan [TS_14]
+                        |              alias:src_cbo
+                        |              Statistics:Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                        |<-Reducer 2 [SIMPLE_EDGE]
+                           Reduce Output Operator [RS_21]
+                              key expressions:_col0 (type: string)
+                              Map-reduce partition columns:_col0 (type: string)
+                              sort order:+
+                              Statistics:Num rows: 550 Data size: 97900 Basic stats: COMPLETE Column stats: NONE
+                              value expressions:_col1 (type: string)
+                              Merge Join Operator [MERGEJOIN_35]
+                              |  condition map:[{"":"Inner Join 0 to 1"}]
+                              |  keys:{}
+                              |  outputColumnNames:["_col0","_col1"]
+                              |  Statistics:Num rows: 550 Data size: 97900 Basic stats: COMPLETE Column stats: NONE
+                              |<-Map 1 [SIMPLE_EDGE]
+                              |  Reduce Output Operator [RS_18]
+                              |     sort order:
+                              |     Statistics:Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                              |     value expressions:_col0 (type: string), _col1 (type: string)
+                              |     Select Operator [SEL_2]
+                              |        outputColumnNames:["_col0","_col1"]
+                              |        Statistics:Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                              |        TableScan [TS_0]
+                              |           alias:src_cbo
+                              |           Statistics:Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                              |<-Reducer 6 [SIMPLE_EDGE]
+                                 Reduce Output Operator [RS_19]
                                     sort order:
-                                    Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                                    value expressions:_col0 (type: bigint)
-                                    Group By Operator [GBY_11]
-                                       aggregations:["count()"]
-                                       outputColumnNames:["_col0"]
-                                       Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                                       Select Operator [SEL_8]
-                                          Statistics:Num rows: 1 Data size: 87 Basic stats: COMPLETE Column stats: COMPLETE
-                                          Filter Operator [FIL_35]
-                                             predicate:((key > '2') and key is null) (type: boolean)
-                                             Statistics:Num rows: 1 Data size: 87 Basic stats: COMPLETE Column stats: COMPLETE
-                                             TableScan [TS_6]
-                                                alias:src_cbo
-                                                Statistics:Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                                    Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+                                    Select Operator [SEL_11]
+                                       Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+                                       Filter Operator [FIL_32]
+                                          predicate:(_col0 = 0) (type: boolean)
+                                          Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                                          Group By Operator [GBY_10]
+                                          |  aggregations:["count(VALUE._col0)"]
+                                          |  outputColumnNames:["_col0"]
+                                          |  Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                                          |<-Map 5 [SIMPLE_EDGE]
+                                             Reduce Output Operator [RS_9]
+                                                sort order:
+                                                Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                                                value expressions:_col0 (type: bigint)
+                                                Group By Operator [GBY_8]
+                                                   aggregations:["count()"]
+                                                   outputColumnNames:["_col0"]
+                                                   Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                                                   Select Operator [SEL_5]
+                                                      Statistics:Num rows: 1 Data size: 87 Basic stats: COMPLETE Column stats: COMPLETE
+                                                      Filter Operator [FIL_33]
+                                                         predicate:((key > '2') and key is null) (type: boolean)
+                                                         Statistics:Num rows: 1 Data size: 87 Basic stats: COMPLETE Column stats: COMPLETE
+                                                         TableScan [TS_3]
+                                                            alias:src_cbo
+                                                            Statistics:Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
 
 PREHOOK: query: explain select p_mfgr, b.p_name, p_size 
 from part b 

http://git-wip-us.apache.org/repos/asf/hive/blob/e34588e1/ql/src/test/results/clientpositive/tez/tez_dynpart_hashjoin_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/tez_dynpart_hashjoin_3.q.out b/ql/src/test/results/clientpositive/tez/tez_dynpart_hashjoin_3.q.out
index 52b4288..29ffb47 100644
--- a/ql/src/test/results/clientpositive/tez/tez_dynpart_hashjoin_3.q.out
+++ b/ql/src/test/results/clientpositive/tez/tez_dynpart_hashjoin_3.q.out
@@ -18,27 +18,13 @@ STAGE PLANS:
   Stage: Stage-1
     Tez
       Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE), Reducer 4 (SIMPLE_EDGE)
-        Reducer 4 <- Map 3 (SIMPLE_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+        Reducer 3 <- Map 4 (SIMPLE_EDGE), Reducer 2 (SIMPLE_EDGE)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
             Map Operator Tree:
                 TableScan
-                  alias: b
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  Select Operator
-                    expressions: key (type: string)
-                    outputColumnNames: _col0
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: UDFToInteger(_col0) (type: int)
-                      sort order: +
-                      Map-reduce partition columns: UDFToInteger(_col0) (type: int)
-                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-        Map 3 
-            Map Operator Tree:
-                TableScan
                   alias: a
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
@@ -52,35 +38,22 @@ STAGE PLANS:
                         sort order: 
                         Statistics: Num rows: 1 Data size: 215 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: int), _col3 (type: bigint), _col4 (type: float), _col5 (type: double), _col6 (type: string), _col7 (type: string), _col8 (type: timestamp), _col9 (type: timestamp), _col10 (type: boolean), _col11 (type: boolean)
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: string)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: UDFToInteger(_col0) (type: int)
+                      sort order: +
+                      Map-reduce partition columns: UDFToInteger(_col0) (type: int)
+                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
-              Merge Join Operator
-                condition map:
-                     Right Outer Join0 to 1
-                filter predicates:
-                  0 
-                  1 {(KEY.reducesinkkey0 < 100)}
-                keys:
-                  0 UDFToInteger(_col0) (type: int)
-                  1 _col2 (type: int)
-                outputColumnNames: _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12
-                Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
-                Select Operator
-                  expressions: _col1 (type: tinyint), _col2 (type: smallint), _col3 (type: int), _col4 (type: bigint), _col5 (type: float), _col6 (type: double), _col7 (type: string), _col8 (type: string), _col9 (type: timestamp), _col10 (type: timestamp), _col11 (type: boolean), _col12 (type: boolean)
-                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11
-                  Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
-                  Limit
-                    Number of rows: 1
-                    Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
-                    File Output Operator
-                      compressed: false
-                      Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
-                      table:
-                          input format: org.apache.hadoop.mapred.TextInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-        Reducer 4 
-            Reduce Operator Tree:
               Select Operator
                 expressions: VALUE._col0 (type: tinyint), VALUE._col1 (type: smallint), VALUE._col2 (type: int), VALUE._col3 (type: bigint), VALUE._col4 (type: float), VALUE._col5 (type: double), VALUE._col6 (type: string), VALUE._col7 (type: string), VALUE._col8 (type: timestamp), VALUE._col9 (type: timestamp), VALUE._col10 (type: boolean), VALUE._col11 (type: boolean)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11
@@ -94,6 +67,29 @@ STAGE PLANS:
                     Map-reduce partition columns: _col2 (type: int)
                     Statistics: Num rows: 1 Data size: 215 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col3 (type: bigint), _col4 (type: float), _col5 (type: double), _col6 (type: string), _col7 (type: string), _col8 (type: timestamp), _col9 (type: timestamp), _col10 (type: boolean), _col11 (type: boolean)
+        Reducer 3 
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Left Outer Join0 to 1
+                filter predicates:
+                  0 {(KEY.reducesinkkey0 < 100)}
+                  1 
+                keys:
+                  0 _col2 (type: int)
+                  1 UDFToInteger(_col0) (type: int)
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11
+                Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+                Limit
+                  Number of rows: 1
+                  Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator
@@ -121,26 +117,12 @@ STAGE PLANS:
   Stage: Stage-1
     Tez
       Edges:
-        Reducer 3 <- Map 1 (BROADCAST_EDGE), Map 2 (SIMPLE_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (BROADCAST_EDGE)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
             Map Operator Tree:
                 TableScan
-                  alias: b
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  Select Operator
-                    expressions: key (type: string)
-                    outputColumnNames: _col0
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: UDFToInteger(_col0) (type: int)
-                      sort order: +
-                      Map-reduce partition columns: UDFToInteger(_col0) (type: int)
-                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-        Map 2 
-            Map Operator Tree:
-                TableScan
                   alias: a
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
@@ -154,7 +136,21 @@ STAGE PLANS:
                         sort order: 
                         Statistics: Num rows: 1 Data size: 215 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: int), _col3 (type: bigint), _col4 (type: float), _col5 (type: double), _col6 (type: string), _col7 (type: string), _col8 (type: timestamp), _col9 (type: timestamp), _col10 (type: boolean), _col11 (type: boolean)
-        Reducer 3 
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: string)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: UDFToInteger(_col0) (type: int)
+                      sort order: +
+                      Map-reduce partition columns: UDFToInteger(_col0) (type: int)
+                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+        Reducer 2 
             Reduce Operator Tree:
               Select Operator
                 expressions: VALUE._col0 (type: tinyint), VALUE._col1 (type: smallint), VALUE._col2 (type: int), VALUE._col3 (type: bigint), VALUE._col4 (type: float), VALUE._col5 (type: double), VALUE._col6 (type: string), VALUE._col7 (type: string), VALUE._col8 (type: timestamp), VALUE._col9 (type: timestamp), VALUE._col10 (type: boolean), VALUE._col11 (type: boolean)
@@ -165,32 +161,28 @@ STAGE PLANS:
                   Statistics: Num rows: 1 Data size: 215 Basic stats: COMPLETE Column stats: NONE
                   Map Join Operator
                     condition map:
-                         Right Outer Join0 to 1
+                         Left Outer Join0 to 1
                     filter predicates:
-                      0 
-                      1 {(_col2 < 100)}
+                      0 {(_col2 < 100)}
+                      1 
                     keys:
-                      0 UDFToInteger(_col0) (type: int)
-                      1 _col2 (type: int)
-                    outputColumnNames: _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12
+                      0 _col2 (type: int)
+                      1 UDFToInteger(_col0) (type: int)
+                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11
                     input vertices:
-                      0 Map 1
+                      1 Map 3
                     Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
                     HybridGraceHashJoin: true
-                    Select Operator
-                      expressions: _col1 (type: tinyint), _col2 (type: smallint), _col3 (type: int), _col4 (type: bigint), _col5 (type: float), _col6 (type: double), _col7 (type: string), _col8 (type: string), _col9 (type: timestamp), _col10 (type: timestamp), _col11 (type: boolean), _col12 (type: boolean)
-                      outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11
-                      Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
-                      Limit
-                        Number of rows: 1
+                    Limit
+                      Number of rows: 1
+                      Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
+                      File Output Operator
+                        compressed: false
                         Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
-                        File Output Operator
-                          compressed: false
-                          Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
-                          table:
-                              input format: org.apache.hadoop.mapred.TextInputFormat
-                              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/e34588e1/ql/src/test/results/clientpositive/tez/tez_smb_empty.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/tez_smb_empty.q.out b/ql/src/test/results/clientpositive/tez/tez_smb_empty.q.out
index 48816d1..84110c2 100644
--- a/ql/src/test/results/clientpositive/tez/tez_smb_empty.q.out
+++ b/ql/src/test/results/clientpositive/tez/tez_smb_empty.q.out
@@ -817,6 +817,10 @@ STAGE PLANS:
                   Filter Operator
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
             Map Operator Tree:
                 TableScan
                   alias: s1
@@ -824,22 +828,26 @@ STAGE PLANS:
                   Filter Operator
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                    Merge Join Operator
-                      condition map:
-                           Inner Join 0 to 1
-                      keys:
-                        0 key (type: int)
-                        1 key (type: int)
-                      Statistics: Num rows: 133 Data size: 1411 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        aggregations: count()
-                        mode: hash
-                        outputColumnNames: _col0
-                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                        Reduce Output Operator
-                          sort order: 
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                      Merge Join Operator
+                        condition map:
+                             Inner Join 0 to 1
+                        keys:
+                          0 _col0 (type: int)
+                          1 _col0 (type: int)
+                        Statistics: Num rows: 133 Data size: 1411 Basic stats: COMPLETE Column stats: NONE
+                        Group By Operator
+                          aggregations: count()
+                          mode: hash
+                          outputColumnNames: _col0
                           Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                          value expressions: _col0 (type: bigint)
+                          Reduce Output Operator
+                            sort order: 
+                            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                            value expressions: _col0 (type: bigint)
         Reducer 2 
             Reduce Operator Tree:
               Group By Operator


[4/7] hive git commit: HIVE-12406: HIVE-9500 introduced incompatible change to LazySimpleSerDe public interface (Aihua Xu, reviewed by Szehon Ho)

Posted by om...@apache.org.
HIVE-12406: HIVE-9500 introduced incompatible change to LazySimpleSerDe public interface (Aihua Xu, reviewed by Szehon Ho)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/6e429d83
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/6e429d83
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/6e429d83

Branch: refs/heads/master-fixed
Commit: 6e429d8381333edccba23d74fecd7342a69d09c1
Parents: cf6fbbd
Author: Aihua Xu <ai...@apache.org>
Authored: Mon Nov 23 14:41:58 2015 -0500
Committer: Owen O'Malley <om...@apache.org>
Committed: Tue Nov 24 12:10:09 2015 -0800

----------------------------------------------------------------------
 .../hive/serde2/lazy/LazySerDeParameters.java   |  6 ++-
 .../hive/serde2/lazy/LazySimpleSerDe.java       | 38 +++++++++----
 .../hive/serde2/lazy/TestLazySimpleSerDe.java   | 56 +++++++++++++++++---
 3 files changed, 82 insertions(+), 18 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/6e429d83/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazySerDeParameters.java
----------------------------------------------------------------------
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazySerDeParameters.java b/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazySerDeParameters.java
index 54f6b2b..11af860 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazySerDeParameters.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazySerDeParameters.java
@@ -20,14 +20,14 @@ package org.apache.hadoop.hive.serde2.lazy;
 
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.HashMap;
 import java.util.List;
-import java.util.Map;
 import java.util.Properties;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.common.classification.InterfaceAudience.Public;
+import org.apache.hadoop.hive.common.classification.InterfaceStability.Stable;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.hive.serde2.SerDeException;
@@ -42,6 +42,8 @@ import org.apache.hive.common.util.HiveStringUtils;
  * SerDeParameters.
  *
  */
+@Public
+@Stable
 public class LazySerDeParameters implements LazyObjectInspectorParameters {
   public static final Logger LOG = LoggerFactory.getLogger(LazySerDeParameters.class.getName());
   public static final byte[] DefaultSeparators = {(byte) 1, (byte) 2, (byte) 3};

http://git-wip-us.apache.org/repos/asf/hive/blob/6e429d83/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazySimpleSerDe.java
----------------------------------------------------------------------
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazySimpleSerDe.java b/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazySimpleSerDe.java
index 0a2f44c..ac2d39f 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazySimpleSerDe.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazySimpleSerDe.java
@@ -19,16 +19,14 @@
 package org.apache.hadoop.hive.serde2.lazy;
 
 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Properties;
 
-import org.apache.commons.lang.ArrayUtils;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.common.classification.InterfaceAudience.Public;
+import org.apache.hadoop.hive.common.classification.InterfaceStability.Stable;
 import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.hive.serde2.AbstractEncodingAwareSerDe;
 import org.apache.hadoop.hive.serde2.ByteStream;
@@ -36,7 +34,6 @@ import org.apache.hadoop.hive.serde2.SerDeException;
 import org.apache.hadoop.hive.serde2.SerDeSpec;
 import org.apache.hadoop.hive.serde2.SerDeStats;
 import org.apache.hadoop.hive.serde2.SerDeUtils;
-import org.apache.hadoop.hive.serde2.lazy.objectinspector.primitive.LazyObjectInspectorParameters;
 import org.apache.hadoop.hive.serde2.lazy.objectinspector.primitive.LazyObjectInspectorParametersImpl;
 import org.apache.hadoop.hive.serde2.objectinspector.ListObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.MapObjectInspector;
@@ -48,14 +45,10 @@ import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.UnionObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
 import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
-import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
-import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
 import org.apache.hadoop.io.BinaryComparable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Writable;
-import org.apache.hive.common.util.HiveStringUtils;
-
 
 /**
  * LazySimpleSerDe can be used to read the same data format as
@@ -67,6 +60,8 @@ import org.apache.hive.common.util.HiveStringUtils;
  * Also LazySimpleSerDe outputs typed columns instead of treating all columns as
  * String like MetadataTypedColumnsetSerDe.
  */
+@Public
+@Stable
 @SerDeSpec(schemaProps = {
     serdeConstants.LIST_COLUMNS, serdeConstants.LIST_COLUMN_TYPES,
     serdeConstants.FIELD_DELIM, serdeConstants.COLLECTION_DELIM, serdeConstants.MAPKEY_DELIM,
@@ -413,4 +408,29 @@ public class LazySimpleSerDe extends AbstractEncodingAwareSerDe {
     Text text = (Text)blob;
     return SerDeUtils.transformTextToUTF8(text, this.charset);
   }
+
+  /**
+   * This method is deprecated and is only used for backward compatibility.
+   * Replaced by @see org.apache.hadoop.hive.serde2.lazy.LazySerDeParameters#LazySerDeParameters(Configuration, Properties, String)
+   */
+  @Deprecated
+  public static SerDeParameters initSerdeParams(Configuration job,
+      Properties tbl,
+      String serdeName) throws SerDeException {
+    return new SerDeParameters(job, tbl, serdeName);
+  }
+
+  /**
+   * This class is deprecated and is only used for backward compatibility. Replaced by
+   * @see org.apache.hadoop.hive.serde2.lazy.LazySerDeParameters.
+   */
+  @Deprecated
+  public static class SerDeParameters extends org.apache.hadoop.hive.serde2.lazy.LazySerDeParameters {
+
+    public SerDeParameters(Configuration job,
+        Properties tbl,
+        String serdeName) throws SerDeException {
+      super(job, tbl, serdeName);
+    }
+  }
 }
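
For illustration, a minimal sketch of the kind of pre-HIVE-9500 caller that this
restored shim keeps compiling. The table properties and the LegacySerDeCaller
class are made up for the example; initSerdeParams, SerDeParameters, and
getSeparators() are the restored pieces shown in the diff above.

    import java.util.Properties;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.serde.serdeConstants;
    import org.apache.hadoop.hive.serde2.SerDeException;
    import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
    import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.SerDeParameters;

    public class LegacySerDeCaller {
      @SuppressWarnings("deprecation")
      public static void main(String[] args) throws SerDeException {
        Configuration conf = new Configuration();
        Properties tbl = new Properties();
        // Illustrative two-column schema; any valid column list works.
        tbl.setProperty(serdeConstants.LIST_COLUMNS, "key,value");
        tbl.setProperty(serdeConstants.LIST_COLUMN_TYPES, "string,string");

        // Old entry point, now a thin wrapper over LazySerDeParameters.
        SerDeParameters params =
            LazySimpleSerDe.initSerdeParams(conf, tbl, "legacySerde");
        byte[] separators = params.getSeparators(); // inherited accessor
        System.out.println("field delimiter byte: " + separators[0]);
      }
    }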

http://git-wip-us.apache.org/repos/asf/hive/blob/6e429d83/serde/src/test/org/apache/hadoop/hive/serde2/lazy/TestLazySimpleSerDe.java
----------------------------------------------------------------------
diff --git a/serde/src/test/org/apache/hadoop/hive/serde2/lazy/TestLazySimpleSerDe.java b/serde/src/test/org/apache/hadoop/hive/serde2/lazy/TestLazySimpleSerDe.java
index b11ce32..391edd4 100644
--- a/serde/src/test/org/apache/hadoop/hive/serde2/lazy/TestLazySimpleSerDe.java
+++ b/serde/src/test/org/apache/hadoop/hive/serde2/lazy/TestLazySimpleSerDe.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hive.serde2.lazy;
 import java.io.IOException;
 import java.util.List;
 import java.util.Properties;
+import java.util.Random;
 
 import junit.framework.TestCase;
 
@@ -29,15 +30,22 @@ import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.hive.serde2.ByteStream;
 import org.apache.hadoop.hive.serde2.SerDeException;
 import org.apache.hadoop.hive.serde2.SerDeUtils;
+import org.apache.hadoop.hive.serde2.binarysortable.MyTestClass;
+import org.apache.hadoop.hive.serde2.binarysortable.MyTestPrimitiveClass.ExtraTypeInfo;
 import org.apache.hadoop.hive.serde2.io.ByteWritable;
 import org.apache.hadoop.hive.serde2.io.DoubleWritable;
 import org.apache.hadoop.hive.serde2.io.ShortWritable;
+import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.SerDeParameters;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils;
 import org.apache.hadoop.hive.serde2.objectinspector.StructField;
 import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory.ObjectInspectorOptions;
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.IntWritable;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.Text;
+import org.junit.Test;
 
 /**
  * TestLazySimpleSerDe.
@@ -81,8 +89,6 @@ public class TestLazySimpleSerDe extends TestCase {
     }
   }
 
-
-
   /**
    * Test the LazySimpleSerDe class with LastColumnTakesRest option.
    */
@@ -167,20 +173,56 @@ public class TestLazySimpleSerDe extends TestCase {
       throw e;
     }
   }
-  
-  Object serializeAndDeserialize(List<Integer> o1, StructObjectInspector oi1,
+
+  /**
+   * Tests the deprecated usage of SerDeParameters.
+   *
+   */
+  @Test
+  @SuppressWarnings("deprecation")
+  public void testSerDeParameters() throws SerDeException, IOException {
+    // Setup
+    LazySimpleSerDe serDe = new LazySimpleSerDe();
+    Configuration conf = new Configuration();
+
+    MyTestClass row = new MyTestClass();
+    ExtraTypeInfo extraTypeInfo = new ExtraTypeInfo();
+    row.randomFill(new Random(1234), extraTypeInfo);
+
+    StructObjectInspector rowOI = (StructObjectInspector) ObjectInspectorFactory
+        .getReflectionObjectInspector(MyTestClass.class,
+            ObjectInspectorOptions.JAVA);
+
+    String fieldNames = ObjectInspectorUtils.getFieldNames(rowOI);
+    String fieldTypes = ObjectInspectorUtils.getFieldTypes(rowOI);
+
+    Properties schema = new Properties();
+    schema.setProperty(serdeConstants.LIST_COLUMNS, fieldNames);
+    schema.setProperty(serdeConstants.LIST_COLUMN_TYPES, fieldTypes);
+
+    SerDeUtils.initializeSerDe(serDe, conf, schema, null);
+    SerDeParameters serdeParams = LazySimpleSerDe.initSerdeParams(conf, schema, "testSerdeName");
+
+    // Test
+    LazyStruct data = (LazyStruct)serializeAndDeserialize(row, rowOI, serDe, serdeParams);
+    assertEquals((boolean)row.myBool, ((LazyBoolean)data.getField(0)).getWritableObject().get());
+    assertEquals((int)row.myInt, ((LazyInteger)data.getField(3)).getWritableObject().get());
+  }
+
+  private Object serializeAndDeserialize(Object row,
+      StructObjectInspector rowOI,
       LazySimpleSerDe serde,
       LazySerDeParameters serdeParams) throws IOException, SerDeException {
     ByteStream.Output serializeStream = new ByteStream.Output();
-    LazySimpleSerDe.serialize(serializeStream, o1, oi1, serdeParams
+    LazySimpleSerDe.serialize(serializeStream, row, rowOI, serdeParams
         .getSeparators(), 0, serdeParams.getNullSequence(), serdeParams
         .isEscaped(), serdeParams.getEscapeChar(), serdeParams
         .getNeedsEscape());
+
     Text t = new Text(serializeStream.toByteArray());
     return serde.deserialize(t);
   }
-  
-  
+
   private void deserializeAndSerialize(LazySimpleSerDe serDe, Text t, String s,
       Object[] expectedFieldsData) throws SerDeException {
     // Get the row structure


[3/7] hive git commit: HIVE-12456: QueryId can't be stored in the configuration of the SessionState since multiple queries can run in a single session (Aihua Xu, reviewed by Mohit)

Posted by om...@apache.org.
HIVE-12456: QueryId can't be stored in the configuration of the SessionState since multiple queries can run in a single session (Aihua Xu, reviewed by Mohit)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/f15d4e10
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/f15d4e10
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/f15d4e10

Branch: refs/heads/master-fixed
Commit: f15d4e108103fd2b1c42345634e167e41ded42f9
Parents: 5f726d5
Author: Aihua Xu <ai...@apache.org>
Authored: Mon Nov 23 12:20:39 2015 -0500
Committer: Owen O'Malley <om...@apache.org>
Committed: Tue Nov 24 12:10:09 2015 -0800

----------------------------------------------------------------------
 .../cli/operation/ExecuteStatementOperation.java | 15 +--------------
 .../hive/service/cli/operation/Operation.java    | 19 +++++++++++++++----
 .../hive/service/cli/operation/SQLOperation.java |  4 ++--
 .../service/cli/session/HiveSessionImpl.java     |  1 -
 4 files changed, 18 insertions(+), 21 deletions(-)
----------------------------------------------------------------------
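
To make the intent concrete, a minimal sketch, assuming log4j2's ThreadContext,
of the per-query scoping this change enables. The QueryScopedLogging class and
its beforeRun/afterRun hooks are hypothetical; the "sessionId" and "queryId"
key names match SESSIONID_LOG_KEY and QUERYID_LOG_KEY in the Operation.java
diff below.

    import org.apache.logging.log4j.ThreadContext;

    public class QueryScopedLogging {
      // Hypothetical hooks: each operation carries its own queryId in the
      // logging context rather than in the shared SessionState configuration,
      // so concurrent queries in one session cannot clobber each other's id.
      static void beforeRun(String sessionId, String queryId) {
        ThreadContext.put("sessionId", sessionId); // Operation.SESSIONID_LOG_KEY
        ThreadContext.put("queryId", queryId);     // Operation.QUERYID_LOG_KEY
      }

      static void afterRun() {
        // Remove the per-query keys when the operation finishes.
        ThreadContext.remove("sessionId");
        ThreadContext.remove("queryId");
      }
    }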


http://git-wip-us.apache.org/repos/asf/hive/blob/f15d4e10/service/src/java/org/apache/hive/service/cli/operation/ExecuteStatementOperation.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/operation/ExecuteStatementOperation.java b/service/src/java/org/apache/hive/service/cli/operation/ExecuteStatementOperation.java
index 3f2de10..b3d9b52 100644
--- a/service/src/java/org/apache/hive/service/cli/operation/ExecuteStatementOperation.java
+++ b/service/src/java/org/apache/hive/service/cli/operation/ExecuteStatementOperation.java
@@ -18,7 +18,6 @@
 package org.apache.hive.service.cli.operation;
 
 import java.sql.SQLException;
-import java.util.HashMap;
 import java.util.Map;
 
 import org.apache.hadoop.hive.ql.processors.CommandProcessor;
@@ -29,13 +28,11 @@ import org.apache.hive.service.cli.session.HiveSession;
 
 public abstract class ExecuteStatementOperation extends Operation {
   protected String statement = null;
-  protected Map<String, String> confOverlay = new HashMap<String, String>();
 
   public ExecuteStatementOperation(HiveSession parentSession, String statement,
       Map<String, String> confOverlay, boolean runInBackground) {
-    super(parentSession, OperationType.EXECUTE_STATEMENT, runInBackground);
+    super(parentSession, confOverlay, OperationType.EXECUTE_STATEMENT, runInBackground);
     this.statement = statement;
-    setConfOverlay(confOverlay);
   }
 
   public String getStatement() {
@@ -57,14 +54,4 @@ public abstract class ExecuteStatementOperation extends Operation {
     }
     return new HiveCommandOperation(parentSession, statement, processor, confOverlay);
   }
-
-  protected Map<String, String> getConfOverlay() {
-    return confOverlay;
-  }
-
-  protected void setConfOverlay(Map<String, String> confOverlay) {
-    if (confOverlay != null) {
-      this.confOverlay = confOverlay;
-    }
-  }
 }

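The patch's premise is easy to reproduce in isolation: a single session-level slot for the query id is clobbered as soon as a second query starts, while a per-operation overlay keeps each id with its operation. A toy illustration (plain maps; the key is Hive's real hive.query.id, everything else is made up for the demo):

    import java.util.HashMap;
    import java.util.Map;

    final class SessionScopedIdDemo {
      public static void main(String[] args) {
        // One shared, session-level slot: the second query overwrites the first.
        Map<String, String> sessionConf = new HashMap<>();
        sessionConf.put("hive.query.id", "query-1");
        sessionConf.put("hive.query.id", "query-2");       // query-1's id is lost

        // Per-operation overlays: each operation keeps its own id.
        Map<String, String> overlay1 = new HashMap<>();
        overlay1.put("hive.query.id", "query-1");
        Map<String, String> overlay2 = new HashMap<>();
        overlay2.put("hive.query.id", "query-2");
        System.out.println(overlay1.get("hive.query.id")); // still query-1
        System.out.println(overlay2.get("hive.query.id")); // query-2
      }
    }
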
http://git-wip-us.apache.org/repos/asf/hive/blob/f15d4e10/service/src/java/org/apache/hive/service/cli/operation/Operation.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/operation/Operation.java b/service/src/java/org/apache/hive/service/cli/operation/Operation.java
index d13415e..25cefc2 100644
--- a/service/src/java/org/apache/hive/service/cli/operation/Operation.java
+++ b/service/src/java/org/apache/hive/service/cli/operation/Operation.java
@@ -21,11 +21,14 @@ import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
 
 import com.google.common.collect.Sets;
+
 import org.apache.hadoop.hive.common.metrics.common.Metrics;
 import org.apache.hadoop.hive.common.metrics.common.MetricsConstant;
 import org.apache.hadoop.hive.common.metrics.common.MetricsFactory;
@@ -50,8 +53,8 @@ import org.apache.logging.log4j.ThreadContext;
 
 public abstract class Operation {
   // Constants of the key strings for the log4j ThreadContext.
-  private static final String QUERYID = "QueryId";
-  private static final String SESSIONID = "SessionId";
+  public static final String SESSIONID_LOG_KEY = "sessionId";
+  public static final String QUERYID_LOG_KEY = "queryId";
 
   protected final HiveSession parentSession;
   private OperationState state = OperationState.INITIALIZED;
@@ -67,6 +70,7 @@ public abstract class Operation {
   protected volatile Future<?> backgroundHandle;
   protected OperationLog operationLog;
   protected boolean isOperationLogEnabled;
+  protected Map<String, String> confOverlay = new HashMap<String, String>();
 
   private long operationTimeout;
   private long lastAccessTime;
@@ -75,7 +79,14 @@ public abstract class Operation {
       EnumSet.of(FetchOrientation.FETCH_NEXT,FetchOrientation.FETCH_FIRST);
 
   protected Operation(HiveSession parentSession, OperationType opType, boolean runInBackground) {
+    this(parentSession, null, opType, runInBackground);
+  }
+
+  protected Operation(HiveSession parentSession, Map<String, String> confOverlay, OperationType opType, boolean runInBackground) {
     this.parentSession = parentSession;
+    if (confOverlay != null) {
+      this.confOverlay = confOverlay;
+    }
     this.runAsync = runInBackground;
     this.opHandle = new OperationHandle(opType, parentSession.getProtocolVersion());
     lastAccessTime = System.currentTimeMillis();
@@ -258,8 +269,8 @@ public abstract class Operation {
    * Register logging context so that Log4J can print QueryId and/or SessionId for each message
    */
   protected void registerLoggingContext() {
-    ThreadContext.put(QUERYID, SessionState.get().getQueryId());
-    ThreadContext.put(SESSIONID, SessionState.get().getSessionId());
+    ThreadContext.put(SESSIONID_LOG_KEY, SessionState.get().getSessionId());
+    ThreadContext.put(QUERYID_LOG_KEY, confOverlay.get(HiveConf.ConfVars.HIVEQUERYID.varname));
   }
 
   /**

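registerLoggingContext() now reads the query id from the operation's own overlay rather than from SessionState. The Log4j 2 side of the mechanism is worth spelling out: ThreadContext entries are looked up by key from the pattern layout, so the renamed constants must match whatever %X{...} keys the logging configuration uses. A minimal sketch, with an illustrative pattern and ids:

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;
    import org.apache.logging.log4j.ThreadContext;

    public class MdcDemo {
      private static final Logger LOG = LogManager.getLogger(MdcDemo.class);

      public static void main(String[] args) {
        // With a layout such as "%d %X{sessionId} %X{queryId} %m%n", every
        // message logged on this thread carries both ids.
        ThreadContext.put("sessionId", "session-abc");
        ThreadContext.put("queryId", "query-123");
        try {
          LOG.info("compiling statement");
        } finally {
          ThreadContext.clearMap();   // don't leak ids across pooled threads
        }
      }
    }
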
http://git-wip-us.apache.org/repos/asf/hive/blob/f15d4e10/service/src/java/org/apache/hive/service/cli/operation/SQLOperation.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/operation/SQLOperation.java b/service/src/java/org/apache/hive/service/cli/operation/SQLOperation.java
index 8b42265..1331a99 100644
--- a/service/src/java/org/apache/hive/service/cli/operation/SQLOperation.java
+++ b/service/src/java/org/apache/hive/service/cli/operation/SQLOperation.java
@@ -466,12 +466,12 @@ public class SQLOperation extends ExecuteStatementOperation {
    */
   private HiveConf getConfigForOperation() throws HiveSQLException {
     HiveConf sqlOperationConf = getParentSession().getHiveConf();
-    if (!getConfOverlay().isEmpty() || shouldRunAsync()) {
+    if (!confOverlay.isEmpty() || shouldRunAsync()) {
       // clone the parent session config for this query
       sqlOperationConf = new HiveConf(sqlOperationConf);
 
       // apply overlay query specific settings, if any
-      for (Map.Entry<String, String> confEntry : getConfOverlay().entrySet()) {
+      for (Map.Entry<String, String> confEntry : confOverlay.entrySet()) {
         try {
           sqlOperationConf.verifyAndSet(confEntry.getKey(), confEntry.getValue());
         } catch (IllegalArgumentException e) {

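The hunk above keeps the copy-then-override pattern: clone the parent session's HiveConf, then apply the per-query overlay through verifyAndSet so invalid keys are rejected. A hedged sketch of the same shape with plain Hadoop Configuration (which skips HiveConf's validation; the class and method names here are illustrative):

    import java.util.Map;

    import org.apache.hadoop.conf.Configuration;

    final class ConfOverlayDemo {
      // Clone the parent configuration and apply per-query overrides, leaving
      // the shared session configuration untouched.
      static Configuration withOverlay(Configuration parent, Map<String, String> overlay) {
        Configuration perQuery = new Configuration(parent);   // copy constructor
        for (Map.Entry<String, String> e : overlay.entrySet()) {
          perQuery.set(e.getKey(), e.getValue());
        }
        return perQuery;
      }
    }
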
http://git-wip-us.apache.org/repos/asf/hive/blob/f15d4e10/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java b/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java
index 2d784f0..a14908b 100644
--- a/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java
+++ b/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java
@@ -443,7 +443,6 @@ public class HiveSessionImpl implements HiveSession {
     if (queryId == null || queryId.isEmpty()) {
       queryId = QueryPlan.makeQueryId();
       confOverlay.put(HiveConf.ConfVars.HIVEQUERYID.varname, queryId);
-      sessionState.getConf().setVar(HiveConf.ConfVars.HIVEQUERYID, queryId);
     }
 
     OperationManager operationManager = getOperationManager();

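With the removed line, the generated id lives only in confOverlay; nothing query-specific is written back into the shared SessionState conf. The fallback logic reduces to the shape below (a sketch; the id format is illustrative, Hive's real one comes from QueryPlan.makeQueryId()):

    import java.util.Map;
    import java.util.UUID;

    final class QueryIdDemo {
      static String ensureQueryId(Map<String, String> confOverlay) {
        String queryId = confOverlay.get("hive.query.id");
        if (queryId == null || queryId.isEmpty()) {
          queryId = "hive_" + UUID.randomUUID();       // illustrative format
          confOverlay.put("hive.query.id", queryId);   // overlay only, never session conf
        }
        return queryId;
      }
    }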

[7/7] hive git commit: HIVE-12175: Upgrade Kryo version to 3.0.x (Prasanth Jayachandran reviewed by Ashutosh Chauhan)

Posted by om...@apache.org.
HIVE-12175: Upgrade Kryo version to 3.0.x (Prasanth Jayachandran reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/adbc0ab6
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/adbc0ab6
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/adbc0ab6

Branch: refs/heads/master-fixed
Commit: adbc0ab6aeff848dbcee83d565febd40797300c2
Parents: e34588e
Author: Prasanth Jayachandran <j....@gmail.com>
Authored: Tue Nov 24 12:43:46 2015 -0600
Committer: Owen O'Malley <om...@apache.org>
Committed: Tue Nov 24 12:10:10 2015 -0800

----------------------------------------------------------------------
 itests/qtest-accumulo/pom.xml                   |   2 +-
 pom.xml                                         |   6 +-
 ql/pom.xml                                      |  36 +++--
 .../apache/hadoop/hive/ql/exec/Utilities.java   | 145 +++++++++++++++++--
 .../org/apache/hadoop/hive/ql/plan/MapWork.java |  15 --
 .../apache/hadoop/hive/ql/plan/ReduceWork.java  |   5 -
 spark-client/pom.xml                            |  28 ++--
 .../hive/spark/client/rpc/KryoMessageCodec.java |  11 +-
 8 files changed, 185 insertions(+), 63 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/adbc0ab6/itests/qtest-accumulo/pom.xml
----------------------------------------------------------------------
diff --git a/itests/qtest-accumulo/pom.xml b/itests/qtest-accumulo/pom.xml
index 7403a15..f7325dc 100644
--- a/itests/qtest-accumulo/pom.xml
+++ b/itests/qtest-accumulo/pom.xml
@@ -123,7 +123,7 @@
     <!-- Declare hive-exec dependencies that were shaded in instead of
        being listed as dependencies -->
     <dependency>
-      <groupId>com.esotericsoftware.kryo</groupId>
+      <groupId>com.esotericsoftware</groupId>
       <artifactId>kryo</artifactId>
       <version>${kryo.version}</version>
       <scope>test</scope>

http://git-wip-us.apache.org/repos/asf/hive/blob/adbc0ab6/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index c6df4a5..c38c10f 100644
--- a/pom.xml
+++ b/pom.xml
@@ -144,7 +144,7 @@
     <jodd.version>3.5.2</jodd.version>
     <json.version>20090211</json.version>
     <junit.version>4.11</junit.version>
-    <kryo.version>2.22</kryo.version>
+    <kryo.version>3.0.3</kryo.version>
     <libfb303.version>0.9.3</libfb303.version>
     <libthrift.version>0.9.3</libthrift.version>
     <log4j2.version>2.4</log4j2.version>
@@ -228,8 +228,8 @@
     <dependencies>
       <!-- dependencies are always listed in sorted order by groupId, artifactId -->
       <dependency>
-        <groupId>com.esotericsoftware.kryo</groupId>
-        <artifactId>kryo</artifactId>
+        <groupId>com.esotericsoftware</groupId>
+        <artifactId>kryo-shaded</artifactId>
         <version>${kryo.version}</version>
       </dependency>
       <dependency>

http://git-wip-us.apache.org/repos/asf/hive/blob/adbc0ab6/ql/pom.xml
----------------------------------------------------------------------
diff --git a/ql/pom.xml b/ql/pom.xml
index 9420a62..d893099 100644
--- a/ql/pom.xml
+++ b/ql/pom.xml
@@ -72,8 +72,8 @@
     </dependency>
     <!-- inter-project -->
     <dependency>
-      <groupId>com.esotericsoftware.kryo</groupId>
-      <artifactId>kryo</artifactId>
+      <groupId>com.esotericsoftware</groupId>
+      <artifactId>kryo-shaded</artifactId>
       <version>${kryo.version}</version>
     </dependency>
     <dependency>
@@ -594,16 +594,20 @@
       <artifactId>spark-core_${scala.binary.version}</artifactId>
       <version>${spark.version}</version>
       <optional>true</optional>
-        <exclusions>
-             <exclusion>
-            <groupId>org.slf4j</groupId>
-            <artifactId>slf4j-log4j12</artifactId>
-          </exclusion>
-          <exclusion>
-            <groupId>commmons-logging</groupId>
-            <artifactId>commons-logging</artifactId>
-          </exclusion>
-        </exclusions>
+      <exclusions>
+       <exclusion>
+         <groupId>com.esotericsoftware.kryo</groupId>
+         <artifactId>kryo</artifactId>
+       </exclusion>
+       <exclusion>
+         <groupId>org.slf4j</groupId>
+         <artifactId>slf4j-log4j12</artifactId>
+       </exclusion>
+       <exclusion>
+         <groupId>commons-logging</groupId>
+         <artifactId>commons-logging</artifactId>
+       </exclusion>
+     </exclusions>
    </dependency>
     <dependency>
       <groupId>com.sun.jersey</groupId>
@@ -746,7 +750,9 @@
                   <include>org.apache.hive:hive-serde</include>
                   <include>org.apache.hive:hive-llap-client</include>
                   <include>org.apache.hive:hive-metastore</include>
-                  <include>com.esotericsoftware.kryo:kryo</include>
+                  <include>com.esotericsoftware:kryo-shaded</include>
+                  <include>com.esotericsoftware:minlog</include>
+                  <include>org.objenesis:objenesis</include>
                   <include>org.apache.parquet:parquet-hadoop-bundle</include>
                   <include>org.apache.thrift:libthrift</include>
                   <include>org.apache.thrift:libfb303</include>
@@ -779,6 +785,10 @@
                   <pattern>com.esotericsoftware</pattern>
                   <shadedPattern>org.apache.hive.com.esotericsoftware</shadedPattern>
                 </relocation>
+                <relocation>
+                  <pattern>org.objenesis</pattern>
+                  <shadedPattern>org.apache.hive.org.objenesis</shadedPattern>
+                </relocation>
               </relocations>
             </configuration>
           </execution>

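The shade-plugin changes mirror the dependency swap: kryo-shaded plus its minlog and objenesis companions are bundled into hive-exec, and both com.esotericsoftware and org.objenesis are relocated under org.apache.hive so Hive's copy cannot collide with Spark's. When in doubt about which copy actually got loaded at runtime, a quick check like the following helps (an illustrative diagnostic, not part of the build):

    // Prints the jar a (possibly relocated) class was loaded from.
    public final class WhichJar {
      public static void main(String[] args) throws Exception {
        Class<?> c = Class.forName("com.esotericsoftware.kryo.Kryo");
        System.out.println(c.getProtectionDomain().getCodeSource().getLocation());
      }
    }
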
http://git-wip-us.apache.org/repos/asf/hive/blob/adbc0ab6/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
index 9dbb45a..8b8cf6d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
@@ -37,6 +37,8 @@ import java.io.InputStream;
 import java.io.OutputStream;
 import java.io.Serializable;
 import java.io.UnsupportedEncodingException;
+import java.lang.reflect.Array;
+import java.lang.reflect.Field;
 import java.net.URI;
 import java.net.URL;
 import java.net.URLClassLoader;
@@ -87,8 +89,6 @@ import org.apache.commons.lang.StringUtils;
 import org.apache.commons.lang.WordUtils;
 import org.apache.commons.lang3.StringEscapeUtils;
 import org.apache.commons.lang3.tuple.Pair;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.filecache.DistributedCache;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@@ -138,7 +138,6 @@ import org.apache.hadoop.hive.ql.io.ReworkMapredInputFormat;
 import org.apache.hadoop.hive.ql.io.SelfDescribingInputFormatInterface;
 import org.apache.hadoop.hive.ql.io.merge.MergeFileMapper;
 import org.apache.hadoop.hive.ql.io.merge.MergeFileWork;
-import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat;
 import org.apache.hadoop.hive.ql.io.rcfile.stats.PartialScanMapper;
 import org.apache.hadoop.hive.ql.io.rcfile.stats.PartialScanWork;
 import org.apache.hadoop.hive.ql.io.rcfile.truncate.ColumnTruncateMapper;
@@ -182,6 +181,9 @@ import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils;
+import org.apache.hadoop.hive.serde2.objectinspector.StandardConstantListObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.StandardConstantMapObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.StandardConstantStructObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.StandardStructObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.StructField;
 import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
@@ -207,12 +209,14 @@ import org.apache.hadoop.mapred.SequenceFileOutputFormat;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.Shell;
 import org.apache.hive.common.util.ReflectionUtil;
+import org.objenesis.strategy.StdInstantiatorStrategy;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.esotericsoftware.kryo.Kryo;
 import com.esotericsoftware.kryo.io.Input;
 import com.esotericsoftware.kryo.io.Output;
 import com.esotericsoftware.kryo.serializers.FieldSerializer;
-import com.esotericsoftware.shaded.org.objenesis.strategy.StdInstantiatorStrategy;
 import com.google.common.base.Preconditions;
 
 /**
@@ -1097,7 +1101,8 @@ public final class Utilities {
 
   // Kryo is not thread-safe,
   // Also new Kryo() is expensive, so we want to do it just once.
-  public static ThreadLocal<Kryo> runtimeSerializationKryo = new ThreadLocal<Kryo>() {
+  public static ThreadLocal<Kryo>
+      runtimeSerializationKryo = new ThreadLocal<Kryo>() {
     @Override
     protected Kryo initialValue() {
       Kryo kryo = new Kryo();
@@ -1105,10 +1110,22 @@ public final class Utilities {
       kryo.register(java.sql.Date.class, new SqlDateSerializer());
       kryo.register(java.sql.Timestamp.class, new TimestampSerializer());
       kryo.register(Path.class, new PathSerializer());
-      kryo.setInstantiatorStrategy(new StdInstantiatorStrategy());
+      kryo.register(Arrays.asList("").getClass(), new ArraysAsListSerializer());
+      ((Kryo.DefaultInstantiatorStrategy) kryo.getInstantiatorStrategy()).setFallbackInstantiatorStrategy(
+          new StdInstantiatorStrategy());
       removeField(kryo, Operator.class, "colExprMap");
-      removeField(kryo, ColumnInfo.class, "objectInspector");
       removeField(kryo, AbstractOperatorDesc.class, "statistics");
+      kryo.register(MapWork.class);
+      kryo.register(ReduceWork.class);
+      kryo.register(TableDesc.class);
+      kryo.register(UnionOperator.class);
+      kryo.register(FileSinkOperator.class);
+      kryo.register(HiveIgnoreKeyTextOutputFormat.class);
+      kryo.register(StandardConstantListObjectInspector.class);
+      kryo.register(StandardConstantMapObjectInspector.class);
+      kryo.register(StandardConstantStructObjectInspector.class);
+      kryo.register(SequenceFileInputFormat.class);
+      kryo.register(HiveSequenceFileOutputFormat.class);
       return kryo;
     };
   };
@@ -1127,15 +1144,25 @@ public final class Utilities {
       kryo.register(java.sql.Date.class, new SqlDateSerializer());
       kryo.register(java.sql.Timestamp.class, new TimestampSerializer());
       kryo.register(Path.class, new PathSerializer());
-      kryo.setInstantiatorStrategy(new StdInstantiatorStrategy());
+      kryo.register(Arrays.asList("").getClass(), new ArraysAsListSerializer());
+      ((Kryo.DefaultInstantiatorStrategy) kryo.getInstantiatorStrategy()).setFallbackInstantiatorStrategy(new StdInstantiatorStrategy());
       removeField(kryo, Operator.class, "colExprMap");
       removeField(kryo, ColumnInfo.class, "objectInspector");
+      removeField(kryo, AbstractOperatorDesc.class, "statistics");
       kryo.register(SparkEdgeProperty.class);
       kryo.register(MapWork.class);
       kryo.register(ReduceWork.class);
       kryo.register(SparkWork.class);
       kryo.register(TableDesc.class);
       kryo.register(Pair.class);
+      kryo.register(UnionOperator.class);
+      kryo.register(FileSinkOperator.class);
+      kryo.register(HiveIgnoreKeyTextOutputFormat.class);
+      kryo.register(StandardConstantListObjectInspector.class);
+      kryo.register(StandardConstantMapObjectInspector.class);
+      kryo.register(StandardConstantStructObjectInspector.class);
+      kryo.register(SequenceFileInputFormat.class);
+      kryo.register(HiveSequenceFileOutputFormat.class);
       return kryo;
     };
   };
@@ -1149,11 +1176,111 @@ public final class Utilities {
       kryo.register(java.sql.Date.class, new SqlDateSerializer());
       kryo.register(java.sql.Timestamp.class, new TimestampSerializer());
       kryo.register(Path.class, new PathSerializer());
-      kryo.setInstantiatorStrategy(new StdInstantiatorStrategy());
+      kryo.register(Arrays.asList("").getClass(), new ArraysAsListSerializer());
+      ((Kryo.DefaultInstantiatorStrategy) kryo.getInstantiatorStrategy()).setFallbackInstantiatorStrategy(
+          new StdInstantiatorStrategy());
+      removeField(kryo, Operator.class, "colExprMap");
+      removeField(kryo, AbstractOperatorDesc.class, "statistics");
+      kryo.register(MapWork.class);
+      kryo.register(ReduceWork.class);
+      kryo.register(TableDesc.class);
+      kryo.register(UnionOperator.class);
+      kryo.register(FileSinkOperator.class);
+      kryo.register(HiveIgnoreKeyTextOutputFormat.class);
+      kryo.register(StandardConstantListObjectInspector.class);
+      kryo.register(StandardConstantMapObjectInspector.class);
+      kryo.register(StandardConstantStructObjectInspector.class);
+      kryo.register(SequenceFileInputFormat.class);
+      kryo.register(HiveSequenceFileOutputFormat.class);
       return kryo;
     };
   };
 
+  /**
+   * A kryo {@link Serializer} for lists created via {@link Arrays#asList(Object...)}.
+   * <p>
+   * Note: This serializer does not support cyclic references, so if one of the
+   * serialized objects holds a reference back to the list itself, deserialization
+   * may fail.
+   * </p>
+   *
+   * This is from the kryo-serializers package, inlined here to avoid classpath issues.
+   */
+  private static class ArraysAsListSerializer extends com.esotericsoftware.kryo.Serializer<List<?>> {
+
+    private Field _arrayField;
+
+    public ArraysAsListSerializer() {
+      try {
+        _arrayField = Class.forName( "java.util.Arrays$ArrayList" ).getDeclaredField( "a" );
+        _arrayField.setAccessible( true );
+      } catch ( final Exception e ) {
+        throw new RuntimeException( e );
+      }
+      // Immutable causes #copy(obj) to return the original object
+      setImmutable(true);
+    }
+
+    @Override
+    public List<?> read(final Kryo kryo, final Input input, final Class<List<?>> type) {
+      final int length = input.readInt(true);
+      Class<?> componentType = kryo.readClass( input ).getType();
+      if (componentType.isPrimitive()) {
+        componentType = getPrimitiveWrapperClass(componentType);
+      }
+      try {
+        final Object items = Array.newInstance( componentType, length );
+        for( int i = 0; i < length; i++ ) {
+          Array.set(items, i, kryo.readClassAndObject( input ));
+        }
+        return Arrays.asList( (Object[])items );
+      } catch ( final Exception e ) {
+        throw new RuntimeException( e );
+      }
+    }
+
+    @Override
+    public void write(final Kryo kryo, final Output output, final List<?> obj) {
+      try {
+        final Object[] array = (Object[]) _arrayField.get( obj );
+        output.writeInt(array.length, true);
+        final Class<?> componentType = array.getClass().getComponentType();
+        kryo.writeClass( output, componentType );
+        for( final Object item : array ) {
+          kryo.writeClassAndObject( output, item );
+        }
+      } catch ( final RuntimeException e ) {
+        // Don't eat and wrap RuntimeExceptions because the ObjectBuffer.write...
+        // handles SerializationException specifically (resizing the buffer)...
+        throw e;
+      } catch ( final Exception e ) {
+        throw new RuntimeException( e );
+      }
+    }
+
+    private Class<?> getPrimitiveWrapperClass(final Class<?> c) {
+      if (c.isPrimitive()) {
+        if (c.equals(Long.TYPE)) {
+          return Long.class;
+        } else if (c.equals(Integer.TYPE)) {
+          return Integer.class;
+        } else if (c.equals(Double.TYPE)) {
+          return Double.class;
+        } else if (c.equals(Float.TYPE)) {
+          return Float.class;
+        } else if (c.equals(Boolean.TYPE)) {
+          return Boolean.class;
+        } else if (c.equals(Character.TYPE)) {
+          return Character.class;
+        } else if (c.equals(Short.TYPE)) {
+          return Short.class;
+        } else if (c.equals(Byte.TYPE)) {
+          return Byte.class;
+        }
+      }
+      return c;
+    }
+  }
+
   public static TableDesc defaultTd;
   static {
     // by default we expect ^A separated strings

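The three ThreadLocal blocks in Utilities all follow one pattern dictated by the upgrade: Kryo instances are expensive to build and not thread-safe, so each thread gets its own; the classes a plan can contain are pre-registered; and Objenesis is wired in as a fallback for classes without a no-arg constructor (Kryo 3 no longer bundles a shaded Objenesis, hence the import change). A condensed sketch of the setup plus a round trip (registration list abbreviated):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.util.ArrayList;
    import java.util.Arrays;

    import org.objenesis.strategy.StdInstantiatorStrategy;

    import com.esotericsoftware.kryo.Kryo;
    import com.esotericsoftware.kryo.io.Input;
    import com.esotericsoftware.kryo.io.Output;

    final class KryoPerThreadDemo {
      // Kryo is not thread-safe and construction is costly: one instance per thread.
      static final ThreadLocal<Kryo> KRYO = new ThreadLocal<Kryo>() {
        @Override
        protected Kryo initialValue() {
          Kryo kryo = new Kryo();
          // Fall back to Objenesis for classes without a no-arg constructor.
          ((Kryo.DefaultInstantiatorStrategy) kryo.getInstantiatorStrategy())
              .setFallbackInstantiatorStrategy(new StdInstantiatorStrategy());
          kryo.register(ArrayList.class);   // register expected classes up front
          return kryo;
        }
      };

      public static void main(String[] args) {
        Kryo kryo = KRYO.get();
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        Output out = new Output(bos);
        kryo.writeClassAndObject(out, new ArrayList<>(Arrays.asList("a", "b")));
        out.close();
        Input in = new Input(new ByteArrayInputStream(bos.toByteArray()));
        System.out.println(kryo.readClassAndObject(in));   // [a, b]
      }
    }

Lists produced by Arrays.asList() need the custom ArraysAsListSerializer above because Arrays$ArrayList has no no-arg constructor and a final backing array; the demo sidesteps that by copying into a regular ArrayList.
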
http://git-wip-us.apache.org/repos/asf/hive/blob/adbc0ab6/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java
index f4e5873..73e8f6d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java
@@ -31,8 +31,6 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
 
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
@@ -64,8 +62,6 @@ import com.google.common.collect.Interner;
 @SuppressWarnings({"serial", "deprecation"})
 public class MapWork extends BaseWork {
 
-  private static final Logger LOG = LoggerFactory.getLogger(MapWork.class);
-
   // use LinkedHashMap to make sure the iteration order is
   // deterministic, to ease testing
   private LinkedHashMap<String, ArrayList<String>> pathToAliases = new LinkedHashMap<String, ArrayList<String>>();
@@ -548,17 +544,6 @@ public class MapWork extends BaseWork {
     }
   }
 
-  public void logPathToAliases() {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("LOGGING PATH TO ALIASES");
-      for (Map.Entry<String, ArrayList<String>> entry: pathToAliases.entrySet()) {
-        for (String a: entry.getValue()) {
-          LOG.debug("Path: " + entry.getKey() + ", Alias: " + a);
-        }
-      }
-    }
-  }
-
   public void setDummyTableScan(boolean dummyTableScan) {
     this.dummyTableScan = dummyTableScan;
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/adbc0ab6/ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceWork.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceWork.java
index 8211346..0ac625f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceWork.java
@@ -23,11 +23,8 @@ import java.util.HashMap;
 import java.util.LinkedHashSet;
 import java.util.List;
 import java.util.Map;
-import java.util.Map.Entry;
 import java.util.Set;
 
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.ql.exec.FileSinkOperator;
 import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.hive.ql.exec.OperatorUtils;
@@ -60,8 +57,6 @@ public class ReduceWork extends BaseWork {
     super(name);
   }
 
-  private static transient final Logger LOG = LoggerFactory.getLogger(ReduceWork.class);
-
   // schema of the map-reduce 'key' object - this is homogeneous
   private TableDesc keyDesc;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/adbc0ab6/spark-client/pom.xml
----------------------------------------------------------------------
diff --git a/spark-client/pom.xml b/spark-client/pom.xml
index a0bbe56..9d2b418 100644
--- a/spark-client/pom.xml
+++ b/spark-client/pom.xml
@@ -39,8 +39,8 @@
 
   <dependencies>
     <dependency>
-      <groupId>com.esotericsoftware.kryo</groupId>
-      <artifactId>kryo</artifactId>
+      <groupId>com.esotericsoftware</groupId>
+      <artifactId>kryo-shaded</artifactId>
     </dependency>
     <dependency>
       <groupId>com.google.guava</groupId>
@@ -59,16 +59,20 @@
       <groupId>org.apache.spark</groupId>
       <artifactId>spark-core_${scala.binary.version}</artifactId>
       <version>${spark.version}</version>
-        <exclusions>
-             <exclusion>
-            <groupId>org.slf4j</groupId>
-            <artifactId>slf4j-log4j12</artifactId>
-          </exclusion>
-          <exclusion>
-            <groupId>commmons-logging</groupId>
-            <artifactId>commons-logging</artifactId>
-          </exclusion>
-      </exclusions>
+      <exclusions>
+       <exclusion>
+         <groupId>com.esotericsoftware.kryo</groupId>
+         <artifactId>kryo</artifactId>
+       </exclusion>
+       <exclusion>
+         <groupId>org.slf4j</groupId>
+         <artifactId>slf4j-log4j12</artifactId>
+       </exclusion>
+       <exclusion>
+         <groupId>commons-logging</groupId>
+         <artifactId>commons-logging</artifactId>
+       </exclusion>
+     </exclusions>
    </dependency>
     <dependency>
       <groupId>junit</groupId>

http://git-wip-us.apache.org/repos/asf/hive/blob/adbc0ab6/spark-client/src/main/java/org/apache/hive/spark/client/rpc/KryoMessageCodec.java
----------------------------------------------------------------------
diff --git a/spark-client/src/main/java/org/apache/hive/spark/client/rpc/KryoMessageCodec.java b/spark-client/src/main/java/org/apache/hive/spark/client/rpc/KryoMessageCodec.java
index 197f113..9e789cf 100644
--- a/spark-client/src/main/java/org/apache/hive/spark/client/rpc/KryoMessageCodec.java
+++ b/spark-client/src/main/java/org/apache/hive/spark/client/rpc/KryoMessageCodec.java
@@ -23,19 +23,20 @@ import java.nio.ByteBuffer;
 import java.util.Arrays;
 import java.util.List;
 
+import org.objenesis.strategy.StdInstantiatorStrategy;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import com.esotericsoftware.kryo.Kryo;
 import com.esotericsoftware.kryo.io.ByteBufferInputStream;
 import com.esotericsoftware.kryo.io.Input;
 import com.esotericsoftware.kryo.io.Output;
-import com.esotericsoftware.shaded.org.objenesis.strategy.StdInstantiatorStrategy;
 import com.google.common.base.Preconditions;
+
 import io.netty.buffer.ByteBuf;
 import io.netty.channel.ChannelHandlerContext;
 import io.netty.handler.codec.ByteToMessageCodec;
 
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 /**
  * Codec that serializes / deserializes objects using Kryo. Objects are encoded with a 4-byte
  * header with the length of the serialized data.
@@ -59,7 +60,7 @@ class KryoMessageCodec extends ByteToMessageCodec<Object> {
         kryo.register(klass, REG_ID_BASE + count);
         count++;
       }
-      kryo.setInstantiatorStrategy(new StdInstantiatorStrategy());
+      kryo.setInstantiatorStrategy(new Kryo.DefaultInstantiatorStrategy(new StdInstantiatorStrategy()));
       return kryo;
     }
   };

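KryoMessageCodec's framing is the classic length-prefixed layout: a 4-byte length followed by the Kryo-serialized body. The encode side reduces to something like this sketch (plain Kryo and ByteBuffer; Netty's ByteToMessageCodec plumbing and the codec's size checks are omitted):

    import java.nio.ByteBuffer;

    import com.esotericsoftware.kryo.Kryo;
    import com.esotericsoftware.kryo.io.Output;

    final class LengthPrefixedKryo {
      static ByteBuffer encode(Kryo kryo, Object msg) {
        Output out = new Output(64, -1);     // small buffer, unlimited growth
        kryo.writeClassAndObject(out, msg);
        byte[] body = out.toBytes();
        ByteBuffer frame = ByteBuffer.allocate(4 + body.length);
        frame.putInt(body.length);           // the 4-byte length header
        frame.put(body);
        frame.flip();
        return frame;
      }
    }

Note also the instantiator-strategy change in the hunk above: Kryo 3 wants StdInstantiatorStrategy wrapped in Kryo.DefaultInstantiatorStrategy, so registered no-arg constructors are still preferred and Objenesis is only the fallback.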

[5/7] hive git commit: HIVE-12489 : Analyze for partition fails if partition value has special characters (Thomas Friedrich via Ashutosh Chauhan)

Posted by om...@apache.org.
HIVE-12489 : Analyze for partition fails if partition value has special characters (Thomas Friedrich via Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan <ha...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/cf6fbbd2
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/cf6fbbd2
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/cf6fbbd2

Branch: refs/heads/master-fixed
Commit: cf6fbbd2da8f3eebf1054c3da1bb76b6cb540bd1
Parents: f15d4e1
Author: Thomas Friedrich <tf...@yahoo.com>
Authored: Fri Nov 20 13:55:00 2015 -0800
Committer: Owen O'Malley <om...@apache.org>
Committed: Tue Nov 24 12:10:09 2015 -0800

----------------------------------------------------------------------
 .../ql/parse/ColumnStatsSemanticAnalyzer.java   |  6 +--
 .../queries/clientpositive/analyze_tbl_part.q   | 12 +++++
 .../clientpositive/analyze_tbl_part.q.out       | 52 ++++++++++++++++++++
 3 files changed, 67 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/cf6fbbd2/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java
index 543bc0f..832a5bc 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java
@@ -199,7 +199,7 @@ public class ColumnStatsSemanticAnalyzer extends SemanticAnalyzer {
     if (partColType.equals(serdeConstants.STRING_TYPE_NAME) ||
         partColType.contains(serdeConstants.VARCHAR_TYPE_NAME) ||
         partColType.contains(serdeConstants.CHAR_TYPE_NAME)) {
-      returnVal = "'" + partVal + "'";
+      returnVal = "'" + escapeSQLString(partVal) + "'";
     } else if (partColType.equals(serdeConstants.TINYINT_TYPE_NAME)) {
       returnVal = partVal+"Y";
     } else if (partColType.equals(serdeConstants.SMALLINT_TYPE_NAME)) {
@@ -212,10 +212,10 @@ public class ColumnStatsSemanticAnalyzer extends SemanticAnalyzer {
       returnVal = partVal + "BD";
     } else if (partColType.equals(serdeConstants.DATE_TYPE_NAME) ||
         partColType.equals(serdeConstants.TIMESTAMP_TYPE_NAME)) {
-      returnVal = partColType + " '" + partVal + "'";
+      returnVal = partColType + " '" + escapeSQLString(partVal) + "'";
     } else {
       // for other, rarely used types, just quote the value
-      returnVal = "'" + partVal + "'";
+      returnVal = "'" + escapeSQLString(partVal) + "'";
     }
 
     return returnVal;

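The fix routes every quoted partition value through escapeSQLString (a helper inherited from BaseSemanticAnalyzer), since a quote inside the value would otherwise terminate the generated literal early. A toy illustration of the failure mode and the repair (the escaping shown is simplified, not Hive's full rule set):

    final class PartValQuoting {
      // Simplified: escape backslashes and single quotes before quoting.
      static String quote(String partVal) {
        String escaped = partVal.replace("\\", "\\\\").replace("'", "\\'");
        return "'" + escaped + "'";
      }

      public static void main(String[] args) {
        System.out.println("'" + "p'1" + "'");   // 'p'1'  -- broken literal
        System.out.println(quote("p'1"));        // 'p\'1' -- one well-formed string
      }
    }
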
http://git-wip-us.apache.org/repos/asf/hive/blob/cf6fbbd2/ql/src/test/queries/clientpositive/analyze_tbl_part.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/analyze_tbl_part.q b/ql/src/test/queries/clientpositive/analyze_tbl_part.q
index c9e45b6..ecf1389 100644
--- a/ql/src/test/queries/clientpositive/analyze_tbl_part.q
+++ b/ql/src/test/queries/clientpositive/analyze_tbl_part.q
@@ -15,3 +15,15 @@ ANALYZE TABLE src_stat_part partition (partitionId) COMPUTE STATISTICS for colum
 describe formatted src_stat_part.key PARTITION(partitionId=1);
 
 describe formatted src_stat_part.value PARTITION(partitionId=2);
+
+create table src_stat_string_part(key string, value string) partitioned by (partitionName string);
+
+insert overwrite table src_stat_string_part partition (partitionName="p'1")
+select * from src1;
+
+insert overwrite table src_stat_string_part partition (partitionName="p\"1")
+select * from src1;
+
+ANALYZE TABLE src_stat_string_part partition (partitionName="p'1") COMPUTE STATISTICS for columns key, value;
+
+ANALYZE TABLE src_stat_string_part partition (partitionName="p\"1") COMPUTE STATISTICS for columns key, value;
\ No newline at end of file

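In the expected output that follows, the partition values show up percent-escaped in path names: p'1 becomes partitionname=p%271 and p"1 becomes p%221, because the metastore escapes characters that are unsafe in directory names (Hive's actual character whitelist lives in its FileUtils escaping code). The encoding itself is plain percent-hex, as this illustrative snippet shows:

    final class PartPathEscapeDemo {
      // Percent-escape a single character: ' -> %27, " -> %22.
      static String escapeChar(char c) {
        return String.format("%%%02X", (int) c);
      }

      public static void main(String[] args) {
        System.out.println("p" + escapeChar('\'') + "1");   // p%271
        System.out.println("p" + escapeChar('"') + "1");    // p%221
      }
    }
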
http://git-wip-us.apache.org/repos/asf/hive/blob/cf6fbbd2/ql/src/test/results/clientpositive/analyze_tbl_part.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/analyze_tbl_part.q.out b/ql/src/test/results/clientpositive/analyze_tbl_part.q.out
index 40b926c..464bdf7 100644
--- a/ql/src/test/results/clientpositive/analyze_tbl_part.q.out
+++ b/ql/src/test/results/clientpositive/analyze_tbl_part.q.out
@@ -81,3 +81,55 @@ POSTHOOK: Input: default@src_stat_part
 # col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
 	 	 	 	 	 	 	 	 	 	 
 value               	string              	                    	                    	0                   	14                  	4.92                	7                   	                    	                    	from deserializer   
+PREHOOK: query: create table src_stat_string_part(key string, value string) partitioned by (partitionName string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_stat_string_part
+POSTHOOK: query: create table src_stat_string_part(key string, value string) partitioned by (partitionName string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_stat_string_part
+PREHOOK: query: insert overwrite table src_stat_string_part partition (partitionName="p'1")
+select * from src1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src1
+PREHOOK: Output: default@src_stat_string_part@partitionname=p%271
+POSTHOOK: query: insert overwrite table src_stat_string_part partition (partitionName="p'1")
+select * from src1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src1
+POSTHOOK: Output: default@src_stat_string_part@partitionname=p%271
+POSTHOOK: Lineage: src_stat_string_part PARTITION(partitionname=p'1).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_stat_string_part PARTITION(partitionname=p'1).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert overwrite table src_stat_string_part partition (partitionName="p\"1")
+select * from src1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src1
+PREHOOK: Output: default@src_stat_string_part@partitionname=p%221
+POSTHOOK: query: insert overwrite table src_stat_string_part partition (partitionName="p\"1")
+select * from src1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src1
+POSTHOOK: Output: default@src_stat_string_part@partitionname=p%221
+POSTHOOK: Lineage: src_stat_string_part PARTITION(partitionname=p"1).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_stat_string_part PARTITION(partitionname=p"1).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: ANALYZE TABLE src_stat_string_part partition (partitionName="p'1") COMPUTE STATISTICS for columns key, value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_stat_string_part
+PREHOOK: Input: default@src_stat_string_part@partitionname=p%271
+#### A masked pattern was here ####
+POSTHOOK: query: ANALYZE TABLE src_stat_string_part partition (partitionName="p'1") COMPUTE STATISTICS for columns key, value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_stat_string_part
+POSTHOOK: Input: default@src_stat_string_part@partitionname=p%271
+#### A masked pattern was here ####
+PREHOOK: query: ANALYZE TABLE src_stat_string_part partition (partitionName="p\"1") COMPUTE STATISTICS for columns key, value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_stat_string_part
+PREHOOK: Input: default@src_stat_string_part@partitionname=p%221
+#### A masked pattern was here ####
+POSTHOOK: query: ANALYZE TABLE src_stat_string_part partition (partitionName="p\"1") COMPUTE STATISTICS for columns key, value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_stat_string_part
+POSTHOOK: Input: default@src_stat_string_part@partitionname=p%221
+#### A masked pattern was here ####