Posted to commits@hive.apache.org by mm...@apache.org on 2016/05/03 01:59:40 UTC

[29/40] hive git commit: HIVE-12878: Support Vectorization for TEXTFILE and other formats (Matt McCline, reviewed by Sergey Shelukhin)
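
A minimal sketch of what this change is about (the table and data below are
illustrative, not from this patch; the configuration names are the Hive 2.x
vectorization switches, and the two *serde.deserialize flags are the ones
associated with this work -- treat the exact names as an assumption, not as
something this diff shows):

    -- Vectorized execution previously needed a vectorized input format such
    -- as ORC; HIVE-12878 lets row-serde formats like TEXTFILE feed
    -- vectorized row batches too.
    SET hive.vectorized.execution.enabled=true;
    SET hive.vectorized.use.vector.serde.deserialize=true;
    SET hive.vectorized.use.row.serde.deserialize=true;

    CREATE TABLE t_text(a INT, b STRING) STORED AS TEXTFILE;
    INSERT INTO t_text VALUES (1, 'one'), (2, 'two');

    -- When the scan vectorizes, the EXPLAIN plan carries an
    -- "Execution mode: vectorized" marker.
    SELECT a, COUNT(1) FROM t_text GROUP BY a ORDER BY a LIMIT 10;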

http://git-wip-us.apache.org/repos/asf/hive/blob/d5285d8e/ql/src/test/results/clientpositive/avro_schema_evolution_native.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/avro_schema_evolution_native.q.out b/ql/src/test/results/clientpositive/avro_schema_evolution_native.q.out
index 98e541a..852a679 100644
--- a/ql/src/test/results/clientpositive/avro_schema_evolution_native.q.out
+++ b/ql/src/test/results/clientpositive/avro_schema_evolution_native.q.out
@@ -81,6 +81,44 @@ POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=6).title SIMPLE [(ep
 POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=9).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:initial date), ]
 POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=9).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ]
 POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=9).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:episode title), ]
+title	air_date	doctor	doctor_pt
+PREHOOK: query: DESCRIBE FORMATTED episodes_partitioned
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@episodes_partitioned
+POSTHOOK: query: DESCRIBE FORMATTED episodes_partitioned
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@episodes_partitioned
+col_name	data_type	comment
+# col_name            	data_type           	comment             
+	 	 
+title               	string              	episode title       
+air_date            	string              	initial date        
+doctor              	int                 	main actor playing the Doctor in episode
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+	 	 
+doctor_pt           	int                 	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.avro.AvroSerDe	 
+InputFormat:        	org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
 PREHOOK: query: ALTER TABLE episodes_partitioned
 SET SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe'
 WITH
@@ -149,6 +187,69 @@ SERDEPROPERTIES ('avro.schema.literal'='{
 POSTHOOK: type: ALTERTABLE_SERIALIZER
 POSTHOOK: Input: default@episodes_partitioned
 POSTHOOK: Output: default@episodes_partitioned
+PREHOOK: query: DESCRIBE FORMATTED episodes_partitioned
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@episodes_partitioned
+POSTHOOK: query: DESCRIBE FORMATTED episodes_partitioned
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@episodes_partitioned
+col_name	data_type	comment
+# col_name            	data_type           	comment             
+	 	 
+title               	string              	episode title       
+air_date            	string              	initial date        
+doctor              	int                 	main actor playing the Doctor in episode
+value               	int                 	default value       
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+	 	 
+doctor_pt           	int                 	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.avro.AvroSerDe	 
+InputFormat:        	org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	avro.schema.literal 	{\n  \"namespace\": \"testing.hive.avro.serde\",\n  \"name\": \"episodes\",\n  \"type\": \"record\",\n  \"fields\": [\n    {\n      \"name\":\"title\",\n      \"type\":\"string\",\n      \"doc\":\"episode title\"\n    },\n    {\n      \"name\":\"air_date\",\n      \"type\":\"string\",\n      \"doc\":\"initial date\"\n    },\n    {\n      \"name\":\"doctor\",\n      \"type\":\"int\",\n      \"doc\":\"main actor playing the Doctor in episode\"\n    },\n     {\n       \"name\":\"value\",\n       \"type\":\"int\",\n       \"default\":0,\n       \"doc\":\"default value\"\n     }\n  ]\n}
+	serialization.format	1                   
+PREHOOK: query: EXPLAIN
+SELECT * FROM episodes_partitioned WHERE doctor_pt > 6
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT * FROM episodes_partitioned WHERE doctor_pt > 6
+POSTHOOK: type: QUERY
+Explain
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: episodes_partitioned
+          Statistics: Num rows: 3 Data size: 889 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: title (type: string), air_date (type: string), doctor (type: int), value (type: int), doctor_pt (type: int)
+            outputColumnNames: _col0, _col1, _col2, _col3, _col4
+            Statistics: Num rows: 3 Data size: 889 Basic stats: COMPLETE Column stats: NONE
+            ListSink
+
 PREHOOK: query: SELECT * FROM episodes_partitioned WHERE doctor_pt > 6
 PREHOOK: type: QUERY
 PREHOOK: Input: default@episodes_partitioned
@@ -161,6 +262,7 @@ POSTHOOK: Input: default@episodes_partitioned
 POSTHOOK: Input: default@episodes_partitioned@doctor_pt=11
 POSTHOOK: Input: default@episodes_partitioned@doctor_pt=9
 #### A masked pattern was here ####
+episodes_partitioned.title	episodes_partitioned.air_date	episodes_partitioned.doctor	episodes_partitioned.value	episodes_partitioned.doctor_pt
 Rose	26 March 2005	9	0	9
 The Doctor's Wife	14 May 2011	11	0	11
 The Eleventh Hour	3 April 2010	11	0	11
@@ -188,6 +290,7 @@ POSTHOOK: Input: default@episodes_partitioned@doctor_pt=5
 POSTHOOK: Input: default@episodes_partitioned@doctor_pt=6
 POSTHOOK: Input: default@episodes_partitioned@doctor_pt=9
 #### A masked pattern was here ####
+episodes_partitioned.title	episodes_partitioned.air_date	episodes_partitioned.doctor	episodes_partitioned.value	episodes_partitioned.doctor_pt
 An Unearthly Child	23 November 1963	1	0	1
 Horror of Fang Rock	3 September 1977	4	0	4
 Rose	26 March 2005	9	0	9
@@ -205,6 +308,7 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@episodes_partitioned
 POSTHOOK: Input: default@episodes_partitioned@doctor_pt=6
 #### A masked pattern was here ####
+episodes_partitioned.title	episodes_partitioned.air_date	episodes_partitioned.doctor	episodes_partitioned.value	episodes_partitioned.doctor_pt
 The Mysterious Planet	6 September 1986	6	0	6
 PREHOOK: query: -- Fetch w/non-existent partition
 SELECT * FROM episodes_partitioned WHERE doctor_pt = 7 LIMIT 5
@@ -216,3 +320,105 @@ SELECT * FROM episodes_partitioned WHERE doctor_pt = 7 LIMIT 5
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@episodes_partitioned
 #### A masked pattern was here ####
+episodes_partitioned.title	episodes_partitioned.air_date	episodes_partitioned.doctor	episodes_partitioned.value	episodes_partitioned.doctor_pt
+PREHOOK: query: EXPLAIN
+SELECT * FROM episodes_partitioned WHERE doctor_pt > 6
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT * FROM episodes_partitioned WHERE doctor_pt > 6
+POSTHOOK: type: QUERY
+Explain
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: episodes_partitioned
+            Statistics: Num rows: 3 Data size: 889 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: title (type: string), air_date (type: string), doctor (type: int), value (type: int), doctor_pt (type: int)
+              outputColumnNames: _col0, _col1, _col2, _col3, _col4
+              Statistics: Num rows: 3 Data size: 889 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                Statistics: Num rows: 3 Data size: 889 Basic stats: COMPLETE Column stats: NONE
+                table:
+                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT * FROM episodes_partitioned WHERE doctor_pt > 6
+PREHOOK: type: QUERY
+PREHOOK: Input: default@episodes_partitioned
+PREHOOK: Input: default@episodes_partitioned@doctor_pt=11
+PREHOOK: Input: default@episodes_partitioned@doctor_pt=9
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM episodes_partitioned WHERE doctor_pt > 6
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@episodes_partitioned
+POSTHOOK: Input: default@episodes_partitioned@doctor_pt=11
+POSTHOOK: Input: default@episodes_partitioned@doctor_pt=9
+#### A masked pattern was here ####
+episodes_partitioned.title	episodes_partitioned.air_date	episodes_partitioned.doctor	episodes_partitioned.value	episodes_partitioned.doctor_pt
+Rose	26 March 2005	9	0	9
+The Doctor's Wife	14 May 2011	11	0	11
+The Eleventh Hour	3 April 2010	11	0	11
+PREHOOK: query: SELECT * FROM episodes_partitioned ORDER BY air_date LIMIT 5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@episodes_partitioned
+PREHOOK: Input: default@episodes_partitioned@doctor_pt=1
+PREHOOK: Input: default@episodes_partitioned@doctor_pt=11
+PREHOOK: Input: default@episodes_partitioned@doctor_pt=2
+PREHOOK: Input: default@episodes_partitioned@doctor_pt=4
+PREHOOK: Input: default@episodes_partitioned@doctor_pt=5
+PREHOOK: Input: default@episodes_partitioned@doctor_pt=6
+PREHOOK: Input: default@episodes_partitioned@doctor_pt=9
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM episodes_partitioned ORDER BY air_date LIMIT 5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@episodes_partitioned
+POSTHOOK: Input: default@episodes_partitioned@doctor_pt=1
+POSTHOOK: Input: default@episodes_partitioned@doctor_pt=11
+POSTHOOK: Input: default@episodes_partitioned@doctor_pt=2
+POSTHOOK: Input: default@episodes_partitioned@doctor_pt=4
+POSTHOOK: Input: default@episodes_partitioned@doctor_pt=5
+POSTHOOK: Input: default@episodes_partitioned@doctor_pt=6
+POSTHOOK: Input: default@episodes_partitioned@doctor_pt=9
+#### A masked pattern was here ####
+episodes_partitioned.title	episodes_partitioned.air_date	episodes_partitioned.doctor	episodes_partitioned.value	episodes_partitioned.doctor_pt
+An Unearthly Child	23 November 1963	1	0	1
+Horror of Fang Rock	3 September 1977	4	0	4
+Rose	26 March 2005	9	0	9
+The Doctor's Wife	14 May 2011	11	0	11
+The Eleventh Hour	3 April 2010	11	0	11
+PREHOOK: query: SELECT * FROM episodes_partitioned WHERE doctor_pt = 6
+PREHOOK: type: QUERY
+PREHOOK: Input: default@episodes_partitioned
+PREHOOK: Input: default@episodes_partitioned@doctor_pt=6
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM episodes_partitioned WHERE doctor_pt = 6
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@episodes_partitioned
+POSTHOOK: Input: default@episodes_partitioned@doctor_pt=6
+#### A masked pattern was here ####
+episodes_partitioned.title	episodes_partitioned.air_date	episodes_partitioned.doctor	episodes_partitioned.value	episodes_partitioned.doctor_pt
+The Mysterious Planet	6 September 1986	6	0	6
+PREHOOK: query: SELECT * FROM episodes_partitioned WHERE doctor_pt = 7 LIMIT 5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@episodes_partitioned
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM episodes_partitioned WHERE doctor_pt = 7 LIMIT 5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@episodes_partitioned
+#### A masked pattern was here ####
+episodes_partitioned.title	episodes_partitioned.air_date	episodes_partitioned.doctor	episodes_partitioned.value	episodes_partitioned.doctor_pt

http://git-wip-us.apache.org/repos/asf/hive/blob/d5285d8e/ql/src/test/results/clientpositive/bucket_groupby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/bucket_groupby.q.out b/ql/src/test/results/clientpositive/bucket_groupby.q.out
index d1414fe..ae736f9 100644
--- a/ql/src/test/results/clientpositive/bucket_groupby.q.out
+++ b/ql/src/test/results/clientpositive/bucket_groupby.q.out
@@ -41,14 +41,15 @@ POSTHOOK: Output: default@clustergroupby@ds=100
 POSTHOOK: Lineage: clustergroupby PARTITION(ds=100).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: clustergroupby PARTITION(ds=100).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: explain
-select key, count(1) from clustergroupby where ds='100' group by key limit 10
+select key, count(1) from clustergroupby where ds='100' group by key order by key limit 10
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
-select key, count(1) from clustergroupby where ds='100' group by key limit 10
+select key, count(1) from clustergroupby where ds='100' group by key order by key limit 10
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
 
 STAGE PLANS:
   Stage: Stage-1
@@ -72,7 +73,6 @@ STAGE PLANS:
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  TopN Hash Memory Usage: 0.1
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -81,6 +81,28 @@ STAGE PLANS:
           mode: mergepartial
           outputColumnNames: _col0, _col1
           Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              key expressions: _col0 (type: string)
+              sort order: +
+              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
+              value expressions: _col1 (type: bigint)
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: bigint)
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
           Limit
             Number of rows: 10
             Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
@@ -98,12 +120,12 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-PREHOOK: query: select key, count(1) from clustergroupby where ds='100' group by key limit 10
+PREHOOK: query: select key, count(1) from clustergroupby where ds='100' group by key order by key limit 10
 PREHOOK: type: QUERY
 PREHOOK: Input: default@clustergroupby
 PREHOOK: Input: default@clustergroupby@ds=100
 #### A masked pattern was here ####
-POSTHOOK: query: select key, count(1) from clustergroupby where ds='100' group by key limit 10
+POSTHOOK: query: select key, count(1) from clustergroupby where ds='100' group by key order by key limit 10
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@clustergroupby
 POSTHOOK: Input: default@clustergroupby@ds=100
@@ -146,15 +168,16 @@ POSTHOOK: Lineage: clustergroupby PARTITION(ds=101).key SIMPLE [(src)src.FieldSc
 POSTHOOK: Lineage: clustergroupby PARTITION(ds=101).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: --normal--
 explain
-select key, count(1) from clustergroupby  where ds='101'  group by key limit 10
+select key, count(1) from clustergroupby  where ds='101'  group by key order by key limit 10
 PREHOOK: type: QUERY
 POSTHOOK: query: --normal--
 explain
-select key, count(1) from clustergroupby  where ds='101'  group by key limit 10
+select key, count(1) from clustergroupby  where ds='101'  group by key order by key limit 10
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
 
 STAGE PLANS:
   Stage: Stage-1
@@ -178,7 +201,6 @@ STAGE PLANS:
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  TopN Hash Memory Usage: 0.1
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -187,6 +209,28 @@ STAGE PLANS:
           mode: mergepartial
           outputColumnNames: _col0, _col1
           Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              key expressions: _col0 (type: string)
+              sort order: +
+              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
+              value expressions: _col1 (type: bigint)
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: bigint)
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
           Limit
             Number of rows: 10
             Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
@@ -204,12 +248,12 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-PREHOOK: query: select key, count(1) from clustergroupby  where ds='101' group by key limit 10
+PREHOOK: query: select key, count(1) from clustergroupby  where ds='101' group by key order by key limit 10
 PREHOOK: type: QUERY
 PREHOOK: Input: default@clustergroupby
 PREHOOK: Input: default@clustergroupby@ds=101
 #### A masked pattern was here ####
-POSTHOOK: query: select key, count(1) from clustergroupby  where ds='101' group by key limit 10
+POSTHOOK: query: select key, count(1) from clustergroupby  where ds='101' group by key order by key limit 10
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@clustergroupby
 POSTHOOK: Input: default@clustergroupby@ds=101
@@ -370,15 +414,16 @@ POSTHOOK: Input: default@clustergroupby@ds=101
 3	416
 PREHOOK: query: --constant--
 explain
-select key, count(1) from clustergroupby  where ds='101'  group by key,3 limit 10
+select key, count(1) from clustergroupby  where ds='101'  group by key,3 order by key,3 limit 10
 PREHOOK: type: QUERY
 POSTHOOK: query: --constant--
 explain
-select key, count(1) from clustergroupby  where ds='101'  group by key,3 limit 10
+select key, count(1) from clustergroupby  where ds='101'  group by key,3 order by key,3 limit 10
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
 
 STAGE PLANS:
   Stage: Stage-1
@@ -402,7 +447,6 @@ STAGE PLANS:
                   sort order: ++
                   Map-reduce partition columns: _col0 (type: string), 3 (type: int)
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  TopN Hash Memory Usage: 0.1
                   value expressions: _col2 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -415,16 +459,38 @@ STAGE PLANS:
             expressions: _col0 (type: string), _col2 (type: bigint)
             outputColumnNames: _col0, _col1
             Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-            Limit
-              Number of rows: 10
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              key expressions: _col0 (type: string), 3 (type: int)
+              sort order: ++
+              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
+              value expressions: _col1 (type: bigint)
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: bigint)
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+          Limit
+            Number of rows: 10
+            Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
               Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator
@@ -432,12 +498,12 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-PREHOOK: query: select key, count(1) from clustergroupby  where ds='101' group by key,3 limit 10
+PREHOOK: query: select key, count(1) from clustergroupby  where ds='101' group by key,3 order by key,3 limit 10
 PREHOOK: type: QUERY
 PREHOOK: Input: default@clustergroupby
 PREHOOK: Input: default@clustergroupby@ds=101
 #### A masked pattern was here ####
-POSTHOOK: query: select key, count(1) from clustergroupby  where ds='101' group by key,3 limit 10
+POSTHOOK: query: select key, count(1) from clustergroupby  where ds='101' group by key,3 order by key,3 limit 10
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@clustergroupby
 POSTHOOK: Input: default@clustergroupby@ds=101
@@ -454,15 +520,16 @@ POSTHOOK: Input: default@clustergroupby@ds=101
 114	1
 PREHOOK: query: --subquery--
 explain
-select key, count(1) from (select value as key, key as value from clustergroupby where ds='101')subq group by key limit 10
+select key, count(1) from (select value as key, key as value from clustergroupby where ds='101')subq group by key order by key limit 10
 PREHOOK: type: QUERY
 POSTHOOK: query: --subquery--
 explain
-select key, count(1) from (select value as key, key as value from clustergroupby where ds='101')subq group by key limit 10
+select key, count(1) from (select value as key, key as value from clustergroupby where ds='101')subq group by key order by key limit 10
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
 
 STAGE PLANS:
   Stage: Stage-1
@@ -486,7 +553,6 @@ STAGE PLANS:
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  TopN Hash Memory Usage: 0.1
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -495,6 +561,28 @@ STAGE PLANS:
           mode: mergepartial
           outputColumnNames: _col0, _col1
           Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              key expressions: _col0 (type: string)
+              sort order: +
+              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
+              value expressions: _col1 (type: bigint)
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: bigint)
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
           Limit
             Number of rows: 10
             Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
@@ -512,12 +600,12 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-PREHOOK: query: select key, count(1) from (select value as key, key as value from clustergroupby where ds='101')subq group by key limit 10
+PREHOOK: query: select key, count(1) from (select value as key, key as value from clustergroupby where ds='101')subq group by key order by key limit 10
 PREHOOK: type: QUERY
 PREHOOK: Input: default@clustergroupby
 PREHOOK: Input: default@clustergroupby@ds=101
 #### A masked pattern was here ####
-POSTHOOK: query: select key, count(1) from (select value as key, key as value from clustergroupby where ds='101')subq group by key limit 10
+POSTHOOK: query: select key, count(1) from (select value as key, key as value from clustergroupby where ds='101')subq group by key order by key limit 10
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@clustergroupby
 POSTHOOK: Input: default@clustergroupby@ds=101
@@ -1020,14 +1108,15 @@ POSTHOOK: Output: default@clustergroupby@ds=102
 POSTHOOK: Lineage: clustergroupby PARTITION(ds=102).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: clustergroupby PARTITION(ds=102).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: explain
-select key, count(1) from clustergroupby  where ds='102'  group by key limit 10
+select key, count(1) from clustergroupby  where ds='102'  group by key order by key limit 10
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
-select key, count(1) from clustergroupby  where ds='102'  group by key limit 10
+select key, count(1) from clustergroupby  where ds='102'  group by key order by key limit 10
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
 
 STAGE PLANS:
   Stage: Stage-1
@@ -1052,7 +1141,6 @@ STAGE PLANS:
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  TopN Hash Memory Usage: 0.1
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -1061,6 +1149,28 @@ STAGE PLANS:
           mode: mergepartial
           outputColumnNames: _col0, _col1
           Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              key expressions: _col0 (type: string)
+              sort order: +
+              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
+              value expressions: _col1 (type: bigint)
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: bigint)
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
           Limit
             Number of rows: 10
             Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
@@ -1078,12 +1188,12 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-PREHOOK: query: select key, count(1) from clustergroupby  where ds='102' group by key limit 10
+PREHOOK: query: select key, count(1) from clustergroupby  where ds='102' group by key order by key limit 10
 PREHOOK: type: QUERY
 PREHOOK: Input: default@clustergroupby
 PREHOOK: Input: default@clustergroupby@ds=102
 #### A masked pattern was here ####
-POSTHOOK: query: select key, count(1) from clustergroupby  where ds='102' group by key limit 10
+POSTHOOK: query: select key, count(1) from clustergroupby  where ds='102' group by key order by key limit 10
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@clustergroupby
 POSTHOOK: Input: default@clustergroupby@ds=102
@@ -1099,14 +1209,15 @@ POSTHOOK: Input: default@clustergroupby@ds=102
 113	2
 114	1
 PREHOOK: query: explain
-select value, count(1) from clustergroupby  where ds='102'  group by value limit 10
+select value, count(1) from clustergroupby  where ds='102'  group by value order by value limit 10
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
-select value, count(1) from clustergroupby  where ds='102'  group by value limit 10
+select value, count(1) from clustergroupby  where ds='102'  group by value order by value limit 10
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
 
 STAGE PLANS:
   Stage: Stage-1
@@ -1130,7 +1241,6 @@ STAGE PLANS:
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  TopN Hash Memory Usage: 0.1
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -1139,6 +1249,28 @@ STAGE PLANS:
           mode: mergepartial
           outputColumnNames: _col0, _col1
           Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              key expressions: _col0 (type: string)
+              sort order: +
+              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
+              value expressions: _col1 (type: bigint)
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: bigint)
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
           Limit
             Number of rows: 10
             Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
@@ -1156,12 +1288,12 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-PREHOOK: query: select value, count(1) from clustergroupby  where ds='102'  group by value limit 10
+PREHOOK: query: select value, count(1) from clustergroupby  where ds='102'  group by value order by value limit 10
 PREHOOK: type: QUERY
 PREHOOK: Input: default@clustergroupby
 PREHOOK: Input: default@clustergroupby@ds=102
 #### A masked pattern was here ####
-POSTHOOK: query: select value, count(1) from clustergroupby  where ds='102'  group by value limit 10
+POSTHOOK: query: select value, count(1) from clustergroupby  where ds='102'  group by value order by value limit 10
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@clustergroupby
 POSTHOOK: Input: default@clustergroupby@ds=102
@@ -1297,14 +1429,15 @@ POSTHOOK: Output: default@clustergroupby@ds=103
 POSTHOOK: Lineage: clustergroupby PARTITION(ds=103).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: clustergroupby PARTITION(ds=103).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: explain
-select key, count(1) from clustergroupby  where ds='103'  group by key limit 10
+select key, count(1) from clustergroupby  where ds='103'  group by key order by key limit 10
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
-select key, count(1) from clustergroupby  where ds='103'  group by key limit 10
+select key, count(1) from clustergroupby  where ds='103'  group by key order by key limit 10
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
 
 STAGE PLANS:
   Stage: Stage-1
@@ -1329,7 +1462,6 @@ STAGE PLANS:
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  TopN Hash Memory Usage: 0.1
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -1338,6 +1470,28 @@ STAGE PLANS:
           mode: mergepartial
           outputColumnNames: _col0, _col1
           Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              key expressions: _col0 (type: string)
+              sort order: +
+              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
+              value expressions: _col1 (type: bigint)
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: bigint)
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
           Limit
             Number of rows: 10
             Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
@@ -1355,12 +1509,12 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-PREHOOK: query: select key, count(1) from clustergroupby  where ds='103' group by key limit 10
+PREHOOK: query: select key, count(1) from clustergroupby  where ds='103' group by key order by key limit 10
 PREHOOK: type: QUERY
 PREHOOK: Input: default@clustergroupby
 PREHOOK: Input: default@clustergroupby@ds=103
 #### A masked pattern was here ####
-POSTHOOK: query: select key, count(1) from clustergroupby  where ds='103' group by key limit 10
+POSTHOOK: query: select key, count(1) from clustergroupby  where ds='103' group by key order by key limit 10
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@clustergroupby
 POSTHOOK: Input: default@clustergroupby@ds=103
@@ -1376,14 +1530,15 @@ POSTHOOK: Input: default@clustergroupby@ds=103
 113	2
 114	1
 PREHOOK: query: explain
-select key, count(1) from clustergroupby  where ds='103'  group by value, key limit 10
+select key, count(1) from clustergroupby  where ds='103'  group by value, key order by key limit 10
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
-select key, count(1) from clustergroupby  where ds='103'  group by value, key limit 10
+select key, count(1) from clustergroupby  where ds='103'  group by value, key order by key limit 10
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
 
 STAGE PLANS:
   Stage: Stage-1
@@ -1407,7 +1562,6 @@ STAGE PLANS:
                   sort order: ++
                   Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  TopN Hash Memory Usage: 0.1
                   value expressions: _col2 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -1420,16 +1574,38 @@ STAGE PLANS:
             expressions: _col1 (type: string), _col2 (type: bigint)
             outputColumnNames: _col0, _col1
             Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-            Limit
-              Number of rows: 10
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              key expressions: _col0 (type: string)
+              sort order: +
+              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
+              value expressions: _col1 (type: bigint)
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: bigint)
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+          Limit
+            Number of rows: 10
+            Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
               Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator
@@ -1437,12 +1613,12 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-PREHOOK: query: select key, count(1) from clustergroupby  where ds='103' group by  value, key limit 10
+PREHOOK: query: select key, count(1) from clustergroupby  where ds='103' group by  value, key order by key limit 10
 PREHOOK: type: QUERY
 PREHOOK: Input: default@clustergroupby
 PREHOOK: Input: default@clustergroupby@ds=103
 #### A masked pattern was here ####
-POSTHOOK: query: select key, count(1) from clustergroupby  where ds='103' group by  value, key limit 10
+POSTHOOK: query: select key, count(1) from clustergroupby  where ds='103' group by  value, key order by key limit 10
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@clustergroupby
 POSTHOOK: Input: default@clustergroupby@ds=103
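
The bucket_groupby hunks above all follow one pattern: "group by key limit 10"
becomes "group by key order by key limit 10", evidently to keep the expected
rows deterministic now that the plan gains a second sort/limit stage. A
minimal contrast, using the same table the test does:

    -- Unordered LIMIT: any 10 groups may come back, in any order.
    SELECT key, count(1) FROM clustergroupby WHERE ds='100' GROUP BY key LIMIT 10;

    -- Ordered LIMIT: always the 10 smallest keys, in sorted order.
    SELECT key, count(1) FROM clustergroupby WHERE ds='100' GROUP BY key ORDER BY key LIMIT 10;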

http://git-wip-us.apache.org/repos/asf/hive/blob/d5285d8e/ql/src/test/results/clientpositive/groupby_sort_10.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_sort_10.q.out b/ql/src/test/results/clientpositive/groupby_sort_10.q.out
index c682e95..9b8d388 100644
--- a/ql/src/test/results/clientpositive/groupby_sort_10.q.out
+++ b/ql/src/test/results/clientpositive/groupby_sort_10.q.out
@@ -1,9 +1,13 @@
-PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) PARTITIONED BY (ds string)
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+CREATE TABLE T1(key STRING, val STRING) PARTITIONED BY (ds string)
 CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@T1
-POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) PARTITIONED BY (ds string)
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+CREATE TABLE T1(key STRING, val STRING) PARTITIONED BY (ds string)
 CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default

http://git-wip-us.apache.org/repos/asf/hive/blob/d5285d8e/ql/src/test/results/clientpositive/schema_evol_text_fetchwork_table.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/schema_evol_text_fetchwork_table.q.out b/ql/src/test/results/clientpositive/schema_evol_text_fetchwork_table.q.out
deleted file mode 100644
index f849004..0000000
--- a/ql/src/test/results/clientpositive/schema_evol_text_fetchwork_table.q.out
+++ /dev/null
@@ -1,298 +0,0 @@
-PREHOOK: query: -- SORT_QUERY_RESULTS
---
--- FILE VARIATION: TEXT, Non-Vectorized, MapWork, Table
---
---
--- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... STATIC INSERT
----
-CREATE TABLE table1(a INT, b STRING) STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@table1
-POSTHOOK: query: -- SORT_QUERY_RESULTS
---
--- FILE VARIATION: TEXT, Non-Vectorized, MapWork, Table
---
---
--- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... STATIC INSERT
----
-CREATE TABLE table1(a INT, b STRING) STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@table1
-PREHOOK: query: insert into table table1 values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original')
-PREHOOK: type: QUERY
-PREHOOK: Input: default@values__tmp__table__1
-PREHOOK: Output: default@table1
-POSTHOOK: query: insert into table table1 values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original')
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@values__tmp__table__1
-POSTHOOK: Output: default@table1
-POSTHOOK: Lineage: table1.a EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
-POSTHOOK: Lineage: table1.b SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
-_col0	_col1
-PREHOOK: query: -- Table-Non-Cascade ADD COLUMNS ...
-alter table table1 add columns(c int, d string)
-PREHOOK: type: ALTERTABLE_ADDCOLS
-PREHOOK: Input: default@table1
-PREHOOK: Output: default@table1
-POSTHOOK: query: -- Table-Non-Cascade ADD COLUMNS ...
-alter table table1 add columns(c int, d string)
-POSTHOOK: type: ALTERTABLE_ADDCOLS
-POSTHOOK: Input: default@table1
-POSTHOOK: Output: default@table1
-PREHOOK: query: insert into table table1 values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty')
-PREHOOK: type: QUERY
-PREHOOK: Input: default@values__tmp__table__2
-PREHOOK: Output: default@table1
-POSTHOOK: query: insert into table table1 values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty')
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@values__tmp__table__2
-POSTHOOK: Output: default@table1
-POSTHOOK: Lineage: table1.a EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
-POSTHOOK: Lineage: table1.b SIMPLE [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
-POSTHOOK: Lineage: table1.c EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col3, type:string, comment:), ]
-POSTHOOK: Lineage: table1.d SIMPLE [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col4, type:string, comment:), ]
-_col0	_col1	_col2	_col3
-PREHOOK: query: insert into table table1 values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two hundred')
-PREHOOK: type: QUERY
-PREHOOK: Input: default@values__tmp__table__3
-PREHOOK: Output: default@table1
-POSTHOOK: query: insert into table table1 values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two hundred')
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@values__tmp__table__3
-POSTHOOK: Output: default@table1
-POSTHOOK: Lineage: table1.a EXPRESSION [(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
-POSTHOOK: Lineage: table1.b SIMPLE [(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
-POSTHOOK: Lineage: table1.c EXPRESSION [(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col3, type:string, comment:), ]
-POSTHOOK: Lineage: table1.d SIMPLE [(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col4, type:string, comment:), ]
-_col0	_col1	_col2	_col3
-PREHOOK: query: -- SELECT permutation columns to make sure NULL defaulting works right
-select a,b from table1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@table1
-#### A masked pattern was here ####
-POSTHOOK: query: -- SELECT permutation columns to make sure NULL defaulting works right
-select a,b from table1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@table1
-#### A masked pattern was here ####
-a	b
-1	new
-1	original
-2	new
-2	original
-3	new
-3	original
-4	new
-4	original
-5	new
-6	new
-PREHOOK: query: select a,b,c from table1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@table1
-#### A masked pattern was here ####
-POSTHOOK: query: select a,b,c from table1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@table1
-#### A masked pattern was here ####
-a	b	c
-1	new	10
-1	original	NULL
-2	new	20
-2	original	NULL
-3	new	30
-3	original	NULL
-4	new	40
-4	original	NULL
-5	new	100
-6	new	200
-PREHOOK: query: select a,b,c,d from table1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@table1
-#### A masked pattern was here ####
-POSTHOOK: query: select a,b,c,d from table1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@table1
-#### A masked pattern was here ####
-a	b	c	d
-1	new	10	ten
-1	original	NULL	NULL
-2	new	20	twenty
-2	original	NULL	NULL
-3	new	30	thirty
-3	original	NULL	NULL
-4	new	40	forty
-4	original	NULL	NULL
-5	new	100	hundred
-6	new	200	two hundred
-PREHOOK: query: select a,c,d from table1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@table1
-#### A masked pattern was here ####
-POSTHOOK: query: select a,c,d from table1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@table1
-#### A masked pattern was here ####
-a	c	d
-1	10	ten
-1	NULL	NULL
-2	20	twenty
-2	NULL	NULL
-3	30	thirty
-3	NULL	NULL
-4	40	forty
-4	NULL	NULL
-5	100	hundred
-6	200	two hundred
-PREHOOK: query: select a,d from table1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@table1
-#### A masked pattern was here ####
-POSTHOOK: query: select a,d from table1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@table1
-#### A masked pattern was here ####
-a	d
-1	NULL
-1	ten
-2	NULL
-2	twenty
-3	NULL
-3	thirty
-4	NULL
-4	forty
-5	hundred
-6	two hundred
-PREHOOK: query: select c from table1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@table1
-#### A masked pattern was here ####
-POSTHOOK: query: select c from table1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@table1
-#### A masked pattern was here ####
-c
-10
-100
-20
-200
-30
-40
-NULL
-NULL
-NULL
-NULL
-PREHOOK: query: select d from table1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@table1
-#### A masked pattern was here ####
-POSTHOOK: query: select d from table1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@table1
-#### A masked pattern was here ####
-d
-NULL
-NULL
-NULL
-NULL
-forty
-hundred
-ten
-thirty
-twenty
-two hundred
-PREHOOK: query: --
--- SECTION VARIATION: ALTER TABLE CHANGE COLUMN ... STATIC INSERT
--- smallint = (2-byte signed integer, from -32,768 to 32,767)
---
-CREATE TABLE table2(a smallint, b STRING) STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@table2
-POSTHOOK: query: --
--- SECTION VARIATION: ALTER TABLE CHANGE COLUMN ... STATIC INSERT
--- smallint = (2-byte signed integer, from -32,768 to 32,767)
---
-CREATE TABLE table2(a smallint, b STRING) STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@table2
-PREHOOK: query: insert into table table2 values(1000, 'original'),(6737, 'original'), ('3', 'original'),('4', 'original')
-PREHOOK: type: QUERY
-PREHOOK: Input: default@values__tmp__table__4
-PREHOOK: Output: default@table2
-POSTHOOK: query: insert into table table2 values(1000, 'original'),(6737, 'original'), ('3', 'original'),('4', 'original')
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@values__tmp__table__4
-POSTHOOK: Output: default@table2
-POSTHOOK: Lineage: table2.a EXPRESSION [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
-POSTHOOK: Lineage: table2.b SIMPLE [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
-_col0	_col1
-PREHOOK: query: -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table table2 change column a a int
-PREHOOK: type: ALTERTABLE_RENAMECOL
-PREHOOK: Input: default@table2
-PREHOOK: Output: default@table2
-POSTHOOK: query: -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table table2 change column a a int
-POSTHOOK: type: ALTERTABLE_RENAMECOL
-POSTHOOK: Input: default@table2
-POSTHOOK: Output: default@table2
-PREHOOK: query: insert into table table2 values(72909, 'new'),(200, 'new'), (32768, 'new'),(40000, 'new')
-PREHOOK: type: QUERY
-PREHOOK: Input: default@values__tmp__table__5
-PREHOOK: Output: default@table2
-POSTHOOK: query: insert into table table2 values(72909, 'new'),(200, 'new'), (32768, 'new'),(40000, 'new')
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@values__tmp__table__5
-POSTHOOK: Output: default@table2
-POSTHOOK: Lineage: table2.a EXPRESSION [(values__tmp__table__5)values__tmp__table__5.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
-POSTHOOK: Lineage: table2.b SIMPLE [(values__tmp__table__5)values__tmp__table__5.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
-_col0	_col1
-PREHOOK: query: insert into table table2 values(5000, 'new'),(90000, 'new')
-PREHOOK: type: QUERY
-PREHOOK: Input: default@values__tmp__table__6
-PREHOOK: Output: default@table2
-POSTHOOK: query: insert into table table2 values(5000, 'new'),(90000, 'new')
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@values__tmp__table__6
-POSTHOOK: Output: default@table2
-POSTHOOK: Lineage: table2.a EXPRESSION [(values__tmp__table__6)values__tmp__table__6.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
-POSTHOOK: Lineage: table2.b SIMPLE [(values__tmp__table__6)values__tmp__table__6.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
-_col0	_col1
-PREHOOK: query: select a,b from table2
-PREHOOK: type: QUERY
-PREHOOK: Input: default@table2
-#### A masked pattern was here ####
-POSTHOOK: query: select a,b from table2
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@table2
-#### A masked pattern was here ####
-a	b
-1000	original
-200	new
-3	original
-32768	new
-4	original
-40000	new
-5000	new
-6737	original
-72909	new
-90000	new
-PREHOOK: query: DROP TABLE table1
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@table1
-PREHOOK: Output: default@table1
-POSTHOOK: query: DROP TABLE table1
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@table1
-POSTHOOK: Output: default@table1
-PREHOOK: query: DROP TABLE table2
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@table2
-PREHOOK: Output: default@table2
-POSTHOOK: query: DROP TABLE table2
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@table2
-POSTHOOK: Output: default@table2

http://git-wip-us.apache.org/repos/asf/hive/blob/d5285d8e/ql/src/test/results/clientpositive/schema_evol_text_mapwork_table.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/schema_evol_text_mapwork_table.q.out b/ql/src/test/results/clientpositive/schema_evol_text_mapwork_table.q.out
deleted file mode 100644
index f849004..0000000
--- a/ql/src/test/results/clientpositive/schema_evol_text_mapwork_table.q.out
+++ /dev/null
@@ -1,298 +0,0 @@
-PREHOOK: query: -- SORT_QUERY_RESULTS
---
--- FILE VARIATION: TEXT, Non-Vectorized, MapWork, Table
---
---
--- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... STATIC INSERT
----
-CREATE TABLE table1(a INT, b STRING) STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@table1
-POSTHOOK: query: -- SORT_QUERY_RESULTS
---
--- FILE VARIATION: TEXT, Non-Vectorized, MapWork, Table
---
---
--- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... STATIC INSERT
----
-CREATE TABLE table1(a INT, b STRING) STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@table1
-PREHOOK: query: insert into table table1 values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original')
-PREHOOK: type: QUERY
-PREHOOK: Input: default@values__tmp__table__1
-PREHOOK: Output: default@table1
-POSTHOOK: query: insert into table table1 values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original')
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@values__tmp__table__1
-POSTHOOK: Output: default@table1
-POSTHOOK: Lineage: table1.a EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
-POSTHOOK: Lineage: table1.b SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
-_col0	_col1
-PREHOOK: query: -- Table-Non-Cascade ADD COLUMNS ...
-alter table table1 add columns(c int, d string)
-PREHOOK: type: ALTERTABLE_ADDCOLS
-PREHOOK: Input: default@table1
-PREHOOK: Output: default@table1
-POSTHOOK: query: -- Table-Non-Cascade ADD COLUMNS ...
-alter table table1 add columns(c int, d string)
-POSTHOOK: type: ALTERTABLE_ADDCOLS
-POSTHOOK: Input: default@table1
-POSTHOOK: Output: default@table1
-PREHOOK: query: insert into table table1 values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty')
-PREHOOK: type: QUERY
-PREHOOK: Input: default@values__tmp__table__2
-PREHOOK: Output: default@table1
-POSTHOOK: query: insert into table table1 values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty')
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@values__tmp__table__2
-POSTHOOK: Output: default@table1
-POSTHOOK: Lineage: table1.a EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
-POSTHOOK: Lineage: table1.b SIMPLE [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
-POSTHOOK: Lineage: table1.c EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col3, type:string, comment:), ]
-POSTHOOK: Lineage: table1.d SIMPLE [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col4, type:string, comment:), ]
-_col0	_col1	_col2	_col3
-PREHOOK: query: insert into table table1 values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two hundred')
-PREHOOK: type: QUERY
-PREHOOK: Input: default@values__tmp__table__3
-PREHOOK: Output: default@table1
-POSTHOOK: query: insert into table table1 values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two hundred')
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@values__tmp__table__3
-POSTHOOK: Output: default@table1
-POSTHOOK: Lineage: table1.a EXPRESSION [(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
-POSTHOOK: Lineage: table1.b SIMPLE [(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
-POSTHOOK: Lineage: table1.c EXPRESSION [(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col3, type:string, comment:), ]
-POSTHOOK: Lineage: table1.d SIMPLE [(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col4, type:string, comment:), ]
-_col0	_col1	_col2	_col3
-PREHOOK: query: -- SELECT permutation columns to make sure NULL defaulting works right
-select a,b from table1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@table1
-#### A masked pattern was here ####
-POSTHOOK: query: -- SELECT permutation columns to make sure NULL defaulting works right
-select a,b from table1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@table1
-#### A masked pattern was here ####
-a	b
-1	new
-1	original
-2	new
-2	original
-3	new
-3	original
-4	new
-4	original
-5	new
-6	new
-PREHOOK: query: select a,b,c from table1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@table1
-#### A masked pattern was here ####
-POSTHOOK: query: select a,b,c from table1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@table1
-#### A masked pattern was here ####
-a	b	c
-1	new	10
-1	original	NULL
-2	new	20
-2	original	NULL
-3	new	30
-3	original	NULL
-4	new	40
-4	original	NULL
-5	new	100
-6	new	200
-PREHOOK: query: select a,b,c,d from table1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@table1
-#### A masked pattern was here ####
-POSTHOOK: query: select a,b,c,d from table1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@table1
-#### A masked pattern was here ####
-a	b	c	d
-1	new	10	ten
-1	original	NULL	NULL
-2	new	20	twenty
-2	original	NULL	NULL
-3	new	30	thirty
-3	original	NULL	NULL
-4	new	40	forty
-4	original	NULL	NULL
-5	new	100	hundred
-6	new	200	two hundred
-PREHOOK: query: select a,c,d from table1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@table1
-#### A masked pattern was here ####
-POSTHOOK: query: select a,c,d from table1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@table1
-#### A masked pattern was here ####
-a	c	d
-1	10	ten
-1	NULL	NULL
-2	20	twenty
-2	NULL	NULL
-3	30	thirty
-3	NULL	NULL
-4	40	forty
-4	NULL	NULL
-5	100	hundred
-6	200	two hundred
-PREHOOK: query: select a,d from table1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@table1
-#### A masked pattern was here ####
-POSTHOOK: query: select a,d from table1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@table1
-#### A masked pattern was here ####
-a	d
-1	NULL
-1	ten
-2	NULL
-2	twenty
-3	NULL
-3	thirty
-4	NULL
-4	forty
-5	hundred
-6	two hundred
-PREHOOK: query: select c from table1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@table1
-#### A masked pattern was here ####
-POSTHOOK: query: select c from table1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@table1
-#### A masked pattern was here ####
-c
-10
-100
-20
-200
-30
-40
-NULL
-NULL
-NULL
-NULL
-PREHOOK: query: select d from table1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@table1
-#### A masked pattern was here ####
-POSTHOOK: query: select d from table1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@table1
-#### A masked pattern was here ####
-d
-NULL
-NULL
-NULL
-NULL
-forty
-hundred
-ten
-thirty
-twenty
-two hundred
-PREHOOK: query: --
--- SECTION VARIATION: ALTER TABLE CHANGE COLUMN ... STATIC INSERT
--- smallint = (2-byte signed integer, from -32,768 to 32,767)
---
-CREATE TABLE table2(a smallint, b STRING) STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@table2
-POSTHOOK: query: --
--- SECTION VARIATION: ALTER TABLE CHANGE COLUMN ... STATIC INSERT
--- smallint = (2-byte signed integer, from -32,768 to 32,767)
---
-CREATE TABLE table2(a smallint, b STRING) STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@table2
-PREHOOK: query: insert into table table2 values(1000, 'original'),(6737, 'original'), ('3', 'original'),('4', 'original')
-PREHOOK: type: QUERY
-PREHOOK: Input: default@values__tmp__table__4
-PREHOOK: Output: default@table2
-POSTHOOK: query: insert into table table2 values(1000, 'original'),(6737, 'original'), ('3', 'original'),('4', 'original')
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@values__tmp__table__4
-POSTHOOK: Output: default@table2
-POSTHOOK: Lineage: table2.a EXPRESSION [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
-POSTHOOK: Lineage: table2.b SIMPLE [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
-_col0	_col1
-PREHOOK: query: -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table table2 change column a a int
-PREHOOK: type: ALTERTABLE_RENAMECOL
-PREHOOK: Input: default@table2
-PREHOOK: Output: default@table2
-POSTHOOK: query: -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table table2 change column a a int
-POSTHOOK: type: ALTERTABLE_RENAMECOL
-POSTHOOK: Input: default@table2
-POSTHOOK: Output: default@table2
-PREHOOK: query: insert into table table2 values(72909, 'new'),(200, 'new'), (32768, 'new'),(40000, 'new')
-PREHOOK: type: QUERY
-PREHOOK: Input: default@values__tmp__table__5
-PREHOOK: Output: default@table2
-POSTHOOK: query: insert into table table2 values(72909, 'new'),(200, 'new'), (32768, 'new'),(40000, 'new')
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@values__tmp__table__5
-POSTHOOK: Output: default@table2
-POSTHOOK: Lineage: table2.a EXPRESSION [(values__tmp__table__5)values__tmp__table__5.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
-POSTHOOK: Lineage: table2.b SIMPLE [(values__tmp__table__5)values__tmp__table__5.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
-_col0	_col1
-PREHOOK: query: insert into table table2 values(5000, 'new'),(90000, 'new')
-PREHOOK: type: QUERY
-PREHOOK: Input: default@values__tmp__table__6
-PREHOOK: Output: default@table2
-POSTHOOK: query: insert into table table2 values(5000, 'new'),(90000, 'new')
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@values__tmp__table__6
-POSTHOOK: Output: default@table2
-POSTHOOK: Lineage: table2.a EXPRESSION [(values__tmp__table__6)values__tmp__table__6.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
-POSTHOOK: Lineage: table2.b SIMPLE [(values__tmp__table__6)values__tmp__table__6.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
-_col0	_col1
-PREHOOK: query: select a,b from table2
-PREHOOK: type: QUERY
-PREHOOK: Input: default@table2
-#### A masked pattern was here ####
-POSTHOOK: query: select a,b from table2
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@table2
-#### A masked pattern was here ####
-a	b
-1000	original
-200	new
-3	original
-32768	new
-4	original
-40000	new
-5000	new
-6737	original
-72909	new
-90000	new
-PREHOOK: query: DROP TABLE table1
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@table1
-PREHOOK: Output: default@table1
-POSTHOOK: query: DROP TABLE table1
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@table1
-POSTHOOK: Output: default@table1
-PREHOOK: query: DROP TABLE table2
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@table2
-PREHOOK: Output: default@table2
-POSTHOOK: query: DROP TABLE table2
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@table2
-POSTHOOK: Output: default@table2
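[Editor's note: the golden file deleted above covered the TEXT, non-vectorized, MapWork, unpartitioned-table cell of the schema-evolution test matrix this patch reworks: ALTER TABLE ADD COLUMNS followed by static inserts, where rows written before the ALTER must read back NULL in the new columns, plus the smallint-to-int CHANGE COLUMN case shown at the end. A minimal sketch of the NULL-defaulting behavior it verified, again with an illustrative table name (t1_demo) not taken from the patch:

    CREATE TABLE t1_demo (a INT, b STRING) STORED AS TEXTFILE;
    INSERT INTO TABLE t1_demo VALUES (1, 'original'), (2, 'original');
    ALTER TABLE t1_demo ADD COLUMNS (c INT, d STRING);
    INSERT INTO TABLE t1_demo VALUES (1, 'new', 10, 'ten'), (2, 'new', 20, 'twenty');
    -- Pre-ALTER rows carry no data for c and d, so those columns default to NULL.
    SELECT a, b, c, d FROM t1_demo;
    DROP TABLE t1_demo;
]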