Posted to commits@hive.apache.org by se...@apache.org on 2016/10/11 23:10:04 UTC

[09/22] hive git commit: HIVE-14896 : Stabilize golden files for currently failing tests

HIVE-14896 : Stabilize golden files for currently failing tests


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/e1fa2787
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/e1fa2787
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/e1fa2787

Branch: refs/heads/hive-14535
Commit: e1fa2787ffb5b65e54ed5c087ca3ed8060f18d83
Parents: 8e0b19b
Author: Ashutosh Chauhan <ha...@apache.org>
Authored: Wed Oct 5 15:26:15 2016 -0700
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Wed Oct 5 22:50:45 2016 -0700

----------------------------------------------------------------------
 .../test/queries/clientpositive/acid_mapjoin.q  |   3 +-
 ql/src/test/queries/clientpositive/ctas.q       |   2 +-
 .../test/queries/clientpositive/ctas_hadoop20.q |  63 --
 .../results/clientpositive/acid_mapjoin.q.out   |  30 +-
 ql/src/test/results/clientpositive/ctas.q.out   | 118 +--
 .../results/clientpositive/ctas_hadoop20.q.out  | 945 -------------------
 .../test/results/clientpositive/llap/ctas.q.out |  81 +-
 .../results/clientpositive/spark/ctas.q.out     |  81 +-
 .../vector_join_part_col_char.q.out             |   6 +-
 9 files changed, 35 insertions(+), 1294 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/e1fa2787/ql/src/test/queries/clientpositive/acid_mapjoin.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/acid_mapjoin.q b/ql/src/test/queries/clientpositive/acid_mapjoin.q
index 5eee6e7..e3d14cc 100644
--- a/ql/src/test/queries/clientpositive/acid_mapjoin.q
+++ b/ql/src/test/queries/clientpositive/acid_mapjoin.q
@@ -13,7 +13,8 @@ create table acid2 (key int, value string) clustered by (key) into 2 buckets sto
 
 insert into acid1 values (1, 'a'), (2, 'b'), (3, 'c'), (4, 'd'), (5, 'e'), (6, 'f'), (7, 'g'), (8, 'h');
 insert into acid2 values (1,'a'),(3,'c'),(5,'e'),(7,'g');
-
+alter table acid2 update statistics set('numRows'='210', 'rawDataSize'='840');
+alter table acid1 update statistics set('numRows'='316', 'rawDataSize'='1265');
 explain
 select count(*) from acid1 join acid2 on acid1.key = acid2.key;
 select count(*) from acid1 join acid2 on acid1.key = acid2.key;

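The hunk above pins acid1 and acid2 to fixed numRows/rawDataSize values, so the row-count estimates printed in the golden EXPLAIN output stop drifting with the size of the ACID delta files on disk. A minimal sketch of the same technique, using a hypothetical table t (the numbers are arbitrary; what matters is that they are constants rather than on-disk measurements):

    create table t (key int, value string);
    insert into t values (1, 'a'), (2, 'b');
    -- Pin the basic table statistics the optimizer reads when sizing join inputs.
    alter table t update statistics set('numRows'='100', 'rawDataSize'='400');
    -- The TableScan statistics in the plan now reflect the pinned values
    -- (here "Num rows: 100"), independent of file layout.
    explain select count(*) from t;
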
http://git-wip-us.apache.org/repos/asf/hive/blob/e1fa2787/ql/src/test/queries/clientpositive/ctas.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/ctas.q b/ql/src/test/queries/clientpositive/ctas.q
index edd1f6a..57a4729 100644
--- a/ql/src/test/queries/clientpositive/ctas.q
+++ b/ql/src/test/queries/clientpositive/ctas.q
@@ -49,7 +49,7 @@ select * from nzhang_ctas4;
 
 describe formatted nzhang_CTAS4;
 
-explain extended create table nzhang_ctas5 row format delimited fields terminated by ',' lines terminated by '\012' stored as textfile as select key, value from src sort by key, value limit 10;
+explain create table nzhang_ctas5 row format delimited fields terminated by ',' lines terminated by '\012' stored as textfile as select key, value from src sort by key, value limit 10;
 
 set mapreduce.framework.name=yarn;
 set mapreduce.jobtracker.address=localhost:58;

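Switching from explain extended to plain explain is the other stabilization lever in this commit: the extended form prints Path -> Partition metadata, serde properties, file counts, and masked local paths, all of which vary between environments, while the plain form keeps only the operator tree and statistics. A sketch of the difference, assuming the standard src test table (t2 is a hypothetical table name; the statement mirrors the nzhang_ctas5 query above):

    -- Stable across environments: operator tree and statistics only.
    explain create table t2 as select key, value from src sort by key, value limit 10;
    -- Additionally prints per-path partition and serde detail that differs
    -- from run to run (see the blocks removed from ctas.q.out below).
    explain extended create table t2 as select key, value from src sort by key, value limit 10;
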
http://git-wip-us.apache.org/repos/asf/hive/blob/e1fa2787/ql/src/test/queries/clientpositive/ctas_hadoop20.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/ctas_hadoop20.q b/ql/src/test/queries/clientpositive/ctas_hadoop20.q
deleted file mode 100644
index e275b7b..0000000
--- a/ql/src/test/queries/clientpositive/ctas_hadoop20.q
+++ /dev/null
@@ -1,63 +0,0 @@
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20S)
-
-create table nzhang_Tmp(a int, b string);
-select * from nzhang_Tmp;
-
-explain create table nzhang_CTAS1 as select key k, value from src sort by k, value limit 10;
-
-create table nzhang_CTAS1 as select key k, value from src sort by k, value limit 10;
-
-select * from nzhang_CTAS1;
-
-describe formatted nzhang_CTAS1;
-
-
-explain create table nzhang_ctas2 as select * from src sort by key, value limit 10;
-
-create table nzhang_ctas2 as select * from src sort by key, value limit 10;
-
-select * from nzhang_ctas2;
-
-describe formatted nzhang_CTAS2;
-
-
-explain create table nzhang_ctas3 row format serde "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe" stored as RCFile as select key/2 half_key, concat(value, "_con") conb  from src sort by half_key, conb limit 10;
-
-create table nzhang_ctas3 row format serde "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe" stored as RCFile as select key/2 half_key, concat(value, "_con") conb  from src sort by half_key, conb limit 10;
-
-select * from nzhang_ctas3;
-
-describe formatted nzhang_CTAS3;
-
-
-explain create table if not exists nzhang_ctas3 as select key, value from src sort by key, value limit 2;
-
-create table if not exists nzhang_ctas3 as select key, value from src sort by key, value limit 2;
-
-select * from nzhang_ctas3;
-
-describe formatted nzhang_CTAS3;
-
-
-explain create table nzhang_ctas4 row format delimited fields terminated by ',' stored as textfile as select key, value from src sort by key, value limit 10;
-
-create table nzhang_ctas4 row format delimited fields terminated by ',' stored as textfile as select key, value from src sort by key, value limit 10;
-
-select * from nzhang_ctas4;
-
-describe formatted nzhang_CTAS4;
-
-explain extended create table nzhang_ctas5 row format delimited fields terminated by ',' lines terminated by '\012' stored as textfile as select key, value from src sort by key, value limit 10;
-
-set mapred.job.tracker=localhost:58;
-set hive.exec.mode.local.auto=true;
-
-create table nzhang_ctas5 row format delimited fields terminated by ',' lines terminated by '\012' stored as textfile as select key, value from src sort by key, value limit 10;
-
-create table nzhang_ctas6 (key string, `to` string);
-insert overwrite table nzhang_ctas6 select key, value from src limit 10;
-create table nzhang_ctas7 as select key, `to` from nzhang_ctas6;
-
-create table nzhang_ctas8 as select 3.14BD from nzhang_ctas6 limit 1;
-desc nzhang_ctas8;
-drop table nzhang_ctas8;

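The file deleted above was gated to a Hadoop line Hive no longer supports: a qfile can open with an INCLUDE_HADOOP_MAJOR_VERSIONS directive, and the test driver skips the file when the build's Hadoop major version does not match. With 0.20S support gone, the test never ran, leaving the qfile and its golden output (removed further down) as dead weight. The gating header, as it appeared at the top of the deleted file:

    -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20S)
    create table nzhang_Tmp(a int, b string);
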
http://git-wip-us.apache.org/repos/asf/hive/blob/e1fa2787/ql/src/test/results/clientpositive/acid_mapjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/acid_mapjoin.q.out b/ql/src/test/results/clientpositive/acid_mapjoin.q.out
index 036c56c..402c6ab 100644
--- a/ql/src/test/results/clientpositive/acid_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/acid_mapjoin.q.out
@@ -42,6 +42,22 @@ POSTHOOK: Input: default@values__tmp__table__2
 POSTHOOK: Output: default@acid2
 POSTHOOK: Lineage: acid2.key EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 POSTHOOK: Lineage: acid2.value SIMPLE [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+PREHOOK: query: alter table acid2 update statistics set('numRows'='210', 'rawDataSize'='840')
+PREHOOK: type: ALTERTABLE_UPDATETABLESTATS
+PREHOOK: Input: default@acid2
+PREHOOK: Output: default@acid2
+POSTHOOK: query: alter table acid2 update statistics set('numRows'='210', 'rawDataSize'='840')
+POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS
+POSTHOOK: Input: default@acid2
+POSTHOOK: Output: default@acid2
+PREHOOK: query: alter table acid1 update statistics set('numRows'='316', 'rawDataSize'='1265')
+PREHOOK: type: ALTERTABLE_UPDATETABLESTATS
+PREHOOK: Input: default@acid1
+PREHOOK: Output: default@acid1
+POSTHOOK: query: alter table acid1 update statistics set('numRows'='316', 'rawDataSize'='1265')
+POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS
+POSTHOOK: Input: default@acid1
+POSTHOOK: Output: default@acid1
 PREHOOK: query: explain
 select count(*) from acid1 join acid2 on acid1.key = acid2.key
 PREHOOK: type: QUERY
@@ -64,14 +80,14 @@ STAGE PLANS:
         $hdt$_1:acid2 
           TableScan
             alias: acid2
-            Statistics: Num rows: 209 Data size: 839 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 210 Data size: 840 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
               predicate: key is not null (type: boolean)
-              Statistics: Num rows: 209 Data size: 839 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 210 Data size: 840 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: int)
                 outputColumnNames: _col0
-                Statistics: Num rows: 209 Data size: 839 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 210 Data size: 840 Basic stats: COMPLETE Column stats: NONE
                 HashTable Sink Operator
                   keys:
                     0 _col0 (type: int)
@@ -82,21 +98,21 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: acid1
-            Statistics: Num rows: 312 Data size: 1251 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 316 Data size: 1265 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
               predicate: key is not null (type: boolean)
-              Statistics: Num rows: 312 Data size: 1251 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 316 Data size: 1265 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: int)
                 outputColumnNames: _col0
-                Statistics: Num rows: 312 Data size: 1251 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 316 Data size: 1265 Basic stats: COMPLETE Column stats: NONE
                 Map Join Operator
                   condition map:
                        Inner Join 0 to 1
                   keys:
                     0 _col0 (type: int)
                     1 _col0 (type: int)
-                  Statistics: Num rows: 343 Data size: 1376 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 347 Data size: 1391 Basic stats: COMPLETE Column stats: NONE
                   Group By Operator
                     aggregations: count()
                     mode: hash

http://git-wip-us.apache.org/repos/asf/hive/blob/e1fa2787/ql/src/test/results/clientpositive/ctas.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ctas.q.out b/ql/src/test/results/clientpositive/ctas.q.out
index 7f1c912..a9c3136b 100644
--- a/ql/src/test/results/clientpositive/ctas.q.out
+++ b/ql/src/test/results/clientpositive/ctas.q.out
@@ -690,9 +690,9 @@ Sort Columns:       	[]
 Storage Desc Params:	 	 
 	field.delim         	,                   
 	serialization.format	,                   
-PREHOOK: query: explain extended create table nzhang_ctas5 row format delimited fields terminated by ',' lines terminated by '\012' stored as textfile as select key, value from src sort by key, value limit 10
+PREHOOK: query: explain create table nzhang_ctas5 row format delimited fields terminated by ',' lines terminated by '\012' stored as textfile as select key, value from src sort by key, value limit 10
 PREHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: query: explain extended create table nzhang_ctas5 row format delimited fields terminated by ',' lines terminated by '\012' stored as textfile as select key, value from src sort by key, value limit 10
+POSTHOOK: query: explain create table nzhang_ctas5 row format delimited fields terminated by ',' lines terminated by '\012' stored as textfile as select key, value from src sort by key, value limit 10
 POSTHOOK: type: CREATETABLE_AS_SELECT
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -708,70 +708,15 @@ STAGE PLANS:
           TableScan
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
             Select Operator
               expressions: key (type: string), value (type: string)
               outputColumnNames: _col0, _col1
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: string), _col1 (type: string)
-                null sort order: aa
                 sort order: ++
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                tag: -1
-                TopN: 10
                 TopN Hash Memory Usage: 0.1
-                auto parallelism: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            base file name: src
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            properties:
-              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.src
-              numFiles 1
-              numRows 500
-              rawDataSize 5312
-              serialization.ddl struct src { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.src
-                numFiles 1
-                numRows 500
-                rawDataSize 5312
-                serialization.ddl struct src { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                totalSize 5812
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.src
-            name: default.src
-      Truncated Path -> Alias:
-        /src [src]
-      Needs Tagging: false
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
@@ -782,62 +727,20 @@ STAGE PLANS:
             Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
-              GlobalTableId: 0
-#### A masked pattern was here ####
-              NumFilesPerFileSink: 1
               table:
                   input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  properties:
-                    columns _col0,_col1
-                    columns.types string,string
-                    escape.delim \
-                    serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
                   serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-              TotalFiles: 1
-              GatherStats: false
-              MultiFileSpray: false
 
   Stage: Stage-2
     Map Reduce
       Map Operator Tree:
           TableScan
-            GatherStats: false
             Reduce Output Operator
               key expressions: _col0 (type: string), _col1 (type: string)
-              null sort order: aa
               sort order: ++
               Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
-              tag: -1
-              TopN: 10
               TopN Hash Memory Usage: 0.1
-              auto parallelism: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            base file name: -mr-10003
-            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-            properties:
-              columns _col0,_col1
-              columns.types string,string
-              escape.delim \
-              serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-            serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-          
-              input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-              properties:
-                columns _col0,_col1
-                columns.types string,string
-                escape.delim \
-                serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-              serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-      Truncated Path -> Alias:
-#### A masked pattern was here ####
-      Needs Tagging: false
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
@@ -848,28 +751,12 @@ STAGE PLANS:
             Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
-              GlobalTableId: 1
-#### A masked pattern was here ####
-              NumFilesPerFileSink: 1
               Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  properties:
-                    columns key,value
-                    columns.types string:string
-                    field.delim ,
-                    line.delim 
-
-                    name default.nzhang_ctas5
-                    serialization.format ,
-                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                   name: default.nzhang_ctas5
-              TotalFiles: 1
-              GatherStats: true
-              MultiFileSpray: false
 
   Stage: Stage-0
     Move Operator
@@ -891,7 +778,6 @@ STAGE PLANS:
 
   Stage: Stage-3
     Stats-Aggr Operator
-#### A masked pattern was here ####
 
 PREHOOK: query: create table nzhang_ctas5 row format delimited fields terminated by ',' lines terminated by '\012' stored as textfile as select key, value from src sort by key, value limit 10
 PREHOOK: type: CREATETABLE_AS_SELECT

http://git-wip-us.apache.org/repos/asf/hive/blob/e1fa2787/ql/src/test/results/clientpositive/ctas_hadoop20.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ctas_hadoop20.q.out b/ql/src/test/results/clientpositive/ctas_hadoop20.q.out
deleted file mode 100644
index 4af1e0a..0000000
--- a/ql/src/test/results/clientpositive/ctas_hadoop20.q.out
+++ /dev/null
@@ -1,945 +0,0 @@
-PREHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20, 0.20S)
-
-create table nzhang_Tmp(a int, b string)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-POSTHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20, 0.20S)
-
-create table nzhang_Tmp(a int, b string)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@nzhang_Tmp
-PREHOOK: query: select * from nzhang_Tmp
-PREHOOK: type: QUERY
-PREHOOK: Input: default@nzhang_tmp
-#### A masked pattern was here ####
-POSTHOOK: query: select * from nzhang_Tmp
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@nzhang_tmp
-#### A masked pattern was here ####
-PREHOOK: query: explain create table nzhang_CTAS1 as select key k, value from src sort by k, value limit 10
-PREHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: query: explain create table nzhang_CTAS1 as select key k, value from src sort by k, value limit 10
-POSTHOOK: type: CREATETABLE_AS_SELECT
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
-  Stage-4 depends on stages: Stage-0
-  Stage-3 depends on stages: Stage-4
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: src
-            Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
-              Reduce Output Operator
-                key expressions: _col0 (type: string), _col1 (type: string)
-                sort order: ++
-                Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
-      Reduce Operator Tree:
-        Select Operator
-          expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
-          Limit
-            Number of rows: 10
-            Statistics: Num rows: 10 Data size: 2000 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string), _col1 (type: string)
-              sort order: ++
-              Statistics: Num rows: 10 Data size: 2000 Basic stats: COMPLETE Column stats: NONE
-      Reduce Operator Tree:
-        Select Operator
-          expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 10 Data size: 2000 Basic stats: COMPLETE Column stats: NONE
-          Limit
-            Number of rows: 10
-            Statistics: Num rows: 10 Data size: 2000 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 10 Data size: 2000 Basic stats: COMPLETE Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  name: default.nzhang_CTAS1
-
-  Stage: Stage-0
-    Move Operator
-      files:
-          hdfs directory: true
-#### A masked pattern was here ####
-
-  Stage: Stage-4
-      Create Table Operator:
-        Create Table
-          columns: k string, value string
-          input format: org.apache.hadoop.mapred.TextInputFormat
-          output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
-          name: nzhang_CTAS1
-
-  Stage: Stage-3
-    Stats-Aggr Operator
-
-PREHOOK: query: create table nzhang_CTAS1 as select key k, value from src sort by k, value limit 10
-PREHOOK: type: CREATETABLE_AS_SELECT
-PREHOOK: Input: default@src
-POSTHOOK: query: create table nzhang_CTAS1 as select key k, value from src sort by k, value limit 10
-POSTHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@nzhang_CTAS1
-PREHOOK: query: select * from nzhang_CTAS1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@nzhang_ctas1
-#### A masked pattern was here ####
-POSTHOOK: query: select * from nzhang_CTAS1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@nzhang_ctas1
-#### A masked pattern was here ####
-0	val_0
-0	val_0
-0	val_0
-10	val_10
-100	val_100
-100	val_100
-103	val_103
-103	val_103
-104	val_104
-104	val_104
-PREHOOK: query: describe formatted nzhang_CTAS1
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@nzhang_ctas1
-POSTHOOK: query: describe formatted nzhang_CTAS1
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@nzhang_ctas1
-# col_name            	data_type           	comment             
-	 	 
-k                   	string              	                    
-value               	string              	                    
-	 	 
-# Detailed Table Information	 	 
-Database:           	default             	 
-#### A masked pattern was here ####
-Retention:          	0                   	 
-#### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
-Table Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	10                  
-	rawDataSize         	96                  
-	totalSize           	106                 
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: explain create table nzhang_ctas2 as select * from src sort by key, value limit 10
-PREHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: query: explain create table nzhang_ctas2 as select * from src sort by key, value limit 10
-POSTHOOK: type: CREATETABLE_AS_SELECT
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
-  Stage-4 depends on stages: Stage-0
-  Stage-3 depends on stages: Stage-4
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: src
-            Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
-              Reduce Output Operator
-                key expressions: _col0 (type: string), _col1 (type: string)
-                sort order: ++
-                Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
-      Reduce Operator Tree:
-        Select Operator
-          expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
-          Limit
-            Number of rows: 10
-            Statistics: Num rows: 10 Data size: 2000 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string), _col1 (type: string)
-              sort order: ++
-              Statistics: Num rows: 10 Data size: 2000 Basic stats: COMPLETE Column stats: NONE
-      Reduce Operator Tree:
-        Select Operator
-          expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 10 Data size: 2000 Basic stats: COMPLETE Column stats: NONE
-          Limit
-            Number of rows: 10
-            Statistics: Num rows: 10 Data size: 2000 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 10 Data size: 2000 Basic stats: COMPLETE Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  name: default.nzhang_ctas2
-
-  Stage: Stage-0
-    Move Operator
-      files:
-          hdfs directory: true
-#### A masked pattern was here ####
-
-  Stage: Stage-4
-      Create Table Operator:
-        Create Table
-          columns: key string, value string
-          input format: org.apache.hadoop.mapred.TextInputFormat
-          output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
-          name: nzhang_ctas2
-
-  Stage: Stage-3
-    Stats-Aggr Operator
-
-PREHOOK: query: create table nzhang_ctas2 as select * from src sort by key, value limit 10
-PREHOOK: type: CREATETABLE_AS_SELECT
-PREHOOK: Input: default@src
-POSTHOOK: query: create table nzhang_ctas2 as select * from src sort by key, value limit 10
-POSTHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@nzhang_ctas2
-PREHOOK: query: select * from nzhang_ctas2
-PREHOOK: type: QUERY
-PREHOOK: Input: default@nzhang_ctas2
-#### A masked pattern was here ####
-POSTHOOK: query: select * from nzhang_ctas2
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@nzhang_ctas2
-#### A masked pattern was here ####
-0	val_0
-0	val_0
-0	val_0
-10	val_10
-100	val_100
-100	val_100
-103	val_103
-103	val_103
-104	val_104
-104	val_104
-PREHOOK: query: describe formatted nzhang_CTAS2
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@nzhang_ctas2
-POSTHOOK: query: describe formatted nzhang_CTAS2
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@nzhang_ctas2
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Detailed Table Information	 	 
-Database:           	default             	 
-#### A masked pattern was here ####
-Retention:          	0                   	 
-#### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
-Table Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	10                  
-	rawDataSize         	96                  
-	totalSize           	106                 
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: explain create table nzhang_ctas3 row format serde "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe" stored as RCFile as select key/2 half_key, concat(value, "_con") conb  from src sort by half_key, conb limit 10
-PREHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: query: explain create table nzhang_ctas3 row format serde "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe" stored as RCFile as select key/2 half_key, concat(value, "_con") conb  from src sort by half_key, conb limit 10
-POSTHOOK: type: CREATETABLE_AS_SELECT
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
-  Stage-4 depends on stages: Stage-0
-  Stage-3 depends on stages: Stage-4
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: src
-            Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: (key / 2) (type: double), concat(value, '_con') (type: string)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
-              Reduce Output Operator
-                key expressions: _col0 (type: double), _col1 (type: string)
-                sort order: ++
-                Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
-      Reduce Operator Tree:
-        Select Operator
-          expressions: KEY.reducesinkkey0 (type: double), KEY.reducesinkkey1 (type: string)
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
-          Limit
-            Number of rows: 10
-            Statistics: Num rows: 10 Data size: 2000 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: double), _col1 (type: string)
-              sort order: ++
-              Statistics: Num rows: 10 Data size: 2000 Basic stats: COMPLETE Column stats: NONE
-      Reduce Operator Tree:
-        Select Operator
-          expressions: KEY.reducesinkkey0 (type: double), KEY.reducesinkkey1 (type: string)
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 10 Data size: 2000 Basic stats: COMPLETE Column stats: NONE
-          Limit
-            Number of rows: 10
-            Statistics: Num rows: 10 Data size: 2000 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 10 Data size: 2000 Basic stats: COMPLETE Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                  name: default.nzhang_ctas3
-
-  Stage: Stage-0
-    Move Operator
-      files:
-          hdfs directory: true
-#### A masked pattern was here ####
-
-  Stage: Stage-4
-      Create Table Operator:
-        Create Table
-          columns: half_key double, conb string
-          input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-          output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-          serde name: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-          name: nzhang_ctas3
-
-  Stage: Stage-3
-    Stats-Aggr Operator
-
-PREHOOK: query: create table nzhang_ctas3 row format serde "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe" stored as RCFile as select key/2 half_key, concat(value, "_con") conb  from src sort by half_key, conb limit 10
-PREHOOK: type: CREATETABLE_AS_SELECT
-PREHOOK: Input: default@src
-POSTHOOK: query: create table nzhang_ctas3 row format serde "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe" stored as RCFile as select key/2 half_key, concat(value, "_con") conb  from src sort by half_key, conb limit 10
-POSTHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@nzhang_ctas3
-PREHOOK: query: select * from nzhang_ctas3
-PREHOOK: type: QUERY
-PREHOOK: Input: default@nzhang_ctas3
-#### A masked pattern was here ####
-POSTHOOK: query: select * from nzhang_ctas3
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@nzhang_ctas3
-#### A masked pattern was here ####
-0.0	val_0_con
-0.0	val_0_con
-0.0	val_0_con
-1.0	val_2_con
-2.0	val_4_con
-2.5	val_5_con
-2.5	val_5_con
-2.5	val_5_con
-4.0	val_8_con
-4.5	val_9_con
-PREHOOK: query: describe formatted nzhang_CTAS3
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@nzhang_ctas3
-POSTHOOK: query: describe formatted nzhang_CTAS3
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@nzhang_ctas3
-# col_name            	data_type           	comment             
-	 	 
-half_key            	double              	                    
-conb                	string              	                    
-	 	 
-# Detailed Table Information	 	 
-Database:           	default             	 
-#### A masked pattern was here ####
-Retention:          	0                   	 
-#### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
-Table Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	10                  
-	rawDataSize         	120                 
-	totalSize           	199                 
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
-InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: explain create table if not exists nzhang_ctas3 as select key, value from src sort by key, value limit 2
-PREHOOK: type: CREATETABLE
-POSTHOOK: query: explain create table if not exists nzhang_ctas3 as select key, value from src sort by key, value limit 2
-POSTHOOK: type: CREATETABLE
-STAGE DEPENDENCIES:
-
-STAGE PLANS:
-PREHOOK: query: create table if not exists nzhang_ctas3 as select key, value from src sort by key, value limit 2
-PREHOOK: type: CREATETABLE
-POSTHOOK: query: create table if not exists nzhang_ctas3 as select key, value from src sort by key, value limit 2
-POSTHOOK: type: CREATETABLE
-PREHOOK: query: select * from nzhang_ctas3
-PREHOOK: type: QUERY
-PREHOOK: Input: default@nzhang_ctas3
-#### A masked pattern was here ####
-POSTHOOK: query: select * from nzhang_ctas3
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@nzhang_ctas3
-#### A masked pattern was here ####
-0.0	val_0_con
-0.0	val_0_con
-0.0	val_0_con
-1.0	val_2_con
-2.0	val_4_con
-2.5	val_5_con
-2.5	val_5_con
-2.5	val_5_con
-4.0	val_8_con
-4.5	val_9_con
-PREHOOK: query: describe formatted nzhang_CTAS3
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@nzhang_ctas3
-POSTHOOK: query: describe formatted nzhang_CTAS3
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@nzhang_ctas3
-# col_name            	data_type           	comment             
-	 	 
-half_key            	double              	                    
-conb                	string              	                    
-	 	 
-# Detailed Table Information	 	 
-Database:           	default             	 
-#### A masked pattern was here ####
-Retention:          	0                   	 
-#### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
-Table Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	10                  
-	rawDataSize         	120                 
-	totalSize           	199                 
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
-InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: explain create table nzhang_ctas4 row format delimited fields terminated by ',' stored as textfile as select key, value from src sort by key, value limit 10
-PREHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: query: explain create table nzhang_ctas4 row format delimited fields terminated by ',' stored as textfile as select key, value from src sort by key, value limit 10
-POSTHOOK: type: CREATETABLE_AS_SELECT
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
-  Stage-4 depends on stages: Stage-0
-  Stage-3 depends on stages: Stage-4
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: src
-            Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
-              Reduce Output Operator
-                key expressions: _col0 (type: string), _col1 (type: string)
-                sort order: ++
-                Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
-      Reduce Operator Tree:
-        Select Operator
-          expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
-          Limit
-            Number of rows: 10
-            Statistics: Num rows: 10 Data size: 2000 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string), _col1 (type: string)
-              sort order: ++
-              Statistics: Num rows: 10 Data size: 2000 Basic stats: COMPLETE Column stats: NONE
-      Reduce Operator Tree:
-        Select Operator
-          expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 10 Data size: 2000 Basic stats: COMPLETE Column stats: NONE
-          Limit
-            Number of rows: 10
-            Statistics: Num rows: 10 Data size: 2000 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 10 Data size: 2000 Basic stats: COMPLETE Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  name: default.nzhang_ctas4
-
-  Stage: Stage-0
-    Move Operator
-      files:
-          hdfs directory: true
-#### A masked pattern was here ####
-
-  Stage: Stage-4
-      Create Table Operator:
-        Create Table
-          columns: key string, value string
-          field delimiter: ,
-          input format: org.apache.hadoop.mapred.TextInputFormat
-          output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
-          name: nzhang_ctas4
-
-  Stage: Stage-3
-    Stats-Aggr Operator
-
-PREHOOK: query: create table nzhang_ctas4 row format delimited fields terminated by ',' stored as textfile as select key, value from src sort by key, value limit 10
-PREHOOK: type: CREATETABLE_AS_SELECT
-PREHOOK: Input: default@src
-POSTHOOK: query: create table nzhang_ctas4 row format delimited fields terminated by ',' stored as textfile as select key, value from src sort by key, value limit 10
-POSTHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@nzhang_ctas4
-PREHOOK: query: select * from nzhang_ctas4
-PREHOOK: type: QUERY
-PREHOOK: Input: default@nzhang_ctas4
-#### A masked pattern was here ####
-POSTHOOK: query: select * from nzhang_ctas4
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@nzhang_ctas4
-#### A masked pattern was here ####
-0	val_0
-0	val_0
-0	val_0
-10	val_10
-100	val_100
-100	val_100
-103	val_103
-103	val_103
-104	val_104
-104	val_104
-PREHOOK: query: describe formatted nzhang_CTAS4
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@nzhang_ctas4
-POSTHOOK: query: describe formatted nzhang_CTAS4
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@nzhang_ctas4
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Detailed Table Information	 	 
-Database:           	default             	 
-#### A masked pattern was here ####
-Retention:          	0                   	 
-#### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
-Table Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	10                  
-	rawDataSize         	96                  
-	totalSize           	106                 
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	field.delim         	,                   
-	serialization.format	,                   
-PREHOOK: query: explain extended create table nzhang_ctas5 row format delimited fields terminated by ',' lines terminated by '\012' stored as textfile as select key, value from src sort by key, value limit 10
-PREHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: query: explain extended create table nzhang_ctas5 row format delimited fields terminated by ',' lines terminated by '\012' stored as textfile as select key, value from src sort by key, value limit 10
-POSTHOOK: type: CREATETABLE_AS_SELECT
-ABSTRACT SYNTAX TREE:
-  
-TOK_CREATETABLE
-   TOK_TABNAME
-      nzhang_ctas5
-   TOK_LIKETABLE
-   TOK_TABLEROWFORMAT
-      TOK_SERDEPROPS
-         TOK_TABLEROWFORMATFIELD
-            ','
-         TOK_TABLEROWFORMATLINES
-            '\012'
-   TOK_TBLTEXTFILE
-   TOK_QUERY
-      TOK_FROM
-         TOK_TABREF
-            TOK_TABNAME
-               src
-      TOK_INSERT
-         TOK_DESTINATION
-            TOK_DIR
-               TOK_TMP_FILE
-         TOK_SELECT
-            TOK_SELEXPR
-               TOK_TABLE_OR_COL
-                  key
-            TOK_SELEXPR
-               TOK_TABLE_OR_COL
-                  value
-         TOK_SORTBY
-            TOK_TABSORTCOLNAMEASC
-               TOK_TABLE_OR_COL
-                  key
-            TOK_TABSORTCOLNAMEASC
-               TOK_TABLE_OR_COL
-                  value
-         TOK_LIMIT
-            10
-
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
-  Stage-4 depends on stages: Stage-0
-  Stage-3 depends on stages: Stage-4
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: src
-            Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
-            Select Operator
-              expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
-              Reduce Output Operator
-                key expressions: _col0 (type: string), _col1 (type: string)
-                sort order: ++
-                Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
-                tag: -1
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            base file name: src
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            properties:
-              COLUMN_STATS_ACCURATE true
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.src
-              numFiles 1
-              numRows 0
-              rawDataSize 0
-              serialization.ddl struct src { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                COLUMN_STATS_ACCURATE true
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.src
-                numFiles 1
-                numRows 0
-                rawDataSize 0
-                serialization.ddl struct src { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                totalSize 5812
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.src
-            name: default.src
-      Truncated Path -> Alias:
-        /src [src]
-      Needs Tagging: false
-      Reduce Operator Tree:
-        Select Operator
-          expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
-          Limit
-            Number of rows: 10
-            Statistics: Num rows: 10 Data size: 2000 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              GlobalTableId: 0
-#### A masked pattern was here ####
-              NumFilesPerFileSink: 1
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  properties:
-                    columns _col0,_col1
-                    columns.types string,string
-                    escape.delim \
-                    serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-              TotalFiles: 1
-              GatherStats: false
-              MultiFileSpray: false
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            GatherStats: false
-            Reduce Output Operator
-              key expressions: _col0 (type: string), _col1 (type: string)
-              sort order: ++
-              Statistics: Num rows: 10 Data size: 2000 Basic stats: COMPLETE Column stats: NONE
-              tag: -1
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            base file name: -mr-10002
-            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-            properties:
-              columns _col0,_col1
-              columns.types string,string
-              escape.delim \
-              serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-            serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-          
-              input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-              properties:
-                columns _col0,_col1
-                columns.types string,string
-                escape.delim \
-                serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-              serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-      Truncated Path -> Alias:
-#### A masked pattern was here ####
-      Needs Tagging: false
-      Reduce Operator Tree:
-        Select Operator
-          expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 10 Data size: 2000 Basic stats: COMPLETE Column stats: NONE
-          Limit
-            Number of rows: 10
-            Statistics: Num rows: 10 Data size: 2000 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              GlobalTableId: 1
-#### A masked pattern was here ####
-              NumFilesPerFileSink: 1
-              Statistics: Num rows: 10 Data size: 2000 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-              table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  properties:
-                    columns key,value
-                    columns.types string:string
-                    field.delim ,
-                    line.delim 
-
-                    name default.nzhang_ctas5
-                    serialization.format ,
-                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  name: default.nzhang_ctas5
-              TotalFiles: 1
-              GatherStats: true
-              MultiFileSpray: false
-
-  Stage: Stage-0
-    Move Operator
-      files:
-          hdfs directory: true
-#### A masked pattern was here ####
-
-  Stage: Stage-4
-      Create Table Operator:
-        Create Table
-          columns: key string, value string
-          field delimiter: ,
-          input format: org.apache.hadoop.mapred.TextInputFormat
-          line delimiter: 
-
-          output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
-          name: nzhang_ctas5
-
-  Stage: Stage-3
-    Stats-Aggr Operator
-#### A masked pattern was here ####
-
-PREHOOK: query: create table nzhang_ctas5 row format delimited fields terminated by ',' lines terminated by '\012' stored as textfile as select key, value from src sort by key, value limit 10
-PREHOOK: type: CREATETABLE_AS_SELECT
-PREHOOK: Input: default@src
-POSTHOOK: query: create table nzhang_ctas5 row format delimited fields terminated by ',' lines terminated by '\012' stored as textfile as select key, value from src sort by key, value limit 10
-POSTHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@nzhang_ctas5
-PREHOOK: query: create table nzhang_ctas6 (key string, `to` string)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-POSTHOOK: query: create table nzhang_ctas6 (key string, `to` string)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@nzhang_ctas6
-PREHOOK: query: insert overwrite table nzhang_ctas6 select key, value from src limit 10
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@nzhang_ctas6
-POSTHOOK: query: insert overwrite table nzhang_ctas6 select key, value from src limit 10
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@nzhang_ctas6
-POSTHOOK: Lineage: nzhang_ctas6.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: nzhang_ctas6.to SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: create table nzhang_ctas7 as select key, `to` from nzhang_ctas6
-PREHOOK: type: CREATETABLE_AS_SELECT
-PREHOOK: Input: default@nzhang_ctas6
-POSTHOOK: query: create table nzhang_ctas7 as select key, `to` from nzhang_ctas6
-POSTHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: Input: default@nzhang_ctas6
-POSTHOOK: Output: default@nzhang_ctas7
-PREHOOK: query: create table nzhang_ctas8 as select 3.14BD from nzhang_ctas6 limit 1
-PREHOOK: type: CREATETABLE_AS_SELECT
-PREHOOK: Input: default@nzhang_ctas6
-POSTHOOK: query: create table nzhang_ctas8 as select 3.14BD from nzhang_ctas6 limit 1
-POSTHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: Input: default@nzhang_ctas6
-POSTHOOK: Output: default@nzhang_ctas8
-PREHOOK: query: desc nzhang_ctas8
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@nzhang_ctas8
-POSTHOOK: query: desc nzhang_ctas8
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@nzhang_ctas8
-_c0                 	decimal(3,2)        	                    
-PREHOOK: query: drop table nzhang_ctas8
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@nzhang_ctas8
-PREHOOK: Output: default@nzhang_ctas8
-POSTHOOK: query: drop table nzhang_ctas8
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@nzhang_ctas8
-POSTHOOK: Output: default@nzhang_ctas8

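The golden output deleted above recorded the verbose 'explain extended' plan for this CTAS; the llap and spark files that follow keep the same query but switch to plain 'explain'. As a minimal sketch of the difference (table and column names are the test's own), both forms of the statement:

    -- plain explain: the stable operator summary the updated golden files record
    explain
    create table nzhang_ctas5
    row format delimited fields terminated by ',' lines terminated by '\012'
    stored as textfile
    as select key, value from src sort by key, value limit 10;

    -- explain extended: additionally prints the Path -> Partition listings, serde
    -- properties, and masked filesystem paths seen in the removed hunks, which is
    -- exactly the environment-dependent detail that made these files churn
    explain extended
    create table nzhang_ctas5
    row format delimited fields terminated by ',' lines terminated by '\012'
    stored as textfile
    as select key, value from src sort by key, value limit 10;
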
http://git-wip-us.apache.org/repos/asf/hive/blob/e1fa2787/ql/src/test/results/clientpositive/llap/ctas.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/ctas.q.out b/ql/src/test/results/clientpositive/llap/ctas.q.out
index dab0251..e2e7640 100644
--- a/ql/src/test/results/clientpositive/llap/ctas.q.out
+++ b/ql/src/test/results/clientpositive/llap/ctas.q.out
@@ -710,9 +710,9 @@ Sort Columns:       	[]
 Storage Desc Params:	 	 
 	field.delim         	,                   
 	serialization.format	,                   
-PREHOOK: query: explain extended create table nzhang_ctas5 row format delimited fields terminated by ',' lines terminated by '\012' stored as textfile as select key, value from src sort by key, value limit 10
+PREHOOK: query: explain create table nzhang_ctas5 row format delimited fields terminated by ',' lines terminated by '\012' stored as textfile as select key, value from src sort by key, value limit 10
 PREHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: query: explain extended create table nzhang_ctas5 row format delimited fields terminated by ',' lines terminated by '\012' stored as textfile as select key, value from src sort by key, value limit 10
+POSTHOOK: query: explain create table nzhang_ctas5 row format delimited fields terminated by ',' lines terminated by '\012' stored as textfile as select key, value from src sort by key, value limit 10
 POSTHOOK: type: CREATETABLE_AS_SELECT
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -735,74 +735,19 @@ STAGE PLANS:
                 TableScan
                   alias: src
                   Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
-                  GatherStats: false
                   Select Operator
                     expressions: key (type: string), value (type: string)
                     outputColumnNames: _col0, _col1
                     Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       key expressions: _col0 (type: string), _col1 (type: string)
-                      null sort order: aa
                       sort order: ++
                       Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
-                      tag: -1
-                      TopN: 10
                       TopN Hash Memory Usage: 0.1
-                      auto parallelism: true
             Execution mode: llap
             LLAP IO: no inputs
-            Path -> Alias:
-#### A masked pattern was here ####
-            Path -> Partition:
-#### A masked pattern was here ####
-                Partition
-                  base file name: src
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  properties:
-                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
-                    bucket_count -1
-                    columns key,value
-                    columns.comments 'default','default'
-                    columns.types string:string
-#### A masked pattern was here ####
-                    name default.src
-                    numFiles 1
-                    numRows 500
-                    rawDataSize 5312
-                    serialization.ddl struct src { string key, string value}
-                    serialization.format 1
-                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    totalSize 5812
-#### A masked pattern was here ####
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    properties:
-                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
-                      bucket_count -1
-                      columns key,value
-                      columns.comments 'default','default'
-                      columns.types string:string
-#### A masked pattern was here ####
-                      name default.src
-                      numFiles 1
-                      numRows 500
-                      rawDataSize 5312
-                      serialization.ddl struct src { string key, string value}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      totalSize 5812
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.src
-                  name: default.src
-            Truncated Path -> Alias:
-              /src [src]
         Reducer 2 
             Execution mode: llap
-            Needs Tagging: false
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
@@ -813,16 +758,11 @@ STAGE PLANS:
                   Statistics: Num rows: 10 Data size: 1780 Basic stats: COMPLETE Column stats: COMPLETE
                   Reduce Output Operator
                     key expressions: _col0 (type: string), _col1 (type: string)
-                    null sort order: aa
                     sort order: ++
                     Statistics: Num rows: 10 Data size: 1780 Basic stats: COMPLETE Column stats: COMPLETE
-                    tag: -1
-                    TopN: 10
                     TopN Hash Memory Usage: 0.1
-                    auto parallelism: false
         Reducer 3 
             Execution mode: llap
-            Needs Tagging: false
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
@@ -833,28 +773,12 @@ STAGE PLANS:
                   Statistics: Num rows: 10 Data size: 1780 Basic stats: COMPLETE Column stats: COMPLETE
                   File Output Operator
                     compressed: false
-                    GlobalTableId: 1
-#### A masked pattern was here ####
-                    NumFilesPerFileSink: 1
                     Statistics: Num rows: 10 Data size: 1780 Basic stats: COMPLETE Column stats: COMPLETE
-#### A masked pattern was here ####
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                        properties:
-                          columns key,value
-                          columns.types string:string
-                          field.delim ,
-                          line.delim 
-
-                          name default.nzhang_ctas5
-                          serialization.format ,
-                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                         serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                         name: default.nzhang_ctas5
-                    TotalFiles: 1
-                    GatherStats: true
-                    MultiFileSpray: false
 
   Stage: Stage-2
     Dependency Collection
@@ -873,7 +797,6 @@ STAGE PLANS:
 
   Stage: Stage-3
     Stats-Aggr Operator
-#### A masked pattern was here ####
 
   Stage: Stage-0
     Move Operator

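A note on the Statistics lines in the two plans: the llap output above estimates from column statistics (Num rows: 500 Data size: 89000, Column stats: COMPLETE), while the spark output below falls back to raw data size (Num rows: 500 Data size: 5312, Column stats: NONE). The difference is simply whether column statistics are available for src. A hedged HiveQL sketch of how such statistics are gathered (the test harness seeds src on its own; these statements are illustrative, not part of the test):

    -- basic table stats: numRows, rawDataSize, totalSize
    analyze table src compute statistics;

    -- per-column stats, which let plans report 'Column stats: COMPLETE'
    analyze table src compute statistics for columns;
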
http://git-wip-us.apache.org/repos/asf/hive/blob/e1fa2787/ql/src/test/results/clientpositive/spark/ctas.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/ctas.q.out b/ql/src/test/results/clientpositive/spark/ctas.q.out
index 7a44cc7..a723365 100644
--- a/ql/src/test/results/clientpositive/spark/ctas.q.out
+++ b/ql/src/test/results/clientpositive/spark/ctas.q.out
@@ -674,9 +674,9 @@ Sort Columns:       	[]
 Storage Desc Params:	 	 
 	field.delim         	,                   
 	serialization.format	,                   
-PREHOOK: query: explain extended create table nzhang_ctas5 row format delimited fields terminated by ',' lines terminated by '\012' stored as textfile as select key, value from src sort by key, value limit 10
+PREHOOK: query: explain create table nzhang_ctas5 row format delimited fields terminated by ',' lines terminated by '\012' stored as textfile as select key, value from src sort by key, value limit 10
 PREHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: query: explain extended create table nzhang_ctas5 row format delimited fields terminated by ',' lines terminated by '\012' stored as textfile as select key, value from src sort by key, value limit 10
+POSTHOOK: query: explain create table nzhang_ctas5 row format delimited fields terminated by ',' lines terminated by '\012' stored as textfile as select key, value from src sort by key, value limit 10
 POSTHOOK: type: CREATETABLE_AS_SELECT
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -697,71 +697,16 @@ STAGE PLANS:
                 TableScan
                   alias: src
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  GatherStats: false
                   Select Operator
                     expressions: key (type: string), value (type: string)
                     outputColumnNames: _col0, _col1
                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: string), _col1 (type: string)
-                      null sort order: aa
                       sort order: ++
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                      tag: -1
-                      TopN: 10
                       TopN Hash Memory Usage: 0.1
-                      auto parallelism: false
-            Path -> Alias:
-#### A masked pattern was here ####
-            Path -> Partition:
-#### A masked pattern was here ####
-                Partition
-                  base file name: src
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  properties:
-                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
-                    bucket_count -1
-                    columns key,value
-                    columns.comments 'default','default'
-                    columns.types string:string
-#### A masked pattern was here ####
-                    name default.src
-                    numFiles 1
-                    numRows 500
-                    rawDataSize 5312
-                    serialization.ddl struct src { string key, string value}
-                    serialization.format 1
-                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    totalSize 5812
-#### A masked pattern was here ####
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    properties:
-                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
-                      bucket_count -1
-                      columns key,value
-                      columns.comments 'default','default'
-                      columns.types string:string
-#### A masked pattern was here ####
-                      name default.src
-                      numFiles 1
-                      numRows 500
-                      rawDataSize 5312
-                      serialization.ddl struct src { string key, string value}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      totalSize 5812
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.src
-                  name: default.src
-            Truncated Path -> Alias:
-              /src [src]
         Reducer 2 
-            Needs Tagging: false
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
@@ -772,15 +717,10 @@ STAGE PLANS:
                   Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
                     key expressions: _col0 (type: string), _col1 (type: string)
-                    null sort order: aa
                     sort order: ++
                     Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
-                    tag: -1
-                    TopN: 10
                     TopN Hash Memory Usage: 0.1
-                    auto parallelism: false
         Reducer 3 
-            Needs Tagging: false
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
@@ -791,28 +731,12 @@ STAGE PLANS:
                   Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
-                    GlobalTableId: 1
-#### A masked pattern was here ####
-                    NumFilesPerFileSink: 1
                     Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                        properties:
-                          columns key,value
-                          columns.types string:string
-                          field.delim ,
-                          line.delim 
-
-                          name default.nzhang_ctas5
-                          serialization.format ,
-                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                         serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                         name: default.nzhang_ctas5
-                    TotalFiles: 1
-                    GatherStats: true
-                    MultiFileSpray: false
 
   Stage: Stage-0
     Move Operator
@@ -834,7 +758,6 @@ STAGE PLANS:
 
   Stage: Stage-2
     Stats-Aggr Operator
-#### A masked pattern was here ####
 
 PREHOOK: query: create table nzhang_ctas5 row format delimited fields terminated by ',' lines terminated by '\012' stored as textfile as select key, value from src sort by key, value limit 10
 PREHOOK: type: CREATETABLE_AS_SELECT

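For anyone refreshing golden files like these locally, the q.out content is regenerated by rerunning the .q file with output overwriting enabled; a sketch of the usual invocation per the developer wiki of this era (driver names and module paths vary by profile, e.g. TestSparkCliDriver for the spark outputs, so treat the exact flags as an assumption rather than a recipe):

    cd itests/qtest
    mvn test -Dtest=TestCliDriver -Dqfile=ctas.q -Dtest.output.overwrite=true
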
http://git-wip-us.apache.org/repos/asf/hive/blob/e1fa2787/ql/src/test/results/clientpositive/vector_join_part_col_char.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_join_part_col_char.q.out b/ql/src/test/results/clientpositive/vector_join_part_col_char.q.out
index b6631f7..6f6efc6 100644
--- a/ql/src/test/results/clientpositive/vector_join_part_col_char.q.out
+++ b/ql/src/test/results/clientpositive/vector_join_part_col_char.q.out
@@ -134,7 +134,7 @@ STAGE PLANS:
                 sort order: +
                 Map-reduce partition columns: _col2 (type: char(50))
                 Statistics: Num rows: 2 Data size: 203 Basic stats: COMPLETE Column stats: NONE
-                value expressions: _col0 (type: string), _col1 (type: int), _col2 (type: char(5))
+                value expressions: _col0 (type: string), _col1 (type: int)
       Reduce Operator Tree:
         Join Operator
           condition map:
@@ -148,8 +148,8 @@ STAGE PLANS:
             compressed: false
             Statistics: Num rows: 2 Data size: 224 Basic stats: COMPLETE Column stats: NONE
             table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0