Posted to commits@hive.apache.org by px...@apache.org on 2016/05/24 04:11:36 UTC
[1/7] hive git commit: HIVE-13566: Auto-gather column stats - phase 1 (Pengcheng Xiong, reviewed by Ashutosh Chauhan)
Repository: hive
Updated Branches:
refs/heads/master 2ed47838d -> ec4b936e6
http://git-wip-us.apache.org/repos/asf/hive/blob/ec4b936e/ql/src/test/results/clientpositive/autoColumnStats_9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/autoColumnStats_9.q.out b/ql/src/test/results/clientpositive/autoColumnStats_9.q.out
new file mode 100644
index 0000000..4a7b2b7
--- /dev/null
+++ b/ql/src/test/results/clientpositive/autoColumnStats_9.q.out
@@ -0,0 +1,268 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@dest_j1
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@dest_j1
+PREHOOK: query: EXPLAIN
+FROM src src1 JOIN src src2 ON (src1.key = src2.key)
+INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+FROM src src1 JOIN src src2 ON (src1.key = src2.key)
+INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-6 depends on stages: Stage-1 , consists of Stage-7, Stage-0, Stage-3
+ Stage-7
+ Stage-5 depends on stages: Stage-7
+ Stage-0 depends on stages: Stage-5
+ Stage-2 depends on stages: Stage-0
+ Stage-8 depends on stages: Stage-2, Stage-3
+ Stage-3 depends on stages: Stage-5
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: src1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ TableScan
+            alias: src2
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: string)
+ Reduce Operator Tree:
+ Join Operator
+ condition map:
+ Inner Join 0 to 1
+ handleSkewJoin: true
+ keys:
+ 0 _col0 (type: string)
+ 1 _col0 (type: string)
+ outputColumnNames: _col0, _col2
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: UDFToInteger(_col0) (type: int), _col2 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.dest_j1
+ Select Operator
+ expressions: _col0 (type: int), _col1 (type: string)
+ outputColumnNames: key, value
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: compute_stats(key, 16), compute_stats(value, 16)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ File Output Operator
+ compressed: false
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+ Stage: Stage-6
+ Conditional Operator
+
+ Stage: Stage-7
+ Map Reduce Local Work
+ Alias -> Map Local Tables:
+ 1
+ Fetch Operator
+ limit: -1
+ Alias -> Map Local Operator Tree:
+ 1
+ TableScan
+ HashTable Sink Operator
+ keys:
+ 0 reducesinkkey0 (type: string)
+ 1 reducesinkkey0 (type: string)
+
+ Stage: Stage-5
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 reducesinkkey0 (type: string)
+ 1 reducesinkkey0 (type: string)
+ outputColumnNames: _col0, _col2
+ Select Operator
+ expressions: UDFToInteger(_col0) (type: int), _col2 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.dest_j1
+ Select Operator
+ expressions: _col0 (type: int), _col1 (type: string)
+ outputColumnNames: key, value
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: compute_stats(key, 16), compute_stats(value, 16)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ File Output Operator
+ compressed: false
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+ Local Work:
+ Map Reduce Local Work
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ replace: true
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.dest_j1
+
+ Stage: Stage-2
+ Stats-Aggr Operator
+
+ Stage: Stage-8
+ Column Stats Work
+ Column Stats Desc:
+ Columns: key, value
+ Column Types: int, string
+ Table: default.dest_j1
+
+ Stage: Stage-3
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ value expressions: _col0 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:string,numbitvectors:int>), _col1 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:string,numbitvectors:int>)
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+PREHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key)
+INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@dest_j1
+POSTHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key)
+INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@dest_j1
+POSTHOOK: Lineage: dest_j1.key EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_j1.value SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: desc formatted dest_j1
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@dest_j1
+POSTHOOK: query: desc formatted dest_j1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@dest_j1
+# col_name data_type comment
+
+key int
+value string
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"}
+ numFiles 137
+ numRows 855
+ rawDataSize 9143
+ totalSize 11996
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: desc formatted dest_j1 key
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@dest_j1
+POSTHOOK: query: desc formatted dest_j1 key
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@dest_j1
+# col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment
+
+key int 0 498 0 196 from deserializer
+PREHOOK: query: desc formatted dest_j1 value
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@dest_j1
+POSTHOOK: query: desc formatted dest_j1 value
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@dest_j1
+# col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment
+
+value string 0 214 6.834630350194552 7 from deserializer
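
For context on what autoColumnStats_9.q.out exercises: the trailing Select Operator / Group By Operator over compute_stats(...) in Stage-1 and Stage-5, plus the Stage-8 Column Stats Work, are the pieces HIVE-13566 adds, so a plain INSERT now leaves accurate column stats behind without a separate ANALYZE run. A minimal sketch of the same flow in HiveQL, assuming the hive.stats.column.autogather setting this patch introduces (the SET line is an assumption, not part of the diff):

  SET hive.stats.column.autogather=true;

  CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE;

  -- The INSERT plans the extra compute_stats stages shown above,
  -- so no follow-up ANALYZE ... FOR COLUMNS is needed.
  FROM src src1 JOIN src src2 ON (src1.key = src2.key)
  INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value;

  -- Column stats (min/max/num_nulls/distinct_count) are immediately readable:
  DESC FORMATTED dest_j1 key;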
[4/7] hive git commit: HIVE-13566: Auto-gather column stats - phase 1 (Pengcheng Xiong, reviewed by Ashutosh Chauhan)
Posted by px...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ec4b936e/ql/src/test/results/clientpositive/autoColumnStats_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/autoColumnStats_3.q.out b/ql/src/test/results/clientpositive/autoColumnStats_3.q.out
new file mode 100644
index 0000000..ee41910
--- /dev/null
+++ b/ql/src/test/results/clientpositive/autoColumnStats_3.q.out
@@ -0,0 +1,420 @@
+PREHOOK: query: drop table src_multi1
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table src_multi1
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table src_multi1 like src
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_multi1
+POSTHOOK: query: create table src_multi1 like src
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_multi1
+PREHOOK: query: analyze table src_multi1 compute statistics for columns key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_multi1
+#### A masked pattern was here ####
+POSTHOOK: query: analyze table src_multi1 compute statistics for columns key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_multi1
+#### A masked pattern was here ####
+PREHOOK: query: describe formatted src_multi1
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@src_multi1
+POSTHOOK: query: describe formatted src_multi1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@src_multi1
+# col_name data_type comment
+
+key string default
+value string default
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\"},\"BASIC_STATS\":\"true\"}
+ numFiles 0
+ numRows 0
+ rawDataSize 0
+ totalSize 0
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: insert into table src_multi1 select * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@src_multi1
+POSTHOOK: query: insert into table src_multi1 select * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@src_multi1
+POSTHOOK: Lineage: src_multi1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: describe formatted src_multi1
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@src_multi1
+POSTHOOK: query: describe formatted src_multi1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@src_multi1
+# col_name data_type comment
+
+key string default
+value string default
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"}
+ numFiles 1
+ numRows 500
+ rawDataSize 5312
+ totalSize 5812
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: drop table nzhang_part14
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table nzhang_part14
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table if not exists nzhang_part14 (key string, value string)
+ partitioned by (ds string, hr string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@nzhang_part14
+POSTHOOK: query: create table if not exists nzhang_part14 (key string, value string)
+ partitioned by (ds string, hr string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@nzhang_part14
+PREHOOK: query: describe formatted nzhang_part14
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@nzhang_part14
+POSTHOOK: query: describe formatted nzhang_part14
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@nzhang_part14
+# col_name data_type comment
+
+key string
+value string
+
+# Partition Information
+# col_name data_type comment
+
+ds string
+hr string
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: insert into table nzhang_part14 partition(ds, hr)
+select key, value, ds, hr from (
+ select * from (select 'k1' as key, cast(null as string) as value, '1' as ds, '2' as hr from src limit 2)a
+ union all
+ select * from (select 'k2' as key, '' as value, '1' as ds, '3' as hr from src limit 2)b
+ union all
+ select * from (select 'k3' as key, ' ' as value, '2' as ds, '1' as hr from src limit 2)c
+) T
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@nzhang_part14
+POSTHOOK: query: insert into table nzhang_part14 partition(ds, hr)
+select key, value, ds, hr from (
+ select * from (select 'k1' as key, cast(null as string) as value, '1' as ds, '2' as hr from src limit 2)a
+ union all
+ select * from (select 'k2' as key, '' as value, '1' as ds, '3' as hr from src limit 2)b
+ union all
+ select * from (select 'k3' as key, ' ' as value, '2' as ds, '1' as hr from src limit 2)c
+) T
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@nzhang_part14@ds=1/hr=2
+POSTHOOK: Output: default@nzhang_part14@ds=1/hr=3
+POSTHOOK: Output: default@nzhang_part14@ds=2/hr=1
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=1,hr=2).key EXPRESSION []
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=1,hr=2).value EXPRESSION []
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=1,hr=3).key EXPRESSION []
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=1,hr=3).value EXPRESSION []
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2,hr=1).key EXPRESSION []
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2,hr=1).value EXPRESSION []
+PREHOOK: query: desc formatted nzhang_part14 partition(ds='1', hr='3')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@nzhang_part14
+POSTHOOK: query: desc formatted nzhang_part14 partition(ds='1', hr='3')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@nzhang_part14
+# col_name data_type comment
+
+key string
+value string
+
+# Partition Information
+# col_name data_type comment
+
+ds string
+hr string
+
+# Detailed Partition Information
+Partition Value: [1, 3]
+Database: default
+Table: nzhang_part14
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"}
+ numFiles 1
+ numRows 2
+ rawDataSize 6
+ totalSize 8
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: analyze table nzhang_part14 partition(ds='1', hr='3') compute statistics for columns value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@nzhang_part14
+PREHOOK: Input: default@nzhang_part14@ds=1/hr=3
+#### A masked pattern was here ####
+POSTHOOK: query: analyze table nzhang_part14 partition(ds='1', hr='3') compute statistics for columns value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@nzhang_part14
+POSTHOOK: Input: default@nzhang_part14@ds=1/hr=3
+#### A masked pattern was here ####
+PREHOOK: query: desc formatted nzhang_part14 partition(ds='1', hr='3')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@nzhang_part14
+POSTHOOK: query: desc formatted nzhang_part14 partition(ds='1', hr='3')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@nzhang_part14
+# col_name data_type comment
+
+key string
+value string
+
+# Partition Information
+# col_name data_type comment
+
+ds string
+hr string
+
+# Detailed Partition Information
+Partition Value: [1, 3]
+Database: default
+Table: nzhang_part14
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"value\":\"true\"},\"BASIC_STATS\":\"true\"}
+ numFiles 1
+ numRows 2
+ rawDataSize 6
+ totalSize 8
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: desc formatted nzhang_part14 partition(ds='2', hr='1')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@nzhang_part14
+POSTHOOK: query: desc formatted nzhang_part14 partition(ds='2', hr='1')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@nzhang_part14
+# col_name data_type comment
+
+key string
+value string
+
+# Partition Information
+# col_name data_type comment
+
+ds string
+hr string
+
+# Detailed Partition Information
+Partition Value: [2, 1]
+Database: default
+Table: nzhang_part14
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"}
+ numFiles 1
+ numRows 2
+ rawDataSize 8
+ totalSize 10
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: insert into table nzhang_part14 partition(ds, hr)
+select key, value, ds, hr from (
+ select * from (select 'k1' as key, cast(null as string) as value, '1' as ds, '2' as hr from src limit 2)a
+ union all
+ select * from (select 'k2' as key, '' as value, '1' as ds, '3' as hr from src limit 2)b
+ union all
+ select * from (select 'k3' as key, ' ' as value, '2' as ds, '1' as hr from src limit 2)c
+) T
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@nzhang_part14
+POSTHOOK: query: insert into table nzhang_part14 partition(ds, hr)
+select key, value, ds, hr from (
+ select * from (select 'k1' as key, cast(null as string) as value, '1' as ds, '2' as hr from src limit 2)a
+ union all
+ select * from (select 'k2' as key, '' as value, '1' as ds, '3' as hr from src limit 2)b
+ union all
+ select * from (select 'k3' as key, ' ' as value, '2' as ds, '1' as hr from src limit 2)c
+) T
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@nzhang_part14@ds=1/hr=2
+POSTHOOK: Output: default@nzhang_part14@ds=1/hr=3
+POSTHOOK: Output: default@nzhang_part14@ds=2/hr=1
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=1,hr=2).key EXPRESSION []
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=1,hr=2).value EXPRESSION []
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=1,hr=3).key EXPRESSION []
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=1,hr=3).value EXPRESSION []
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2,hr=1).key EXPRESSION []
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2,hr=1).value EXPRESSION []
+PREHOOK: query: desc formatted nzhang_part14 partition(ds='1', hr='3')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@nzhang_part14
+POSTHOOK: query: desc formatted nzhang_part14 partition(ds='1', hr='3')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@nzhang_part14
+# col_name data_type comment
+
+key string
+value string
+
+# Partition Information
+# col_name data_type comment
+
+ds string
+hr string
+
+# Detailed Partition Information
+Partition Value: [1, 3]
+Database: default
+Table: nzhang_part14
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"}
+ numFiles 2
+ numRows 4
+ rawDataSize 12
+ totalSize 16
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: desc formatted nzhang_part14 partition(ds='2', hr='1')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@nzhang_part14
+POSTHOOK: query: desc formatted nzhang_part14 partition(ds='2', hr='1')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@nzhang_part14
+# col_name data_type comment
+
+key string
+value string
+
+# Partition Information
+# col_name data_type comment
+
+ds string
+hr string
+
+# Detailed Partition Information
+Partition Value: [2, 1]
+Database: default
+Table: nzhang_part14
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"}
+ numFiles 2
+ numRows 4
+ rawDataSize 16
+ totalSize 20
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
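
The contrast autoColumnStats_3.q.out captures, in two statements of HiveQL (both taken from the test itself): a manual ANALYZE marks only the columns you name as accurate, while an autogathered INSERT covers every column.

  -- Manual path: only the named column becomes accurate.
  ANALYZE TABLE src_multi1 COMPUTE STATISTICS FOR COLUMNS key;
  -- COLUMN_STATS_ACCURATE: {"COLUMN_STATS":{"key":"true"},...}

  -- Automatic path: the INSERT itself gathers stats for all columns.
  INSERT INTO TABLE src_multi1 SELECT * FROM src;
  -- COLUMN_STATS_ACCURATE: {"COLUMN_STATS":{"key":"true","value":"true"},...}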
http://git-wip-us.apache.org/repos/asf/hive/blob/ec4b936e/ql/src/test/results/clientpositive/autoColumnStats_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/autoColumnStats_4.q.out b/ql/src/test/results/clientpositive/autoColumnStats_4.q.out
new file mode 100644
index 0000000..676a27a
--- /dev/null
+++ b/ql/src/test/results/clientpositive/autoColumnStats_4.q.out
@@ -0,0 +1,260 @@
+PREHOOK: query: create table acid_dtt(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@acid_dtt
+POSTHOOK: query: create table acid_dtt(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@acid_dtt
+PREHOOK: query: desc formatted acid_dtt
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@acid_dtt
+POSTHOOK: query: desc formatted acid_dtt
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@acid_dtt
+# col_name data_type comment
+
+a int
+b varchar(128)
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"}
+ numFiles 0
+ numRows 0
+ rawDataSize 0
+ totalSize 0
+ transactional true
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: 2
+Bucket Columns: [a]
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: explain insert into table acid_dtt select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 10
+PREHOOK: type: QUERY
+POSTHOOK: query: explain insert into table acid_dtt select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 10
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+ Stage-5 depends on stages: Stage-3, Stage-4
+ Stage-4 depends on stages: Stage-2
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: alltypesorc
+ Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: cint is not null (type: boolean)
+ Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: cint (type: int), CAST( cstring1 AS varchar(128)) (type: varchar(128))
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: int)
+ sort order: +
+ Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: varchar(128))
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: varchar(128))
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+ Limit
+ Number of rows: 10
+ Statistics: Num rows: 10 Data size: 2150 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+ Stage: Stage-2
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ Reduce Output Operator
+ sort order:
+ Map-reduce partition columns: _col0 (type: int)
+ Statistics: Num rows: 10 Data size: 2150 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: int), _col1 (type: varchar(128))
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: int), VALUE._col1 (type: varchar(128))
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 10 Data size: 2150 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 10 Data size: 2150 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.acid_dtt
+ Select Operator
+ expressions: _col0 (type: int), _col1 (type: varchar(128))
+ outputColumnNames: a, b
+ Statistics: Num rows: 10 Data size: 2150 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: compute_stats(a, 16), compute_stats(b, 16)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ File Output Operator
+ compressed: false
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ replace: false
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.acid_dtt
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+ Stage: Stage-5
+ Column Stats Work
+ Column Stats Desc:
+ Columns: a, b
+ Column Types: int, varchar(128)
+ Table: default.acid_dtt
+
+ Stage: Stage-4
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ value expressions: _col0 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:string,numbitvectors:int>), _col1 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:string,numbitvectors:int>)
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+PREHOOK: query: insert into table acid_dtt select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: default@acid_dtt
+POSTHOOK: query: insert into table acid_dtt select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: default@acid_dtt
+POSTHOOK: Lineage: acid_dtt.a SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: acid_dtt.b EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+PREHOOK: query: desc formatted acid_dtt
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@acid_dtt
+POSTHOOK: query: desc formatted acid_dtt
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@acid_dtt
+# col_name data_type comment
+
+a int
+b varchar(128)
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"a\":\"true\",\"b\":\"true\"},\"BASIC_STATS\":\"true\"}
+ numFiles 2
+ numRows 10
+ rawDataSize 0
+ totalSize 1714
+ transactional true
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: 2
+Bucket Columns: [a]
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: delete from acid_dtt where b = '0ruyd6Y50JpdGRf6HqD' or b = '2uLyD28144vklju213J1mr'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acid_dtt
+PREHOOK: Output: default@acid_dtt
+POSTHOOK: query: delete from acid_dtt where b = '0ruyd6Y50JpdGRf6HqD' or b = '2uLyD28144vklju213J1mr'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid_dtt
+POSTHOOK: Output: default@acid_dtt
+PREHOOK: query: desc formatted acid_dtt
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@acid_dtt
+POSTHOOK: query: desc formatted acid_dtt
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@acid_dtt
+# col_name data_type comment
+
+a int
+b varchar(128)
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"}
+ numFiles 4
+ numRows 8
+ rawDataSize 0
+ totalSize 2719
+ transactional true
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: 2
+Bucket Columns: [a]
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
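
One detail worth calling out in autoColumnStats_4.q.out: the row-level DELETE on the ACID table invalidates column stats rather than updating them, so COLUMN_STATS_ACCURATE falls back to {"BASIC_STATS":"true"} in the final desc output. A sketch of restoring them afterwards (the re-run ANALYZE is an assumption, not part of the diff):

  DELETE FROM acid_dtt WHERE b = '0ruyd6Y50JpdGRf6HqD' OR b = '2uLyD28144vklju213J1mr';
  -- The COLUMN_STATS entry is now gone from COLUMN_STATS_ACCURATE.
  ANALYZE TABLE acid_dtt COMPUTE STATISTICS FOR COLUMNS;
  -- Re-gathers accurate stats for a and b.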
http://git-wip-us.apache.org/repos/asf/hive/blob/ec4b936e/ql/src/test/results/clientpositive/autoColumnStats_5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/autoColumnStats_5.q.out b/ql/src/test/results/clientpositive/autoColumnStats_5.q.out
new file mode 100644
index 0000000..e905748
--- /dev/null
+++ b/ql/src/test/results/clientpositive/autoColumnStats_5.q.out
@@ -0,0 +1,664 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+--
+-- FILE VARIATION: TEXT, Non-Vectorized, MapWork, Partitioned
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... STATIC INSERT
+---
+CREATE TABLE partitioned1(a INT, b STRING) PARTITIONED BY(part INT) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@partitioned1
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+--
+-- FILE VARIATION: TEXT, Non-Vectorized, MapWork, Partitioned
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... STATIC INSERT
+---
+CREATE TABLE partitioned1(a INT, b STRING) PARTITIONED BY(part INT) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@partitioned1
+PREHOOK: query: explain insert into table partitioned1 partition(part=1) values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original')
+PREHOOK: type: QUERY
+POSTHOOK: query: explain insert into table partitioned1 partition(part=1) values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original')
+POSTHOOK: type: QUERY
+Explain
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
+ Stage-4
+ Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
+ Stage-2 depends on stages: Stage-0
+ Stage-8 depends on stages: Stage-2
+ Stage-3
+ Stage-5
+ Stage-6 depends on stages: Stage-5
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: values__tmp__table__1
+ Statistics: Num rows: 1 Data size: 44 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: UDFToInteger(tmp_values_col1) (type: int), tmp_values_col2 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 44 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 44 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.partitioned1
+ Select Operator
+ expressions: _col0 (type: int), _col1 (type: string), 1 (type: int)
+ outputColumnNames: a, b, part
+ Statistics: Num rows: 1 Data size: 44 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: compute_stats(a, 16), compute_stats(b, 16)
+ keys: 1 (type: int)
+ mode: hash
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 1 Data size: 44 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: 1 (type: int)
+ sort order: +
+ Map-reduce partition columns: 1 (type: int)
+ Statistics: Num rows: 1 Data size: 44 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:string,numbitvectors:int>), _col2 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:string,numbitvectors:int>)
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
+ keys: 1 (type: int)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 1 Data size: 44 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:string>), _col2 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:string>), 1 (type: int)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 1 Data size: 44 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 44 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-7
+ Conditional Operator
+
+ Stage: Stage-4
+ Move Operator
+ files:
+ hdfs directory: true
+#### A masked pattern was here ####
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ part 1
+ replace: false
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.partitioned1
+
+ Stage: Stage-2
+ Stats-Aggr Operator
+
+ Stage: Stage-8
+ Column Stats Work
+ Column Stats Desc:
+ Columns: a, b
+ Column Types: int, string
+ Table: default.partitioned1
+
+ Stage: Stage-3
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ File Output Operator
+ compressed: false
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.partitioned1
+
+ Stage: Stage-5
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ File Output Operator
+ compressed: false
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.partitioned1
+
+ Stage: Stage-6
+ Move Operator
+ files:
+ hdfs directory: true
+#### A masked pattern was here ####
+
+PREHOOK: query: insert into table partitioned1 partition(part=1) values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original')
+PREHOOK: type: QUERY
+PREHOOK: Input: default@values__tmp__table__2
+PREHOOK: Output: default@partitioned1@part=1
+POSTHOOK: query: insert into table partitioned1 partition(part=1) values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@values__tmp__table__2
+POSTHOOK: Output: default@partitioned1@part=1
+POSTHOOK: Lineage: partitioned1 PARTITION(part=1).a EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: partitioned1 PARTITION(part=1).b SIMPLE [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+_col0 _col1
+PREHOOK: query: desc formatted partitioned1 partition(part=1)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@partitioned1
+POSTHOOK: query: desc formatted partitioned1 partition(part=1)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@partitioned1
+col_name data_type comment
+# col_name data_type comment
+
+a int
+b string
+
+# Partition Information
+# col_name data_type comment
+
+part int
+
+# Detailed Partition Information
+Partition Value: [1]
+Database: default
+Table: partitioned1
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"a\":\"true\",\"b\":\"true\"},\"BASIC_STATS\":\"true\"}
+ numFiles 1
+ numRows 4
+ rawDataSize 40
+ totalSize 44
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: desc formatted partitioned1 partition(part=1) a
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@partitioned1
+POSTHOOK: query: desc formatted partitioned1 partition(part=1) a
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@partitioned1
+col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment
+# col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment
+
+a int 1 4 0 5 from deserializer
+PREHOOK: query: -- Table-Non-Cascade ADD COLUMNS ...
+alter table partitioned1 add columns(c int, d string)
+PREHOOK: type: ALTERTABLE_ADDCOLS
+PREHOOK: Input: default@partitioned1
+PREHOOK: Output: default@partitioned1
+POSTHOOK: query: -- Table-Non-Cascade ADD COLUMNS ...
+alter table partitioned1 add columns(c int, d string)
+POSTHOOK: type: ALTERTABLE_ADDCOLS
+POSTHOOK: Input: default@partitioned1
+POSTHOOK: Output: default@partitioned1
+PREHOOK: query: desc formatted partitioned1 partition(part=1)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@partitioned1
+POSTHOOK: query: desc formatted partitioned1 partition(part=1)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@partitioned1
+col_name data_type comment
+# col_name data_type comment
+
+a int
+b string
+
+# Partition Information
+# col_name data_type comment
+
+part int
+
+# Detailed Partition Information
+Partition Value: [1]
+Database: default
+Table: partitioned1
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"a\":\"true\",\"b\":\"true\"},\"BASIC_STATS\":\"true\"}
+ numFiles 1
+ numRows 4
+ rawDataSize 40
+ totalSize 44
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: explain insert into table partitioned1 partition(part=2) values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty')
+PREHOOK: type: QUERY
+POSTHOOK: query: explain insert into table partitioned1 partition(part=2) values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty')
+POSTHOOK: type: QUERY
+Explain
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
+ Stage-4
+ Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
+ Stage-2 depends on stages: Stage-0
+ Stage-8 depends on stages: Stage-2
+ Stage-3
+ Stage-5
+ Stage-6 depends on stages: Stage-5
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: values__tmp__table__3
+ Statistics: Num rows: 1 Data size: 60 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: UDFToInteger(tmp_values_col1) (type: int), tmp_values_col2 (type: string), UDFToInteger(tmp_values_col3) (type: int), tmp_values_col4 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 1 Data size: 60 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 60 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.partitioned1
+ Select Operator
+ expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: string), 2 (type: int)
+ outputColumnNames: a, b, c, d, part
+ Statistics: Num rows: 1 Data size: 60 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: compute_stats(a, 16), compute_stats(b, 16), compute_stats(c, 16), compute_stats(d, 16)
+ keys: 2 (type: int)
+ mode: hash
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 1 Data size: 60 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: 2 (type: int)
+ sort order: +
+ Map-reduce partition columns: 2 (type: int)
+ Statistics: Num rows: 1 Data size: 60 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:string,numbitvectors:int>), _col2 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:string,numbitvectors:int>), _col3 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:string,numbitvectors:int>), _col4 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:string,numbitvectors:int>)
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1), compute_stats(VALUE._col2), compute_stats(VALUE._col3)
+ keys: 2 (type: int)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 1 Data size: 60 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:string>), _col2 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:string>), _col3 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:string>), _col4 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:string>), 2 (type: int)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 1 Data size: 60 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 60 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-7
+ Conditional Operator
+
+ Stage: Stage-4
+ Move Operator
+ files:
+ hdfs directory: true
+#### A masked pattern was here ####
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ part 2
+ replace: false
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.partitioned1
+
+ Stage: Stage-2
+ Stats-Aggr Operator
+
+ Stage: Stage-8
+ Column Stats Work
+ Column Stats Desc:
+ Columns: a, b, c, d
+ Column Types: int, string, int, string
+ Table: default.partitioned1
+
+ Stage: Stage-3
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ File Output Operator
+ compressed: false
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.partitioned1
+
+ Stage: Stage-5
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ File Output Operator
+ compressed: false
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.partitioned1
+
+ Stage: Stage-6
+ Move Operator
+ files:
+ hdfs directory: true
+#### A masked pattern was here ####
+
+PREHOOK: query: insert into table partitioned1 partition(part=2) values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty')
+PREHOOK: type: QUERY
+PREHOOK: Input: default@values__tmp__table__4
+PREHOOK: Output: default@partitioned1@part=2
+POSTHOOK: query: insert into table partitioned1 partition(part=2) values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@values__tmp__table__4
+POSTHOOK: Output: default@partitioned1@part=2
+POSTHOOK: Lineage: partitioned1 PARTITION(part=2).a EXPRESSION [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: partitioned1 PARTITION(part=2).b SIMPLE [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+POSTHOOK: Lineage: partitioned1 PARTITION(part=2).c EXPRESSION [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col3, type:string, comment:), ]
+POSTHOOK: Lineage: partitioned1 PARTITION(part=2).d SIMPLE [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col4, type:string, comment:), ]
+_col0 _col1 _col2 _col3
+PREHOOK: query: desc formatted partitioned1 partition(part=2)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@partitioned1
+POSTHOOK: query: desc formatted partitioned1 partition(part=2)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@partitioned1
+col_name data_type comment
+# col_name data_type comment
+
+a int
+b string
+c int
+d string
+
+# Partition Information
+# col_name data_type comment
+
+part int
+
+# Detailed Partition Information
+Partition Value: [2]
+Database: default
+Table: partitioned1
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"a\":\"true\",\"b\":\"true\",\"c\":\"true\",\"d\":\"true\"},\"BASIC_STATS\":\"true\"}
+ numFiles 1
+ numRows 4
+ rawDataSize 56
+ totalSize 60
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: desc formatted partitioned1 partition(part=2) c
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@partitioned1
+POSTHOOK: query: desc formatted partitioned1 partition(part=2) c
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@partitioned1
+col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment
+# col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment
+
+c int 10 40 0 4 from deserializer
+PREHOOK: query: explain insert into table partitioned1 partition(part=1) values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two hundred')
+PREHOOK: type: QUERY
+POSTHOOK: query: explain insert into table partitioned1 partition(part=1) values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two hundred')
+POSTHOOK: type: QUERY
+Explain
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
+ Stage-4
+ Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
+ Stage-2 depends on stages: Stage-0
+ Stage-8 depends on stages: Stage-2
+ Stage-3
+ Stage-5
+ Stage-6 depends on stages: Stage-5
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: values__tmp__table__5
+ Statistics: Num rows: 1 Data size: 40 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: UDFToInteger(tmp_values_col1) (type: int), tmp_values_col2 (type: string), UDFToInteger(tmp_values_col3) (type: int), tmp_values_col4 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 1 Data size: 40 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 40 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.partitioned1
+ Select Operator
+ expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: string), 1 (type: int)
+ outputColumnNames: a, b, c, d, part
+ Statistics: Num rows: 1 Data size: 40 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: compute_stats(a, 16), compute_stats(b, 16), compute_stats(c, 16), compute_stats(d, 16)
+ keys: 1 (type: int)
+ mode: hash
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 1 Data size: 40 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: 1 (type: int)
+ sort order: +
+ Map-reduce partition columns: 1 (type: int)
+ Statistics: Num rows: 1 Data size: 40 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:string,numbitvectors:int>), _col2 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:string,numbitvectors:int>), _col3 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:string,numbitvectors:int>), _col4 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:string,numbitvectors:int>)
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1), compute_stats(VALUE._col2), compute_stats(VALUE._col3)
+ keys: 1 (type: int)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 1 Data size: 40 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:string>), _col2 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:string>), _col3 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:string>), _col4 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:string>), 1 (type: int)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 1 Data size: 40 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 40 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-7
+ Conditional Operator
+
+ Stage: Stage-4
+ Move Operator
+ files:
+ hdfs directory: true
+#### A masked pattern was here ####
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ part 1
+ replace: false
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.partitioned1
+
+ Stage: Stage-2
+ Stats-Aggr Operator
+
+ Stage: Stage-8
+ Column Stats Work
+ Column Stats Desc:
+ Columns: a, b, c, d
+ Column Types: int, string, int, string
+ Table: default.partitioned1
+
+ Stage: Stage-3
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ File Output Operator
+ compressed: false
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.partitioned1
+
+ Stage: Stage-5
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ File Output Operator
+ compressed: false
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.partitioned1
+
+ Stage: Stage-6
+ Move Operator
+ files:
+ hdfs directory: true
+#### A masked pattern was here ####
+
+PREHOOK: query: insert into table partitioned1 partition(part=1) values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two hundred')
+PREHOOK: type: QUERY
+PREHOOK: Input: default@values__tmp__table__6
+PREHOOK: Output: default@partitioned1@part=1
+POSTHOOK: query: insert into table partitioned1 partition(part=1) values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two hundred')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@values__tmp__table__6
+POSTHOOK: Output: default@partitioned1@part=1
+POSTHOOK: Lineage: partitioned1 PARTITION(part=1).a EXPRESSION [(values__tmp__table__6)values__tmp__table__6.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: partitioned1 PARTITION(part=1).b SIMPLE [(values__tmp__table__6)values__tmp__table__6.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+POSTHOOK: Lineage: partitioned1 PARTITION(part=1).c EXPRESSION [(values__tmp__table__6)values__tmp__table__6.FieldSchema(name:tmp_values_col3, type:string, comment:), ]
+POSTHOOK: Lineage: partitioned1 PARTITION(part=1).d SIMPLE [(values__tmp__table__6)values__tmp__table__6.FieldSchema(name:tmp_values_col4, type:string, comment:), ]
+_col0 _col1 _col2 _col3
+PREHOOK: query: desc formatted partitioned1 partition(part=1)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@partitioned1
+POSTHOOK: query: desc formatted partitioned1 partition(part=1)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@partitioned1
+col_name data_type comment
+# col_name data_type comment
+
+a int
+b string
+
+# Partition Information
+# col_name data_type comment
+
+part int
+
+# Detailed Partition Information
+Partition Value: [1]
+Database: default
+Table: partitioned1
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"a\":\"true\",\"b\":\"true\",\"c\":\"true\",\"d\":\"true\"},\"BASIC_STATS\":\"true\"}
+ numFiles 2
+ numRows 6
+ rawDataSize 78
+ totalSize 84
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: desc formatted partitioned1 partition(part=1) a
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@partitioned1
+POSTHOOK: query: desc formatted partitioned1 partition(part=1) a
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@partitioned1
+col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment
+# col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment
+
+a int 1 6 0 5 from deserializer
+PREHOOK: query: desc formatted partitioned1 partition(part=1) c
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@partitioned1
+POSTHOOK: query: desc formatted partitioned1 partition(part=1) c
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@partitioned1
+col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment
+# col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment
+
+c int 100 200 0 3 from deserializer
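Note that the per-column statistics shown above (min/max/distinct_count for a and c) appear without any explicit ANALYZE command in the test; they are gathered as a side effect of the INSERT once column-stats autogathering is enabled. A minimal HiveQL sketch of the manual step this replaces, using the table from this test:

    -- Previously, per-column stats required a separate scan of the partition:
    ANALYZE TABLE partitioned1 PARTITION (part=1) COMPUTE STATISTICS FOR COLUMNS;
    -- With hive.stats.column.autogather=true (presumably set in autoColumnStats_9.q),
    -- the INSERT itself populates the same stats, as the desc output above shows.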
http://git-wip-us.apache.org/repos/asf/hive/blob/ec4b936e/ql/src/test/results/clientpositive/autoColumnStats_6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/autoColumnStats_6.q.out b/ql/src/test/results/clientpositive/autoColumnStats_6.q.out
new file mode 100644
index 0000000..de75d8a
--- /dev/null
+++ b/ql/src/test/results/clientpositive/autoColumnStats_6.q.out
@@ -0,0 +1,299 @@
+PREHOOK: query: DROP TABLE orcfile_merge2a
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE orcfile_merge2a
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE orcfile_merge2a (key INT, value STRING)
+ PARTITIONED BY (one string, two string, three string)
+ STORED AS ORC
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orcfile_merge2a
+POSTHOOK: query: CREATE TABLE orcfile_merge2a (key INT, value STRING)
+ PARTITIONED BY (one string, two string, three string)
+ STORED AS ORC
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orcfile_merge2a
+PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE orcfile_merge2a PARTITION (one='1', two, three)
+ SELECT key, value, PMOD(HASH(key), 10) as two,
+ PMOD(HASH(value), 10) as three
+ FROM src
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE orcfile_merge2a PARTITION (one='1', two, three)
+ SELECT key, value, PMOD(HASH(key), 10) as two,
+ PMOD(HASH(value), 10) as three
+ FROM src
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
+ Stage-4
+ Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
+ Stage-2 depends on stages: Stage-0
+ Stage-8 depends on stages: Stage-2
+ Stage-3
+ Stage-5
+ Stage-6 depends on stages: Stage-5
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: UDFToInteger(key) (type: int), value (type: string), (hash(key) pmod 10) (type: int), (hash(value) pmod 10) (type: int)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orcfile_merge2a
+ Select Operator
+ expressions: _col0 (type: int), _col1 (type: string), '1' (type: string), UDFToString(_col2) (type: string), UDFToString(_col3) (type: string)
+ outputColumnNames: key, value, one, two, three
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: compute_stats(key, 16), compute_stats(value, 16)
+ keys: '1' (type: string), two (type: string), three (type: string)
+ mode: hash
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: '1' (type: string), _col1 (type: string), _col2 (type: string)
+ sort order: +++
+ Map-reduce partition columns: '1' (type: string), _col1 (type: string), _col2 (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col3 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:string,numbitvectors:int>), _col4 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:string,numbitvectors:int>)
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
+ keys: '1' (type: string), KEY._col1 (type: string), KEY._col2 (type: string)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col3 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:string>), _col4 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:string>), '1' (type: string), _col1 (type: string), _col2 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-7
+ Conditional Operator
+
+ Stage: Stage-4
+ Move Operator
+ files:
+ hdfs directory: true
+#### A masked pattern was here ####
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ one 1
+ three
+ two
+ replace: true
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orcfile_merge2a
+
+ Stage: Stage-2
+ Stats-Aggr Operator
+
+ Stage: Stage-8
+ Column Stats Work
+ Column Stats Desc:
+ Columns: key, value
+ Column Types: int, string
+ Table: default.orcfile_merge2a
+
+ Stage: Stage-3
+ Merge File Operator
+ Map Operator Tree:
+ ORC File Merge Operator
+ merge level: stripe
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+
+ Stage: Stage-5
+ Merge File Operator
+ Map Operator Tree:
+ ORC File Merge Operator
+ merge level: stripe
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+
+ Stage: Stage-6
+ Move Operator
+ files:
+ hdfs directory: true
+#### A masked pattern was here ####
+
+PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge2a PARTITION (one='1', two, three)
+ SELECT key, value, PMOD(HASH(key), 10) as two,
+ PMOD(HASH(value), 10) as three
+ FROM src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@orcfile_merge2a@one=1
+POSTHOOK: query: INSERT OVERWRITE TABLE orcfile_merge2a PARTITION (one='1', two, three)
+ SELECT key, value, PMOD(HASH(key), 10) as two,
+ PMOD(HASH(value), 10) as three
+ FROM src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@orcfile_merge2a@one=1/two=0/three=2
+POSTHOOK: Output: default@orcfile_merge2a@one=1/two=0/three=8
+POSTHOOK: Output: default@orcfile_merge2a@one=1/two=1/three=3
+POSTHOOK: Output: default@orcfile_merge2a@one=1/two=1/three=9
+POSTHOOK: Output: default@orcfile_merge2a@one=1/two=2/three=0
+POSTHOOK: Output: default@orcfile_merge2a@one=1/two=2/three=4
+POSTHOOK: Output: default@orcfile_merge2a@one=1/two=3/three=1
+POSTHOOK: Output: default@orcfile_merge2a@one=1/two=3/three=5
+POSTHOOK: Output: default@orcfile_merge2a@one=1/two=4/three=2
+POSTHOOK: Output: default@orcfile_merge2a@one=1/two=4/three=6
+POSTHOOK: Output: default@orcfile_merge2a@one=1/two=5/three=3
+POSTHOOK: Output: default@orcfile_merge2a@one=1/two=5/three=7
+POSTHOOK: Output: default@orcfile_merge2a@one=1/two=6/three=4
+POSTHOOK: Output: default@orcfile_merge2a@one=1/two=6/three=8
+POSTHOOK: Output: default@orcfile_merge2a@one=1/two=7/three=5
+POSTHOOK: Output: default@orcfile_merge2a@one=1/two=7/three=9
+POSTHOOK: Output: default@orcfile_merge2a@one=1/two=8/three=0
+POSTHOOK: Output: default@orcfile_merge2a@one=1/two=8/three=6
+POSTHOOK: Output: default@orcfile_merge2a@one=1/two=9/three=1
+POSTHOOK: Output: default@orcfile_merge2a@one=1/two=9/three=7
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=0,three=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=0,three=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=0,three=8).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=0,three=8).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=1,three=3).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=1,three=3).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=1,three=9).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=1,three=9).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=2,three=0).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=2,three=0).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=2,three=4).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=2,three=4).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=3,three=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=3,three=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=3,three=5).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=3,three=5).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=4,three=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=4,three=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=4,three=6).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=4,three=6).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=5,three=3).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=5,three=3).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=5,three=7).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=5,three=7).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=6,three=4).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=6,three=4).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=6,three=8).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=6,three=8).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=7,three=5).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=7,three=5).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=7,three=9).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=7,three=9).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=8,three=0).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=8,three=0).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=8,three=6).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=8,three=6).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=9,three=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=9,three=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=9,three=7).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=9,three=7).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+Found 1 items
+#### A masked pattern was here ####
+PREHOOK: query: SELECT SUM(HASH(c)) FROM (
+ SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
+ FROM orcfile_merge2a
+) t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orcfile_merge2a
+PREHOOK: Input: default@orcfile_merge2a@one=1/two=0/three=2
+PREHOOK: Input: default@orcfile_merge2a@one=1/two=0/three=8
+PREHOOK: Input: default@orcfile_merge2a@one=1/two=1/three=3
+PREHOOK: Input: default@orcfile_merge2a@one=1/two=1/three=9
+PREHOOK: Input: default@orcfile_merge2a@one=1/two=2/three=0
+PREHOOK: Input: default@orcfile_merge2a@one=1/two=2/three=4
+PREHOOK: Input: default@orcfile_merge2a@one=1/two=3/three=1
+PREHOOK: Input: default@orcfile_merge2a@one=1/two=3/three=5
+PREHOOK: Input: default@orcfile_merge2a@one=1/two=4/three=2
+PREHOOK: Input: default@orcfile_merge2a@one=1/two=4/three=6
+PREHOOK: Input: default@orcfile_merge2a@one=1/two=5/three=3
+PREHOOK: Input: default@orcfile_merge2a@one=1/two=5/three=7
+PREHOOK: Input: default@orcfile_merge2a@one=1/two=6/three=4
+PREHOOK: Input: default@orcfile_merge2a@one=1/two=6/three=8
+PREHOOK: Input: default@orcfile_merge2a@one=1/two=7/three=5
+PREHOOK: Input: default@orcfile_merge2a@one=1/two=7/three=9
+PREHOOK: Input: default@orcfile_merge2a@one=1/two=8/three=0
+PREHOOK: Input: default@orcfile_merge2a@one=1/two=8/three=6
+PREHOOK: Input: default@orcfile_merge2a@one=1/two=9/three=1
+PREHOOK: Input: default@orcfile_merge2a@one=1/two=9/three=7
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT SUM(HASH(c)) FROM (
+ SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
+ FROM orcfile_merge2a
+) t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orcfile_merge2a
+POSTHOOK: Input: default@orcfile_merge2a@one=1/two=0/three=2
+POSTHOOK: Input: default@orcfile_merge2a@one=1/two=0/three=8
+POSTHOOK: Input: default@orcfile_merge2a@one=1/two=1/three=3
+POSTHOOK: Input: default@orcfile_merge2a@one=1/two=1/three=9
+POSTHOOK: Input: default@orcfile_merge2a@one=1/two=2/three=0
+POSTHOOK: Input: default@orcfile_merge2a@one=1/two=2/three=4
+POSTHOOK: Input: default@orcfile_merge2a@one=1/two=3/three=1
+POSTHOOK: Input: default@orcfile_merge2a@one=1/two=3/three=5
+POSTHOOK: Input: default@orcfile_merge2a@one=1/two=4/three=2
+POSTHOOK: Input: default@orcfile_merge2a@one=1/two=4/three=6
+POSTHOOK: Input: default@orcfile_merge2a@one=1/two=5/three=3
+POSTHOOK: Input: default@orcfile_merge2a@one=1/two=5/three=7
+POSTHOOK: Input: default@orcfile_merge2a@one=1/two=6/three=4
+POSTHOOK: Input: default@orcfile_merge2a@one=1/two=6/three=8
+POSTHOOK: Input: default@orcfile_merge2a@one=1/two=7/three=5
+POSTHOOK: Input: default@orcfile_merge2a@one=1/two=7/three=9
+POSTHOOK: Input: default@orcfile_merge2a@one=1/two=8/three=0
+POSTHOOK: Input: default@orcfile_merge2a@one=1/two=8/three=6
+POSTHOOK: Input: default@orcfile_merge2a@one=1/two=9/three=1
+POSTHOOK: Input: default@orcfile_merge2a@one=1/two=9/three=7
+#### A masked pattern was here ####
+-4209012844
+PREHOOK: query: SELECT SUM(HASH(c)) FROM (
+ SELECT TRANSFORM(key, value, '1', PMOD(HASH(key), 10),
+ PMOD(HASH(value), 10)) USING 'tr \t _' AS (c)
+ FROM src
+) t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT SUM(HASH(c)) FROM (
+ SELECT TRANSFORM(key, value, '1', PMOD(HASH(key), 10),
+ PMOD(HASH(value), 10)) USING 'tr \t _' AS (c)
+ FROM src
+) t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+-4209012844
+PREHOOK: query: DROP TABLE orcfile_merge2a
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@orcfile_merge2a
+PREHOOK: Output: default@orcfile_merge2a
+POSTHOOK: query: DROP TABLE orcfile_merge2a
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@orcfile_merge2a
+POSTHOOK: Output: default@orcfile_merge2a
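In the dynamic-partition plan above, the compute_stats Group By is keyed on the partition columns ('1', two, three), so each of the twenty partitions written by the INSERT receives its own column statistics. An illustrative check (the partition values come from the POSTHOOK outputs above; this query is not part of the test):

    DESC FORMATTED orcfile_merge2a PARTITION (one='1', two='0', three='2') key;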
http://git-wip-us.apache.org/repos/asf/hive/blob/ec4b936e/ql/src/test/results/clientpositive/autoColumnStats_7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/autoColumnStats_7.q.out b/ql/src/test/results/clientpositive/autoColumnStats_7.q.out
new file mode 100644
index 0000000..9422d65
--- /dev/null
+++ b/ql/src/test/results/clientpositive/autoColumnStats_7.q.out
@@ -0,0 +1,216 @@
+PREHOOK: query: -- Taken from groupby2.q
+CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@dest_g2
+POSTHOOK: query: -- Taken from groupby2.q
+CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@dest_g2
+PREHOOK: query: CREATE TEMPORARY TABLE src_temp AS SELECT * FROM src
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_temp
+POSTHOOK: query: CREATE TEMPORARY TABLE src_temp AS SELECT * FROM src
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_temp
+PREHOOK: query: explain FROM src_temp
+INSERT OVERWRITE TABLE dest_g2 SELECT substr(src_temp.key,1,1), count(DISTINCT substr(src_temp.value,5)), concat(substr(src_temp.key,1,1),sum(substr(src_temp.value,5))) GROUP BY substr(src_temp.key,1,1)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain FROM src_temp
+INSERT OVERWRITE TABLE dest_g2 SELECT substr(src_temp.key,1,1), count(DISTINCT substr(src_temp.value,5)), concat(substr(src_temp.key,1,1),sum(substr(src_temp.value,5))) GROUP BY substr(src_temp.key,1,1)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+ Stage-6 depends on stages: Stage-3, Stage-5
+ Stage-4 depends on stages: Stage-2
+ Stage-5 depends on stages: Stage-4
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: src_temp
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: substr(key, 1, 1) (type: string), substr(value, 5) (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string), _col1 (type: string)
+ sort order: ++
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(DISTINCT KEY._col1:0._col0), sum(KEY._col1:0._col0)
+ keys: KEY._col0 (type: string)
+ mode: partial1
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+ Stage: Stage-2
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: bigint), _col2 (type: double)
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0), sum(VALUE._col1)
+ keys: KEY._col0 (type: string)
+ mode: final
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col0 (type: string), UDFToInteger(_col1) (type: int), concat(_col0, _col2) (type: string)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.dest_g2
+ Select Operator
+ expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string)
+ outputColumnNames: key, c1, c2
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ replace: true
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.dest_g2
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+ Stage: Stage-6
+ Column Stats Work
+ Column Stats Desc:
+ Columns: key, c1, c2
+ Column Types: string, int, string
+ Table: default.dest_g2
+
+ Stage: Stage-4
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ Reduce Output Operator
+ sort order:
+ Map-reduce partition columns: rand() (type: double)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ value expressions: key (type: string), 16 (type: int), c1 (type: int), c2 (type: string)
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: compute_stats(VALUE._col0, 16), compute_stats(VALUE._col2, 16), compute_stats(VALUE._col3, 16)
+ mode: partial1
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+ Stage: Stage-5
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:string,numbitvectors:int>), _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:string,numbitvectors:int>), _col2 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:string,numbitvectors:int>)
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1), compute_stats(VALUE._col2)
+ mode: final
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+PREHOOK: query: FROM src_temp
+INSERT OVERWRITE TABLE dest_g2 SELECT substr(src_temp.key,1,1), count(DISTINCT substr(src_temp.value,5)), concat(substr(src_temp.key,1,1),sum(substr(src_temp.value,5))) GROUP BY substr(src_temp.key,1,1)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_temp
+PREHOOK: Output: default@dest_g2
+POSTHOOK: query: FROM src_temp
+INSERT OVERWRITE TABLE dest_g2 SELECT substr(src_temp.key,1,1), count(DISTINCT substr(src_temp.value,5)), concat(substr(src_temp.key,1,1),sum(substr(src_temp.value,5))) GROUP BY substr(src_temp.key,1,1)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_temp
+POSTHOOK: Output: default@dest_g2
+POSTHOOK: Lineage: dest_g2.c1 EXPRESSION [(src_temp)src_temp.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: dest_g2.c2 EXPRESSION [(src_temp)src_temp.FieldSchema(name:key, type:string, comment:null), (src_temp)src_temp.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: dest_g2.key EXPRESSION [(src_temp)src_temp.FieldSchema(name:key, type:string, comment:null), ]
+PREHOOK: query: SELECT dest_g2.* FROM dest_g2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@dest_g2
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT dest_g2.* FROM dest_g2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@dest_g2
+#### A masked pattern was here ####
+0 1 00.0
+1 71 116414.0
+2 69 225571.0
+3 62 332004.0
+4 74 452763.0
+5 6 5397.0
+6 5 6398.0
+7 6 7735.0
+8 8 8762.0
+9 7 91047.0
+PREHOOK: query: DROP TABLE dest_g2
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@dest_g2
+PREHOOK: Output: default@dest_g2
+POSTHOOK: query: DROP TABLE dest_g2
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@dest_g2
+POSTHOOK: Output: default@dest_g2
+PREHOOK: query: DROP TABLE src_temp
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@src_temp
+PREHOOK: Output: default@src_temp
+POSTHOOK: query: DROP TABLE src_temp
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@src_temp
+POSTHOOK: Output: default@src_temp
HIVE-13566: Auto-gather column stats - phase 1 (Pengcheng Xiong, reviewed by Ashutosh Chauhan)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ec4b936e
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ec4b936e
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ec4b936e
Branch: refs/heads/master
Commit: ec4b936e66db559cd7226f66d416dad02864530f
Parents: 2ed4783
Author: Pengcheng Xiong <px...@apache.org>
Authored: Mon May 23 20:22:33 2016 -0700
Committer: Pengcheng Xiong <px...@apache.org>
Committed: Mon May 23 20:22:33 2016 -0700
----------------------------------------------------------------------
.../java/org/apache/hive/beeline/BeeLine.java | 14 +-
.../java/org/apache/hive/beeline/Commands.java | 72 +-
.../org/apache/hadoop/hive/conf/HiveConf.java | 4 +-
.../hive/beeline/TestBeeLineWithArgs.java | 24 +-
jdbc/src/java/org/apache/hive/jdbc/Utils.java | 54 +-
.../apache/hadoop/hive/ql/exec/Operator.java | 6 +-
.../hadoop/hive/ql/exec/TextRecordReader.java | 4 +-
.../hadoop/hive/ql/exec/TextRecordWriter.java | 4 +-
.../apache/hadoop/hive/ql/exec/Utilities.java | 24 -
.../hadoop/hive/ql/exec/tez/TezProcessor.java | 3 -
.../apache/hadoop/hive/ql/metadata/Hive.java | 10 -
.../physical/GenMRSkewJoinProcessor.java | 17 +-
.../physical/GenSparkSkewJoinProcessor.java | 18 +-
.../physical/SparkMapJoinResolver.java | 4 +-
.../ql/parse/ColumnStatsAutoGatherContext.java | 291 ++
.../ql/parse/ColumnStatsSemanticAnalyzer.java | 71 +-
.../hadoop/hive/ql/parse/ParseContext.java | 12 +
.../hadoop/hive/ql/parse/QBParseInfo.java | 20 +-
.../hadoop/hive/ql/parse/SemanticAnalyzer.java | 71 +-
.../hadoop/hive/ql/parse/TaskCompiler.java | 63 +-
.../ql/plan/ConditionalResolverSkewJoin.java | 10 +-
.../queries/clientpositive/autoColumnStats_1.q | 192 ++
.../queries/clientpositive/autoColumnStats_2.q | 214 ++
.../queries/clientpositive/autoColumnStats_3.q | 67 +
.../queries/clientpositive/autoColumnStats_4.q | 20 +
.../queries/clientpositive/autoColumnStats_5.q | 47 +
.../queries/clientpositive/autoColumnStats_6.q | 41 +
.../queries/clientpositive/autoColumnStats_7.q | 19 +
.../queries/clientpositive/autoColumnStats_8.q | 27 +
.../queries/clientpositive/autoColumnStats_9.q | 22 +
.../clientpositive/autoColumnStats_1.q.out | 1379 +++++++++
.../clientpositive/autoColumnStats_2.q.out | 1500 ++++++++++
.../clientpositive/autoColumnStats_3.q.out | 420 +++
.../clientpositive/autoColumnStats_4.q.out | 260 ++
.../clientpositive/autoColumnStats_5.q.out | 664 +++++
.../clientpositive/autoColumnStats_6.q.out | 299 ++
.../clientpositive/autoColumnStats_7.q.out | 216 ++
.../clientpositive/autoColumnStats_8.q.out | 2624 ++++++++++++++++++
.../clientpositive/autoColumnStats_9.q.out | 268 ++
39 files changed, 8845 insertions(+), 230 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/ec4b936e/beeline/src/java/org/apache/hive/beeline/BeeLine.java
----------------------------------------------------------------------
diff --git a/beeline/src/java/org/apache/hive/beeline/BeeLine.java b/beeline/src/java/org/apache/hive/beeline/BeeLine.java
index 9138613..734eeb8 100644
--- a/beeline/src/java/org/apache/hive/beeline/BeeLine.java
+++ b/beeline/src/java/org/apache/hive/beeline/BeeLine.java
@@ -93,9 +93,6 @@ import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.io.IOUtils;
import org.apache.hive.beeline.cli.CliOptionsProcessor;
-import org.apache.hive.jdbc.Utils;
-import org.apache.hive.jdbc.Utils.JdbcConnectionParams;
-
/**
* A console SQL shell with command completion.
* <p>
@@ -142,6 +139,7 @@ public class BeeLine implements Closeable {
private static final Options options = new Options();
public static final String BEELINE_DEFAULT_JDBC_DRIVER = "org.apache.hive.jdbc.HiveDriver";
+ public static final String BEELINE_DEFAULT_JDBC_URL = "jdbc:hive2://";
public static final String DEFAULT_DATABASE_NAME = "default";
private static final String SCRIPT_OUTPUT_PREFIX = ">>>";
@@ -768,14 +766,6 @@ public class BeeLine implements Closeable {
*/
if (url != null) {
- if (user == null) {
- user = Utils.parsePropertyFromUrl(url, JdbcConnectionParams.AUTH_USER);
- }
-
- if (pass == null) {
- pass = Utils.parsePropertyFromUrl(url, JdbcConnectionParams.AUTH_PASSWD);
- }
-
String com = constructCmd(url, user, pass, driver, false);
String comForDebug = constructCmd(url, user, pass, driver, true);
debug("issuing: " + comForDebug);
@@ -904,7 +894,7 @@ public class BeeLine implements Closeable {
}
private int embeddedConnect() {
- if (!execCommandWithPrefix("!connect " + Utils.URL_PREFIX + " '' ''")) {
+ if (!execCommandWithPrefix("!connect " + BEELINE_DEFAULT_JDBC_URL + " '' ''")) {
return ERRNO_OTHER;
} else {
return ERRNO_OK;
http://git-wip-us.apache.org/repos/asf/hive/blob/ec4b936e/beeline/src/java/org/apache/hive/beeline/Commands.java
----------------------------------------------------------------------
diff --git a/beeline/src/java/org/apache/hive/beeline/Commands.java b/beeline/src/java/org/apache/hive/beeline/Commands.java
index 3a204c0..80703ff 100644
--- a/beeline/src/java/org/apache/hive/beeline/Commands.java
+++ b/beeline/src/java/org/apache/hive/beeline/Commands.java
@@ -61,8 +61,6 @@ import java.util.TreeSet;
import org.apache.hadoop.hive.common.cli.ShellCmdExecutor;
import org.apache.hive.jdbc.HiveStatement;
-import org.apache.hive.jdbc.Utils;
-import org.apache.hive.jdbc.Utils.JdbcConnectionParams;
public class Commands {
@@ -1316,41 +1314,18 @@ public class Commands {
Properties props = new Properties();
if (url != null) {
String saveUrl = getUrlToUse(url);
- props.setProperty(JdbcConnectionParams.PROPERTY_URL, url);
+ props.setProperty("url", saveUrl);
}
-
- String value = null;
if (driver != null) {
- props.setProperty(JdbcConnectionParams.PROPERTY_DRIVER, driver);
- } else {
- value = Utils.parsePropertyFromUrl(url, JdbcConnectionParams.PROPERTY_DRIVER);
- if (value != null) {
- props.setProperty(JdbcConnectionParams.PROPERTY_DRIVER, value);
- }
+ props.setProperty("driver", driver);
}
-
if (user != null) {
- props.setProperty(JdbcConnectionParams.AUTH_USER, user);
- } else {
- value = Utils.parsePropertyFromUrl(url, JdbcConnectionParams.AUTH_USER);
- if (value != null) {
- props.setProperty(JdbcConnectionParams.AUTH_USER, value);
- }
+ props.setProperty("user", user);
}
-
if (pass != null) {
- props.setProperty(JdbcConnectionParams.AUTH_PASSWD, pass);
- } else {
- value = Utils.parsePropertyFromUrl(url, JdbcConnectionParams.AUTH_PASSWD);
- if (value != null) {
- props.setProperty(JdbcConnectionParams.AUTH_PASSWD, value);
- }
+ props.setProperty("password", pass);
}
- value = Utils.parsePropertyFromUrl(url, JdbcConnectionParams.AUTH_TYPE);
- if (value != null) {
- props.setProperty(JdbcConnectionParams.AUTH_TYPE, value);
- }
return connect(props);
}
@@ -1403,25 +1378,26 @@ public class Commands {
public boolean connect(Properties props) throws IOException {
String url = getProperty(props, new String[] {
- JdbcConnectionParams.PROPERTY_URL,
+ "url",
"javax.jdo.option.ConnectionURL",
"ConnectionURL",
});
String driver = getProperty(props, new String[] {
- JdbcConnectionParams.PROPERTY_DRIVER,
+ "driver",
"javax.jdo.option.ConnectionDriverName",
"ConnectionDriverName",
});
String username = getProperty(props, new String[] {
- JdbcConnectionParams.AUTH_USER,
+ "user",
"javax.jdo.option.ConnectionUserName",
"ConnectionUserName",
});
String password = getProperty(props, new String[] {
- JdbcConnectionParams.AUTH_PASSWD,
+ "password",
"javax.jdo.option.ConnectionPassword",
"ConnectionPassword",
});
+ String auth = getProperty(props, new String[] {"auth"});
if (url == null || url.length() == 0) {
return beeLine.error("Property \"url\" is required");
@@ -1432,25 +1408,23 @@ public class Commands {
}
}
- String auth = getProperty(props, new String[] {JdbcConnectionParams.AUTH_TYPE});
+ beeLine.info("Connecting to " + url);
+
+ if (username == null) {
+ username = beeLine.getConsoleReader().readLine("Enter username for " + url + ": ");
+ }
+ props.setProperty("user", username);
+ if (password == null) {
+ password = beeLine.getConsoleReader().readLine("Enter password for " + url + ": ",
+ new Character('*'));
+ }
+ props.setProperty("password", password);
+
if (auth == null) {
auth = beeLine.getOpts().getAuthType();
- if (auth != null) {
- props.setProperty(JdbcConnectionParams.AUTH_TYPE, auth);
- }
}
-
- beeLine.info("Connecting to " + url);
- if (Utils.parsePropertyFromUrl(url, JdbcConnectionParams.AUTH_PRINCIPAL) == null) {
- if (username == null) {
- username = beeLine.getConsoleReader().readLine("Enter username for " + url + ": ");
- }
- props.setProperty(JdbcConnectionParams.AUTH_USER, username);
- if (password == null) {
- password = beeLine.getConsoleReader().readLine("Enter password for " + url + ": ",
- new Character('*'));
- }
- props.setProperty(JdbcConnectionParams.AUTH_PASSWD, password);
+ if (auth != null) {
+ props.setProperty("auth", auth);
}
try {
http://git-wip-us.apache.org/repos/asf/hive/blob/ec4b936e/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index c0843b9..ed20069 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -1519,7 +1519,9 @@ public class HiveConf extends Configuration {
// Statistics
HIVESTATSAUTOGATHER("hive.stats.autogather", true,
- "A flag to gather statistics automatically during the INSERT OVERWRITE command."),
+ "A flag to gather statistics (only basic) automatically during the INSERT OVERWRITE command."),
+ HIVESTATSCOLAUTOGATHER("hive.stats.column.autogather", false,
+ "A flag to gather column statistics automatically."),
HIVESTATSDBCLASS("hive.stats.dbclass", "fs", new PatternSet("custom", "fs"),
"The storage that stores temporary Hive statistics. In filesystem based statistics collection ('fs'), \n" +
"each task writes statistics it has collected in a file on the filesystem, which will be aggregated \n" +
http://git-wip-us.apache.org/repos/asf/hive/blob/ec4b936e/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestBeeLineWithArgs.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestBeeLineWithArgs.java b/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestBeeLineWithArgs.java
index ecfeddb..f9909ad 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestBeeLineWithArgs.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestBeeLineWithArgs.java
@@ -39,7 +39,6 @@ import java.util.List;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hive.jdbc.Utils;
import org.apache.hive.jdbc.miniHS2.MiniHS2;
import org.junit.AfterClass;
import org.junit.Assert;
@@ -176,7 +175,6 @@ public class TestBeeLineWithArgs {
// Put the script content in a temp file
File scriptFile = File.createTempFile(this.getClass().getSimpleName(), "temp");
- System.out.println("script file is " + scriptFile.getAbsolutePath());
scriptFile.deleteOnExit();
PrintStream os = new PrintStream(new FileOutputStream(scriptFile));
os.print(scriptText);
@@ -657,7 +655,7 @@ public class TestBeeLineWithArgs {
@Test
public void testEmbeddedBeelineConnection() throws Throwable{
- String embeddedJdbcURL = Utils.URL_PREFIX+"/Default";
+ String embeddedJdbcURL = BeeLine.BEELINE_DEFAULT_JDBC_URL+"/Default";
List<String> argList = getBaseArgs(embeddedJdbcURL);
argList.add("--hivevar");
argList.add("DUMMY_TBL=embedded_table");
@@ -772,7 +770,7 @@ public class TestBeeLineWithArgs {
@Test
public void testEmbeddedBeelineOutputs() throws Throwable{
- String embeddedJdbcURL = Utils.URL_PREFIX+"/Default";
+ String embeddedJdbcURL = BeeLine.BEELINE_DEFAULT_JDBC_URL+"/Default";
List<String> argList = getBaseArgs(embeddedJdbcURL);
// Set to non-zk lock manager to avoid trying to connect to zookeeper
final String SCRIPT_TEXT =
@@ -845,22 +843,4 @@ public class TestBeeLineWithArgs {
}
- /**
- * Attempt to execute a simple script file with the usage of user & password variables in URL.
- * Test for presence of an expected pattern
- * in the output (stdout or stderr), fail if not found
- * Print PASSED or FAILED
- */
- @Test
- public void testConnectionWithURLParams() throws Throwable {
- final String EXPECTED_PATTERN = " hivetest ";
- List<String> argList = new ArrayList<String>();
- argList.add("-d");
- argList.add(BeeLine.BEELINE_DEFAULT_JDBC_DRIVER);
- argList.add("-u");
- argList.add(miniHS2.getBaseJdbcURL() + ";user=hivetest;password=hive");
- String SCRIPT_TEXT = "select current_user();";
-
- testScriptFile( SCRIPT_TEXT, EXPECTED_PATTERN, true, argList);
- }
}
http://git-wip-us.apache.org/repos/asf/hive/blob/ec4b936e/jdbc/src/java/org/apache/hive/jdbc/Utils.java
----------------------------------------------------------------------
diff --git a/jdbc/src/java/org/apache/hive/jdbc/Utils.java b/jdbc/src/java/org/apache/hive/jdbc/Utils.java
index 7ea6309..42181d7 100644
--- a/jdbc/src/java/org/apache/hive/jdbc/Utils.java
+++ b/jdbc/src/java/org/apache/hive/jdbc/Utils.java
@@ -37,12 +37,12 @@ import org.apache.http.cookie.Cookie;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-public class Utils {
+class Utils {
static final Logger LOG = LoggerFactory.getLogger(Utils.class.getName());
/**
* The required prefix for the connection URL.
*/
- public static final String URL_PREFIX = "jdbc:hive2://";
+ static final String URL_PREFIX = "jdbc:hive2://";
/**
* If host is provided, without a port.
@@ -63,7 +63,7 @@ public class Utils {
static final String HIVE_SERVER2_RETRY_TRUE = "true";
static final String HIVE_SERVER2_RETRY_FALSE = "false";
- public static class JdbcConnectionParams {
+ static class JdbcConnectionParams {
// Note on client side parameter naming convention:
// Prefer using a shorter camelCase param name instead of using the same name as the
// corresponding
@@ -76,33 +76,31 @@ public class Utils {
// Retry setting
static final String RETRIES = "retries";
- public static final String AUTH_TYPE = "auth";
+ static final String AUTH_TYPE = "auth";
// We're deprecating this variable's name.
- public static final String AUTH_QOP_DEPRECATED = "sasl.qop";
- public static final String AUTH_QOP = "saslQop";
- public static final String AUTH_SIMPLE = "noSasl";
- public static final String AUTH_TOKEN = "delegationToken";
- public static final String AUTH_USER = "user";
- public static final String AUTH_PRINCIPAL = "principal";
- public static final String AUTH_PASSWD = "password";
- public static final String AUTH_KERBEROS_AUTH_TYPE = "kerberosAuthType";
- public static final String AUTH_KERBEROS_AUTH_TYPE_FROM_SUBJECT = "fromSubject";
- public static final String ANONYMOUS_USER = "anonymous";
- public static final String ANONYMOUS_PASSWD = "anonymous";
- public static final String USE_SSL = "ssl";
- public static final String SSL_TRUST_STORE = "sslTrustStore";
- public static final String SSL_TRUST_STORE_PASSWORD = "trustStorePassword";
+ static final String AUTH_QOP_DEPRECATED = "sasl.qop";
+ static final String AUTH_QOP = "saslQop";
+ static final String AUTH_SIMPLE = "noSasl";
+ static final String AUTH_TOKEN = "delegationToken";
+ static final String AUTH_USER = "user";
+ static final String AUTH_PRINCIPAL = "principal";
+ static final String AUTH_PASSWD = "password";
+ static final String AUTH_KERBEROS_AUTH_TYPE = "kerberosAuthType";
+ static final String AUTH_KERBEROS_AUTH_TYPE_FROM_SUBJECT = "fromSubject";
+ static final String ANONYMOUS_USER = "anonymous";
+ static final String ANONYMOUS_PASSWD = "anonymous";
+ static final String USE_SSL = "ssl";
+ static final String SSL_TRUST_STORE = "sslTrustStore";
+ static final String SSL_TRUST_STORE_PASSWORD = "trustStorePassword";
// We're deprecating the name and placement of this in the parsed map (from hive conf vars to
// hive session vars).
static final String TRANSPORT_MODE_DEPRECATED = "hive.server2.transport.mode";
- public static final String TRANSPORT_MODE = "transportMode";
+ static final String TRANSPORT_MODE = "transportMode";
// We're deprecating the name and placement of this in the parsed map (from hive conf vars to
// hive session vars).
static final String HTTP_PATH_DEPRECATED = "hive.server2.thrift.http.path";
- public static final String HTTP_PATH = "httpPath";
- public static final String SERVICE_DISCOVERY_MODE = "serviceDiscoveryMode";
- public static final String PROPERTY_DRIVER = "driver";
- public static final String PROPERTY_URL = "url";
+ static final String HTTP_PATH = "httpPath";
+ static final String SERVICE_DISCOVERY_MODE = "serviceDiscoveryMode";
// Don't use dynamic service discovery
static final String SERVICE_DISCOVERY_MODE_NONE = "none";
// Use ZooKeeper for indirection while using dynamic service discovery
@@ -633,14 +631,4 @@ public class Utils {
}
return true;
}
-
- public static String parsePropertyFromUrl(final String url, final String key) {
- String[] tokens = url.split(";");
- for (String token : tokens) {
- if (token.trim().startsWith(key.trim() + "=")) {
- return token.trim().substring((key.trim() + "=").length());
- }
- }
- return null;
- }
}
http://git-wip-us.apache.org/repos/asf/hive/blob/ec4b936e/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java
index 00552a8..636f079 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java
@@ -406,11 +406,7 @@ public abstract class Operator<T extends OperatorDesc> implements Serializable,C
}
}
if (asyncEx != null) {
- if (asyncEx instanceof Exception) {
- throw new HiveException("Async initialization failed", asyncEx);
- } else {
- throw (Error) asyncEx;
- }
+ throw new HiveException("Async initialization failed", asyncEx);
}
completeInitializationOp(os);
}
http://git-wip-us.apache.org/repos/asf/hive/blob/ec4b936e/ql/src/java/org/apache/hadoop/hive/ql/exec/TextRecordReader.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/TextRecordReader.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/TextRecordReader.java
index 47ab9c2..8319f11 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/TextRecordReader.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/TextRecordReader.java
@@ -40,14 +40,12 @@ public class TextRecordReader implements RecordReader {
private InputStream in;
private Text row;
private Configuration conf;
- private boolean escape;
public void initialize(InputStream in, Configuration conf, Properties tbl)
throws IOException {
lineReader = new LineReader(in, conf);
this.in = in;
this.conf = conf;
- escape = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVESCRIPTESCAPE);
}
public Writable createRow() throws IOException {
@@ -62,7 +60,7 @@ public class TextRecordReader implements RecordReader {
int bytesConsumed = lineReader.readLine((Text) row);
- if (escape) {
+ if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVESCRIPTESCAPE)) {
return HiveUtils.unescapeText((Text) row);
}
return bytesConsumed;
http://git-wip-us.apache.org/repos/asf/hive/blob/ec4b936e/ql/src/java/org/apache/hadoop/hive/ql/exec/TextRecordWriter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/TextRecordWriter.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/TextRecordWriter.java
index f15458d..10b4594 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/TextRecordWriter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/TextRecordWriter.java
@@ -35,20 +35,18 @@ public class TextRecordWriter implements RecordWriter {
private OutputStream out;
private Configuration conf;
- private boolean escape;
public void initialize(OutputStream out, Configuration conf)
throws IOException {
this.out = out;
this.conf = conf;
- escape = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVESCRIPTESCAPE);
}
public void write(Writable row) throws IOException {
Text text = (Text) row;
Text escapeText = text;
- if (escape) {
+ if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVESCRIPTESCAPE)) {
escapeText = HiveUtils.escapeText(text);
}
http://git-wip-us.apache.org/repos/asf/hive/blob/ec4b936e/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
index 8144c3b..7082931 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.hive.ql.exec;
import java.util.ArrayList;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
-
import java.beans.DefaultPersistenceDelegate;
import java.beans.Encoder;
import java.beans.Expression;
@@ -97,7 +96,6 @@ import org.apache.hadoop.hive.common.JavaUtils;
import org.apache.hadoop.hive.common.StatsSetupConst;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.llap.io.api.LlapProxy;
import org.apache.hadoop.hive.metastore.MetaStoreUtils;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
@@ -196,7 +194,6 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.esotericsoftware.kryo.Kryo;
-import com.esotericsoftware.kryo.KryoException;
import com.google.common.base.Preconditions;
/**
@@ -439,7 +436,6 @@ public final class Utilities {
throw new RuntimeException("Unknown work type: " + name);
}
}
-
gWorkMap.get(conf).put(path, gWork);
} else if (LOG.isDebugEnabled()) {
LOG.debug("Found plan in cache for name: " + name);
@@ -450,16 +446,6 @@ public final class Utilities {
LOG.debug("File not found: " + fnf.getMessage());
LOG.info("No plan file found: "+path);
return null;
- } catch (KryoException ke) {
- Throwable cnfThrowable = findClassNotFoundException(ke);
- if (LlapProxy.isDaemon() && (cnfThrowable != null)) {
- LOG.error("Missing class \"" + cnfThrowable.getMessage() + "\". If this is a UDF and you " +
- "are running LLAP, you may need to regenerate the llap startup script and restart " +
- "llap with jars for your udf.", cnfThrowable);
- throw new RuntimeException("Cannot find \"" + cnfThrowable.getMessage() + "\" You may" +
- " need to regenerate the LLAP startup script and restart llap daemons.", cnfThrowable);
- }
- throw new RuntimeException(ke);
} catch (Exception e) {
String msg = "Failed to load plan: " + path + ": " + e;
LOG.error(msg, e);
@@ -474,16 +460,6 @@ public final class Utilities {
}
}
- private static Throwable findClassNotFoundException(Throwable ke) {
- while (ke != null) {
- if (ke instanceof ClassNotFoundException) {
- return ke;
- }
- ke = ke.getCause();
- }
- return null;
- }
-
public static void setWorkflowAdjacencies(Configuration conf, QueryPlan plan) {
try {
Graph stageGraph = plan.getQueryPlan().getStageGraph();
http://git-wip-us.apache.org/repos/asf/hive/blob/ec4b936e/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezProcessor.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezProcessor.java
index a33b6e2..c560f37 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezProcessor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezProcessor.java
@@ -39,7 +39,6 @@ import org.apache.tez.runtime.api.ExecutionContext;
import org.apache.tez.runtime.api.LogicalInput;
import org.apache.tez.runtime.api.LogicalOutput;
import org.apache.tez.runtime.api.ProcessorContext;
-import org.apache.tez.runtime.api.TaskFailureType;
import org.apache.tez.runtime.library.api.KeyValueWriter;
/**
@@ -179,8 +178,6 @@ public class TezProcessor extends AbstractLogicalIOProcessor {
} finally {
if (originalThrowable != null && originalThrowable instanceof Error) {
LOG.error(StringUtils.stringifyException(originalThrowable));
- getContext().reportFailure(TaskFailureType.FATAL, originalThrowable,
- "Cannot recover from this error");
throw new RuntimeException(originalThrowable);
}
http://git-wip-us.apache.org/repos/asf/hive/blob/ec4b936e/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index d9f58f2..3fa1233 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -2860,16 +2860,6 @@ private void constructOneLBLocationMap(FileStatus fSta,
if (destIsSubDir) {
FileStatus[] srcs = destFs.listStatus(srcf, FileUtils.HIDDEN_FILES_PATH_FILTER);
- if (inheritPerms) {
- try {
- HdfsUtils.setFullFileStatus(conf, destStatus, destFs, destf, false);
- } catch (IOException e) {
- String msg = "Error setting permission of file " + destf;
- LOG.error(msg);
- throw new HiveException(msg, e);
- }
- }
-
List<Future<Void>> futures = new LinkedList<>();
final ExecutorService pool = Executors.newFixedThreadPool(
conf.getIntVar(ConfVars.HIVE_MOVE_FILES_THREAD_COUNT),
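
The code following this hunk fans the individual file moves out over a fixed-size
thread pool and collects a Future per move. Reduced to the bare pattern (the
per-file work below is a hypothetical stand-in, not Hive's actual copy logic):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.Callable;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;

    public class MoveFilesDemo {
      public static void main(String[] args) throws Exception {
        // Stand-in for conf.getIntVar(ConfVars.HIVE_MOVE_FILES_THREAD_COUNT).
        ExecutorService pool = Executors.newFixedThreadPool(4);
        List<Future<Void>> futures = new ArrayList<>();
        for (int i = 0; i < 10; i++) {
          final int fileNo = i;
          futures.add(pool.submit(new Callable<Void>() {
            @Override
            public Void call() {
              // A real implementation would rename or copy one file here.
              System.out.println("moving file #" + fileNo);
              return null;
            }
          }));
        }
        // Materialize every Future so the first failure propagates.
        for (Future<Void> f : futures) {
          f.get();
        }
        pool.shutdown();
      }
    }
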
http://git-wip-us.apache.org/repos/asf/hive/blob/ec4b936e/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenMRSkewJoinProcessor.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenMRSkewJoinProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenMRSkewJoinProcessor.java
index 9fbbd4c..f41fa4e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenMRSkewJoinProcessor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenMRSkewJoinProcessor.java
@@ -117,12 +117,6 @@ public final class GenMRSkewJoinProcessor {
}
List<Task<? extends Serializable>> children = currTask.getChildTasks();
- if (children != null && children.size() > 1) {
- throw new SemanticException("Should not happened");
- }
-
- Task<? extends Serializable> child =
- children != null && children.size() == 1 ? children.get(0) : null;
Path baseTmpDir = parseCtx.getContext().getMRTmpPath();
@@ -347,13 +341,14 @@ public final class GenMRSkewJoinProcessor {
tsk.addDependentTask(oldChild);
}
}
- }
- if (child != null) {
- currTask.removeDependentTask(child);
- listTasks.add(child);
+ currTask.setChildTasks(new ArrayList<Task<? extends Serializable>>());
+ for (Task<? extends Serializable> oldChild : children) {
+ oldChild.getParentTasks().remove(currTask);
+ }
+ listTasks.addAll(children);
}
ConditionalResolverSkewJoinCtx context =
- new ConditionalResolverSkewJoinCtx(bigKeysDirToTaskMap, child);
+ new ConditionalResolverSkewJoinCtx(bigKeysDirToTaskMap, children);
ConditionalWork cndWork = new ConditionalWork(listWorks);
ConditionalTask cndTsk = (ConditionalTask) TaskFactory.get(cndWork, parseCtx.getConf());
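
This processor and its Spark counterpart below make the same structural change:
instead of remembering at most one child task, currTask is detached from all of
its children, and the children become candidate branches of the conditional
skew-join task. A self-contained sketch of the re-linking on a toy task graph
(the Task class here is a simplified stand-in, not Hive's):

    import java.util.ArrayList;
    import java.util.List;

    public class RelinkDemo {
      static class Task {
        final String name;
        final List<Task> parents = new ArrayList<>();
        final List<Task> children = new ArrayList<>();
        Task(String name) { this.name = name; }
      }

      public static void main(String[] args) {
        Task curr = new Task("join");
        Task c1 = new Task("child-1");
        Task c2 = new Task("child-2");
        for (Task c : new Task[] { c1, c2 }) {
          curr.children.add(c);
          c.parents.add(curr);
        }

        // Detach curr from its children, mirroring setChildTasks(new ArrayList<>())
        // followed by oldChild.getParentTasks().remove(currTask) in the patch.
        List<Task> children = new ArrayList<>(curr.children);
        curr.children.clear();
        for (Task oldChild : children) {
          oldChild.parents.remove(curr);
        }

        // All former children become branches the conditional resolver may run.
        List<Task> listTasks = new ArrayList<>(children);
        for (Task t : listTasks) {
          System.out.println("conditional branch: " + t.name);
        }
      }
    }
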
http://git-wip-us.apache.org/repos/asf/hive/blob/ec4b936e/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenSparkSkewJoinProcessor.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenSparkSkewJoinProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenSparkSkewJoinProcessor.java
index 11ec07a..ded9231 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenSparkSkewJoinProcessor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenSparkSkewJoinProcessor.java
@@ -93,9 +93,6 @@ public class GenSparkSkewJoinProcessor {
List<Task<? extends Serializable>> children = currTask.getChildTasks();
- Task<? extends Serializable> child =
- children != null && children.size() == 1 ? children.get(0) : null;
-
Path baseTmpDir = parseCtx.getContext().getMRTmpPath();
JoinDesc joinDescriptor = joinOp.getConf();
@@ -334,14 +331,17 @@ public class GenSparkSkewJoinProcessor {
tsk.addDependentTask(oldChild);
}
}
- }
- if (child != null) {
- currTask.removeDependentTask(child);
- listTasks.add(child);
- listWorks.add(child.getWork());
+ currTask.setChildTasks(new ArrayList<Task<? extends Serializable>>());
+ for (Task<? extends Serializable> oldChild : children) {
+ oldChild.getParentTasks().remove(currTask);
+ }
+ listTasks.addAll(children);
+ for (Task<? extends Serializable> oldChild : children) {
+ listWorks.add(oldChild.getWork());
+ }
}
ConditionalResolverSkewJoin.ConditionalResolverSkewJoinCtx context =
- new ConditionalResolverSkewJoin.ConditionalResolverSkewJoinCtx(bigKeysDirToTaskMap, child);
+ new ConditionalResolverSkewJoin.ConditionalResolverSkewJoinCtx(bigKeysDirToTaskMap, children);
ConditionalWork cndWork = new ConditionalWork(listWorks);
ConditionalTask cndTsk = (ConditionalTask) TaskFactory.get(cndWork, parseCtx.getConf());
http://git-wip-us.apache.org/repos/asf/hive/blob/ec4b936e/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SparkMapJoinResolver.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SparkMapJoinResolver.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SparkMapJoinResolver.java
index 8e56263..a3ec990 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SparkMapJoinResolver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SparkMapJoinResolver.java
@@ -411,7 +411,9 @@ public class SparkMapJoinResolver implements PhysicalPlanResolver {
context.setDirToTaskMap(newbigKeysDirToTaskMap);
// update no skew task
if (context.getNoSkewTask() != null && context.getNoSkewTask().equals(originalTask)) {
- context.setNoSkewTask(newTask);
+ List<Task<? extends Serializable>> noSkewTask = new ArrayList<>();
+ noSkewTask.add(newTask);
+ context.setNoSkewTask(noSkewTask);
}
}
}
http://git-wip-us.apache.org/repos/asf/hive/blob/ec4b936e/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsAutoGatherContext.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsAutoGatherContext.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsAutoGatherContext.java
new file mode 100644
index 0000000..15a47dc
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsAutoGatherContext.java
@@ -0,0 +1,291 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.parse;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.ql.Context;
+import org.apache.hadoop.hive.ql.QueryState;
+import org.apache.hadoop.hive.ql.exec.ColumnInfo;
+import org.apache.hadoop.hive.ql.exec.Operator;
+import org.apache.hadoop.hive.ql.exec.RowSchema;
+import org.apache.hadoop.hive.ql.exec.SelectOperator;
+import org.apache.hadoop.hive.ql.exec.vector.VectorizationContext;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.AnalyzeRewriteContext;
+import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
+import org.apache.hadoop.hive.ql.plan.LoadFileDesc;
+import org.apache.hadoop.hive.ql.plan.OperatorDesc;
+import org.apache.hadoop.hive.ql.plan.SelectDesc;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
+
+/**
+ * ColumnStatsAutoGatherContext: this is passed to the compiler when
+ * hive.stats.autogather=true is set during an INSERT OVERWRITE or INSERT INTO
+ * command.
+ *
+ */
+
+public class ColumnStatsAutoGatherContext {
+
+ public AnalyzeRewriteContext analyzeRewrite;
+ private final List<LoadFileDesc> loadFileWork = new ArrayList<>();
+ private final SemanticAnalyzer sa;
+ private final HiveConf conf;
+ private final Operator<? extends OperatorDesc> op;
+ private final List<FieldSchema> columns;
+ private final List<FieldSchema> partitionColumns;
+ private boolean isInsertInto;
+ private Table tbl;
+ private Map<String, String> partSpec;
+
+ public ColumnStatsAutoGatherContext(SemanticAnalyzer sa, HiveConf conf,
+ Operator<? extends OperatorDesc> op, Table tbl, Map<String, String> partSpec,
+ boolean isInsertInto) throws SemanticException {
+ super();
+ this.sa = sa;
+ this.conf = conf;
+ this.op = op;
+ this.tbl = tbl;
+ this.partSpec = partSpec;
+ this.isInsertInto = isInsertInto;
+ columns = tbl.getCols();
+ partitionColumns = tbl.getPartCols();
+ }
+
+ public List<LoadFileDesc> getLoadFileWork() {
+ return loadFileWork;
+ }
+
+ public AnalyzeRewriteContext getAnalyzeRewrite() {
+ return analyzeRewrite;
+ }
+
+ public void setAnalyzeRewrite(AnalyzeRewriteContext analyzeRewrite) {
+ this.analyzeRewrite = analyzeRewrite;
+ }
+
+ public void insertAnalyzePipeline() throws SemanticException{
+ // 1. Generate the statement "analyze table [tablename] compute statistics for columns"
+ // For a non-partitioned table, this generates the operator tree TS-SEL-GBY-RS-GBY-SEL-FS
+ // For a statically partitioned table, it generates TS-FIL(partitionKey)-SEL-GBY(partitionKey)-RS-GBY-SEL-FS
+ // For a dynamically partitioned table, it generates TS-SEL-GBY(partitionKey)-RS-GBY-SEL-FS
+ // However, we do not need to specify the partition spec because (1) the data is going to be inserted into that
+ // specific partition, and (2) we can compose the static/dynamic partition using a select operator in replaceSelectOperatorProcess.
+ String analyzeCommand = "analyze table `" + tbl.getDbName() + "`.`" + tbl.getTableName() + "`"
+ + " compute statistics for columns ";
+
+ // 2. Based on the statement, generate the selectOperator
+ Operator<?> selOp = null;
+ try {
+ selOp = genSelOpForAnalyze(analyzeCommand);
+ } catch (IOException | ParseException e) {
+ throw new SemanticException(e);
+ }
+
+ // 3. attach this SEL to the operator right before FS
+ op.getChildOperators().add(selOp);
+ selOp.getParentOperators().clear();
+ selOp.getParentOperators().add(op);
+
+ // 4. address the colExp, colList, etc for the SEL
+ try {
+ replaceSelectOperatorProcess((SelectOperator)selOp, op);
+ } catch (HiveException e) {
+ throw new SemanticException(e);
+ }
+ }
+
+ @SuppressWarnings("rawtypes")
+ private Operator genSelOpForAnalyze(String analyzeCommand) throws IOException, ParseException, SemanticException{
+ //0. initialization
+ Context ctx = new Context(conf);
+ ParseDriver pd = new ParseDriver();
+ ASTNode tree = pd.parse(analyzeCommand, ctx);
+ tree = ParseUtils.findRootNonNullToken(tree);
+
+ //1. get the ColumnStatsSemanticAnalyzer
+ BaseSemanticAnalyzer baseSem = SemanticAnalyzerFactory.get(new QueryState(conf), tree);
+ ColumnStatsSemanticAnalyzer colSem = (ColumnStatsSemanticAnalyzer) baseSem;
+
+ //2. get the rewritten AST
+ ASTNode ast = colSem.rewriteAST(tree, this);
+ baseSem = SemanticAnalyzerFactory.get(new QueryState(conf), ast);
+ SemanticAnalyzer sem = (SemanticAnalyzer) baseSem;
+ QB qb = new QB(null, null, false);
+ ASTNode child = ast;
+ ParseContext subPCtx = ((SemanticAnalyzer) sem).getParseContext();
+ subPCtx.setContext(ctx);
+ ((SemanticAnalyzer) sem).initParseCtx(subPCtx);
+ sem.doPhase1(child, qb, sem.initPhase1Ctx(), null);
+ // This will trigger new calls to metastore to collect metadata
+ // TODO: cache the information from the metastore
+ sem.getMetaData(qb);
+ Operator<?> operator = sem.genPlan(qb);
+
+ //3. populate the load file work so that ColumnStatsTask can work
+ loadFileWork.addAll(sem.getLoadFileWork());
+
+ // 4. Because the analyze statement has exactly one TS, we can retrieve it directly.
+ if (sem.topOps.values().size() != 1) {
+ throw new SemanticException(
+ "ColumnStatsAutoGatherContext is expecting exactly one TS, but finds "
+ + sem.topOps.values().size());
+ }
+ operator = sem.topOps.values().iterator().next();
+
+ //5. get the first SEL after TS
+ while(!(operator instanceof SelectOperator)){
+ operator = operator.getChildOperators().get(0);
+ }
+ return operator;
+ }
+
+ /**
+ * @param operator : the select operator in the analyze statement
+ * @param input : the operator right before FS in the insert overwrite statement
+ * @throws HiveException
+ */
+ private void replaceSelectOperatorProcess(SelectOperator operator, Operator<? extends OperatorDesc> input)
+ throws HiveException {
+ RowSchema selRS = operator.getSchema();
+ ArrayList<ColumnInfo> signature = new ArrayList<>();
+ OpParseContext inputCtx = sa.opParseCtx.get(input);
+ RowResolver inputRR = inputCtx.getRowResolver();
+ ArrayList<ColumnInfo> columns = inputRR.getColumnInfos();
+ ArrayList<ExprNodeDesc> colList = new ArrayList<ExprNodeDesc>();
+ ArrayList<String> columnNames = new ArrayList<String>();
+ Map<String, ExprNodeDesc> columnExprMap =
+ new HashMap<String, ExprNodeDesc>();
+ // The column positions in the operator are laid out as follows:
+ // <---- non-partition columns ---->|<-- static partition columns -->|<-- dynamic partition columns -->
+ //        ExprNodeColumnDesc        |      ExprNodeConstantDesc      |       ExprNodeColumnDesc
+ //          from input              |        generated here          |          from input
+
+ // 1. deal with non-partition columns
+ for (int i = 0; i < this.columns.size(); i++) {
+ ColumnInfo col = columns.get(i);
+ ExprNodeDesc exprNodeDesc = new ExprNodeColumnDesc(col);
+ colList.add(exprNodeDesc);
+ String internalName = selRS.getColumnNames().get(i);
+ columnNames.add(internalName);
+ columnExprMap.put(internalName, exprNodeDesc);
+ signature.add(selRS.getSignature().get(i));
+ }
+ // if there is any partition column (in static partition or dynamic
+ // partition or mixed case)
+ int dynamicPartBegin = -1;
+ for (int i = 0; i < partitionColumns.size(); i++) {
+ ExprNodeDesc exprNodeDesc = null;
+ String partColName = partitionColumns.get(i).getName();
+ // 2. deal with static partition columns
+ if (partSpec != null && partSpec.containsKey(partColName)
+ && partSpec.get(partColName) != null) {
+ if (dynamicPartBegin >= 0) {
+ throw new SemanticException(
+ "Dynamic partition columns should not come before static partition columns.");
+ }
+ exprNodeDesc = new ExprNodeConstantDesc(partSpec.get(partColName));
+ TypeInfo srcType = exprNodeDesc.getTypeInfo();
+ TypeInfo destType = selRS.getSignature().get(this.columns.size() + i).getType();
+ if (!srcType.equals(destType)) {
+ // This may be possible when srcType is string but destType is integer
+ exprNodeDesc = ParseUtils
+ .createConversionCast(exprNodeDesc, (PrimitiveTypeInfo) destType);
+ }
+ }
+ // 3. dynamic partition columns
+ else {
+ dynamicPartBegin++;
+ ColumnInfo col = columns.get(this.columns.size() + dynamicPartBegin);
+ TypeInfo srcType = col.getType();
+ TypeInfo destType = selRS.getSignature().get(this.columns.size() + i).getType();
+ exprNodeDesc = new ExprNodeColumnDesc(col);
+ if (!srcType.equals(destType)) {
+ exprNodeDesc = ParseUtils
+ .createConversionCast(exprNodeDesc, (PrimitiveTypeInfo) destType);
+ }
+ }
+ colList.add(exprNodeDesc);
+ String internalName = selRS.getColumnNames().get(this.columns.size() + i);
+ columnNames.add(internalName);
+ columnExprMap.put(internalName, exprNodeDesc);
+ signature.add(selRS.getSignature().get(this.columns.size() + i));
+ }
+ operator.setConf(new SelectDesc(colList, columnNames));
+ operator.setColumnExprMap(columnExprMap);
+ selRS.setSignature(signature);
+ operator.setSchema(selRS);
+ }
+
+ public String getCompleteName() {
+ return tbl.getDbName() + "." + tbl.getTableName();
+ }
+
+ public boolean isInsertInto() {
+ return isInsertInto;
+ }
+
+ public static boolean canRunAutogatherStats(Operator curr) {
+ // check the ObjectInspector
+ for (ColumnInfo cinfo : curr.getSchema().getSignature()) {
+ if (cinfo.getIsVirtualCol()) {
+ return false;
+ } else if (cinfo.getObjectInspector().getCategory() != ObjectInspector.Category.PRIMITIVE) {
+ return false;
+ } else {
+ switch (((PrimitiveTypeInfo) cinfo.getType()).getPrimitiveCategory()) {
+ case BOOLEAN:
+ case BYTE:
+ case SHORT:
+ case INT:
+ case LONG:
+ case TIMESTAMP:
+ case FLOAT:
+ case DOUBLE:
+ case STRING:
+ case CHAR:
+ case VARCHAR:
+ case BINARY:
+ case DECIMAL:
+ // TODO: Support case DATE:
+ break;
+ default:
+ return false;
+ }
+ }
+ }
+ return true;
+ }
+
+}
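
The comment block in replaceSelectOperatorProcess above fixes the column layout
the injected SEL must produce: non-partition columns and dynamic partition
columns are forwarded from the input operator as ExprNodeColumnDesc, while
static partition values are synthesized as ExprNodeConstantDesc. The sketch
below only prints that layout for a hypothetical partitioned insert (table and
column names are invented; the descriptor names come from the patch):

    import java.util.Arrays;
    import java.util.LinkedHashMap;
    import java.util.List;
    import java.util.Map;

    public class SelectLayoutDemo {
      public static void main(String[] args) {
        // Hypothetical target: t(a int, b string) partitioned by (ds, hr),
        // written via INSERT ... PARTITION (ds='2010-03-03', hr): ds static, hr dynamic.
        List<String> dataCols = Arrays.asList("a", "b");
        List<String> partCols = Arrays.asList("ds", "hr");
        Map<String, String> partSpec = new LinkedHashMap<>();
        partSpec.put("ds", "2010-03-03");
        partSpec.put("hr", null); // dynamic

        for (String c : dataCols) {
          System.out.println(c + " -> ExprNodeColumnDesc (from input)");
        }
        for (String p : partCols) {
          if (partSpec.get(p) != null) {
            System.out.println(p + " -> ExprNodeConstantDesc('" + partSpec.get(p) + "')");
          } else {
            System.out.println(p + " -> ExprNodeColumnDesc (from input)");
          }
        }
      }
    }
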
http://git-wip-us.apache.org/repos/asf/hive/blob/ec4b936e/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java
index 3b6cbce..d3aef41 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java
@@ -110,11 +110,18 @@ public class ColumnStatsSemanticAnalyzer extends SemanticAnalyzer {
partValsSpecified += partSpec.get(partKey) == null ? 0 : 1;
}
try {
- if ((partValsSpecified == tbl.getPartitionKeys().size()) && (db.getPartition(tbl, partSpec, false, null, false) == null)) {
- throw new SemanticException(ErrorMsg.COLUMNSTATSCOLLECTOR_INVALID_PARTITION.getMsg() + " : " + partSpec);
+ // For a static partition, the partition may not exist yet when
+ // HIVESTATSCOLAUTOGATHER is set to true
+ if (!conf.getBoolVar(ConfVars.HIVESTATSCOLAUTOGATHER)) {
+ if ((partValsSpecified == tbl.getPartitionKeys().size())
+ && (db.getPartition(tbl, partSpec, false, null, false) == null)) {
+ throw new SemanticException(ErrorMsg.COLUMNSTATSCOLLECTOR_INVALID_PARTITION.getMsg()
+ + " : " + partSpec);
+ }
}
} catch (HiveException he) {
- throw new SemanticException(ErrorMsg.COLUMNSTATSCOLLECTOR_INVALID_PARTITION.getMsg() + " : " + partSpec);
+ throw new SemanticException(ErrorMsg.COLUMNSTATSCOLLECTOR_INVALID_PARTITION.getMsg() + " : "
+ + partSpec);
}
// User might have only specified partial list of partition keys, in which case add other partition keys in partSpec
@@ -157,7 +164,7 @@ public class ColumnStatsSemanticAnalyzer extends SemanticAnalyzer {
} else {
groupByClause.append(",");
}
- groupByClause.append(fs.getName());
+ groupByClause.append("`" + fs.getName() + "`");
}
// attach the predicate and group by to the return clause
@@ -235,12 +242,12 @@ public class ColumnStatsSemanticAnalyzer extends SemanticAnalyzer {
if (isPartitionStats) {
for (FieldSchema fs : tbl.getPartCols()) {
- rewrittenQueryBuilder.append(" , " + fs.getName());
+ rewrittenQueryBuilder.append(" , `" + fs.getName() + "`");
}
}
- rewrittenQueryBuilder.append(" from ");
+ rewrittenQueryBuilder.append(" from `");
rewrittenQueryBuilder.append(tbl.getDbName());
- rewrittenQueryBuilder.append(".");
+ rewrittenQueryBuilder.append("`.");
rewrittenQueryBuilder.append("`" + tbl.getTableName() + "`");
isRewritten = true;
@@ -378,4 +385,54 @@ public class ColumnStatsSemanticAnalyzer extends SemanticAnalyzer {
analyzeInternal(originalTree);
}
}
+
+ /**
+ * @param ast
+ * the original analyze AST
+ * @param context
+ * the ColumnStatsAutoGatherContext that receives the rewrite metadata
+ * @return the rewritten AST for the column stats query
+ * @throws SemanticException
+ */
+ public ASTNode rewriteAST(ASTNode ast, ColumnStatsAutoGatherContext context)
+ throws SemanticException {
+ tbl = AnalyzeCommandUtils.getTable(ast, this);
+ colNames = getColumnName(ast);
+ // Save away the original AST
+ originalTree = ast;
+ boolean isPartitionStats = AnalyzeCommandUtils.isPartitionLevelStats(ast);
+ Map<String, String> partSpec = null;
+ checkForPartitionColumns(colNames,
+ Utilities.getColumnNamesFromFieldSchema(tbl.getPartitionKeys()));
+ validateSpecifiedColumnNames(colNames);
+ if (conf.getBoolVar(ConfVars.HIVE_STATS_COLLECT_PART_LEVEL_STATS) && tbl.isPartitioned()) {
+ isPartitionStats = true;
+ }
+
+ if (isPartitionStats) {
+ isTableLevel = false;
+ partSpec = AnalyzeCommandUtils.getPartKeyValuePairsFromAST(tbl, ast, conf);
+ handlePartialPartitionSpec(partSpec);
+ } else {
+ isTableLevel = true;
+ }
+ colType = getColumnTypes(colNames);
+ int numBitVectors = 0;
+ try {
+ numBitVectors = HiveStatsUtils.getNumBitVectorsForNDVEstimation(conf);
+ } catch (Exception e) {
+ throw new SemanticException(e.getMessage());
+ }
+ rewrittenQuery = genRewrittenQuery(colNames, numBitVectors, partSpec, isPartitionStats);
+ rewrittenTree = genRewrittenTree(rewrittenQuery);
+
+ context.analyzeRewrite = new AnalyzeRewriteContext();
+ context.analyzeRewrite.setTableName(tbl.getDbName() + "." + tbl.getTableName());
+ context.analyzeRewrite.setTblLvl(isTableLevel);
+ context.analyzeRewrite.setColName(colNames);
+ context.analyzeRewrite.setColType(colType);
+ return rewrittenTree;
+ }
}
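
The quoting changes above wrap database, table, and partition-column names in
backticks while composing the rewritten statistics query, so identifiers that
are reserved words or contain unusual characters still parse. A simplified
sketch of the composition (one invented data column and the compute_stats call
shape only; the real rewrite emits one aggregate per analyzed column):

    import java.util.Arrays;
    import java.util.List;

    public class RewriteQueryDemo {
      static String backtick(String id) {
        return "`" + id + "`";
      }

      public static void main(String[] args) {
        // Hypothetical table whose partition column collides with a keyword.
        String db = "default";
        String table = "sales";
        List<String> partCols = Arrays.asList("date", "hr");

        StringBuilder q = new StringBuilder("select compute_stats(amount, 16)");
        for (String p : partCols) {
          q.append(" , ").append(backtick(p));
        }
        q.append(" from ").append(backtick(db)).append(".").append(backtick(table));
        q.append(" group by ");
        for (int i = 0; i < partCols.size(); i++) {
          if (i > 0) {
            q.append(",");
          }
          q.append(backtick(partCols.get(i)));
        }
        // select compute_stats(amount, 16) , `date` , `hr`
        //   from `default`.`sales` group by `date`,`hr`
        System.out.println(q);
      }
    }
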
http://git-wip-us.apache.org/repos/asf/hive/blob/ec4b936e/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java
index 96ef20d..b2125ca 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java
@@ -80,6 +80,7 @@ public class ParseContext {
private HashMap<String, SplitSample> nameToSplitSample;
private List<LoadTableDesc> loadTableWork;
private List<LoadFileDesc> loadFileWork;
+ private List<ColumnStatsAutoGatherContext> columnStatsAutoGatherContexts;
private Context ctx;
private QueryState queryState;
private HiveConf conf;
@@ -166,6 +167,7 @@ public class ParseContext {
Set<JoinOperator> joinOps,
Set<SMBMapJoinOperator> smbMapJoinOps,
List<LoadTableDesc> loadTableWork, List<LoadFileDesc> loadFileWork,
+ List<ColumnStatsAutoGatherContext> columnStatsAutoGatherContexts,
Context ctx, HashMap<String, String> idToTableNameMap, int destTableId,
UnionProcContext uCtx, List<AbstractMapJoinOperator<? extends MapJoinDesc>> listMapJoinOpsNoReducer,
Map<String, PrunedPartitionList> prunedPartitions,
@@ -188,6 +190,7 @@ public class ParseContext {
this.smbMapJoinOps = smbMapJoinOps;
this.loadFileWork = loadFileWork;
this.loadTableWork = loadTableWork;
+ this.columnStatsAutoGatherContexts = columnStatsAutoGatherContexts;
this.topOps = topOps;
this.ctx = ctx;
this.idToTableNameMap = idToTableNameMap;
@@ -608,4 +611,13 @@ public class ParseContext {
public Map<String, Table> getTabNameToTabObject() {
return tabNameToTabObject;
}
+
+ public List<ColumnStatsAutoGatherContext> getColumnStatsAutoGatherContexts() {
+ return columnStatsAutoGatherContexts;
+ }
+
+ public void setColumnStatsAutoGatherContexts(
+ List<ColumnStatsAutoGatherContext> columnStatsAutoGatherContexts) {
+ this.columnStatsAutoGatherContexts = columnStatsAutoGatherContexts;
+ }
}
http://git-wip-us.apache.org/repos/asf/hive/blob/ec4b936e/ql/src/java/org/apache/hadoop/hive/ql/parse/QBParseInfo.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/QBParseInfo.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/QBParseInfo.java
index 3a226e7..3a0402e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/QBParseInfo.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/QBParseInfo.java
@@ -63,7 +63,9 @@ public class QBParseInfo {
private final Set<String> destCubes;
private final Set<String> destGroupingSets;
private final Map<String, ASTNode> destToHaving;
- private final HashSet<String> insertIntoTables;
+ // insertIntoTables/insertOverwriteTables map a table's full name to the AST of its insert clause.
+ private final Map<String, ASTNode> insertIntoTables;
+ private final Map<String, ASTNode> insertOverwriteTables;
private boolean isAnalyzeCommand; // used for the analyze command (statistics)
private boolean isNoScanAnalyzeCommand; // used for the analyze command (statistics) (noscan)
@@ -133,7 +135,8 @@ public class QBParseInfo {
destToSortby = new HashMap<String, ASTNode>();
destToOrderby = new HashMap<String, ASTNode>();
destToLimit = new HashMap<String, SimpleEntry<Integer, Integer>>();
- insertIntoTables = new HashSet<String>();
+ insertIntoTables = new HashMap<String, ASTNode>();
+ insertOverwriteTables = new HashMap<String, ASTNode>();
destRollups = new HashSet<String>();
destCubes = new HashSet<String>();
destGroupingSets = new HashSet<String>();
@@ -174,13 +177,13 @@ public class QBParseInfo {
}
}
- public void addInsertIntoTable(String fullName) {
- insertIntoTables.add(fullName.toLowerCase());
+ public void addInsertIntoTable(String fullName, ASTNode ast) {
+ insertIntoTables.put(fullName.toLowerCase(), ast);
}
public boolean isInsertIntoTable(String dbName, String table) {
String fullName = dbName + "." + table;
- return insertIntoTables.contains(fullName.toLowerCase());
+ return insertIntoTables.containsKey(fullName.toLowerCase());
}
/**
@@ -189,7 +192,7 @@ public class QBParseInfo {
* @return
*/
public boolean isInsertIntoTable(String fullTableName) {
- return insertIntoTables.contains(fullTableName.toLowerCase());
+ return insertIntoTables.containsKey(fullTableName.toLowerCase());
}
public HashMap<String, ASTNode> getAggregationExprsForClause(String clause) {
@@ -636,6 +639,11 @@ public class QBParseInfo {
public void setPartialScanAnalyzeCommand(boolean isPartialScanAnalyzeCommand) {
this.isPartialScanAnalyzeCommand = isPartialScanAnalyzeCommand;
}
+
+ public Map<String, ASTNode> getInsertOverwriteTables() {
+ return insertOverwriteTables;
+ }
+
}
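
QBParseInfo now maps each target table's full name to the AST of its insert
clause instead of keeping a bare name set, and lookups stay case-insensitive
because the key is lower-cased on both put and containsKey. The idea in
isolation (a String value stands in for the ASTNode):

    import java.util.HashMap;
    import java.util.Map;

    public class CaseInsensitiveKeyDemo {
      public static void main(String[] args) {
        Map<String, String> insertIntoTables = new HashMap<>();
        // put: normalize the fully qualified name once.
        insertIntoTables.put("Default.Dest_J1".toLowerCase(), "TOK_INSERT_INTO");
        // lookup: normalize the probe the same way.
        System.out.println(insertIntoTables.containsKey("default.dest_j1".toLowerCase())); // true
        System.out.println(insertIntoTables.containsKey("DEFAULT.DEST_J1".toLowerCase())); // true
      }
    }
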
http://git-wip-us.apache.org/repos/asf/hive/blob/ec4b936e/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 7162c08..6937308 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -79,6 +79,7 @@ import org.apache.hadoop.hive.ql.exec.AbstractMapJoinOperator;
import org.apache.hadoop.hive.ql.exec.ArchiveUtils;
import org.apache.hadoop.hive.ql.exec.ColumnInfo;
import org.apache.hadoop.hive.ql.exec.ExprNodeEvaluatorFactory;
+import org.apache.hadoop.hive.ql.exec.FetchOperator;
import org.apache.hadoop.hive.ql.exec.FetchTask;
import org.apache.hadoop.hive.ql.exec.FileSinkOperator;
import org.apache.hadoop.hive.ql.exec.FilterOperator;
@@ -129,6 +130,7 @@ import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException;
import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException.UnsupportedFeature;
import org.apache.hadoop.hive.ql.optimizer.calcite.translator.HiveOpConverterPostProc;
import org.apache.hadoop.hive.ql.optimizer.lineage.Generator;
+import org.apache.hadoop.hive.ql.optimizer.ppr.PartitionPruner;
import org.apache.hadoop.hive.ql.optimizer.unionproc.UnionProcContext;
import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.TableSpec.SpecType;
import org.apache.hadoop.hive.ql.parse.CalcitePlanner.ASTSearcher;
@@ -182,6 +184,7 @@ import org.apache.hadoop.hive.ql.plan.LoadTableDesc;
import org.apache.hadoop.hive.ql.plan.MapJoinDesc;
import org.apache.hadoop.hive.ql.plan.OperatorDesc;
import org.apache.hadoop.hive.ql.plan.PTFDesc;
+import org.apache.hadoop.hive.ql.plan.PartitionDesc;
import org.apache.hadoop.hive.ql.plan.PlanUtils;
import org.apache.hadoop.hive.ql.plan.ReduceSinkDesc;
import org.apache.hadoop.hive.ql.plan.ScriptDesc;
@@ -259,6 +262,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
protected LinkedHashMap<Operator<? extends OperatorDesc>, OpParseContext> opParseCtx;
private List<LoadTableDesc> loadTableWork;
private List<LoadFileDesc> loadFileWork;
+ private List<ColumnStatsAutoGatherContext> columnStatsAutoGatherContexts;
private final Map<JoinOperator, QBJoinTree> joinContext;
private final Map<SMBMapJoinOperator, QBJoinTree> smbMapJoinContext;
private final HashMap<TableScanOperator, Table> topToTable;
@@ -353,6 +357,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
topOps = new LinkedHashMap<String, TableScanOperator>();
loadTableWork = new ArrayList<LoadTableDesc>();
loadFileWork = new ArrayList<LoadFileDesc>();
+ columnStatsAutoGatherContexts = new ArrayList<ColumnStatsAutoGatherContext>();
opParseCtx = new LinkedHashMap<Operator<? extends OperatorDesc>, OpParseContext>();
joinContext = new HashMap<JoinOperator, QBJoinTree>();
smbMapJoinContext = new HashMap<SMBMapJoinOperator, QBJoinTree>();
@@ -390,6 +395,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
tabNameToTabObject.clear();
loadTableWork.clear();
loadFileWork.clear();
+ columnStatsAutoGatherContexts.clear();
topOps.clear();
destTableId = 1;
idToTableNameMap.clear();
@@ -448,7 +454,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
return new ParseContext(queryState, opToPartPruner, opToPartList, topOps,
new HashSet<JoinOperator>(joinContext.keySet()),
new HashSet<SMBMapJoinOperator>(smbMapJoinContext.keySet()),
- loadTableWork, loadFileWork, ctx, idToTableNameMap, destTableId, uCtx,
+ loadTableWork, loadFileWork, columnStatsAutoGatherContexts, ctx, idToTableNameMap, destTableId, uCtx,
listMapJoinOpsNoReducer, prunedPartitions, tabNameToTabObject,
opToSamplePruner, globalLimitCtx, nameToSplitSample, inputs, rootTasks,
opToPartToSkewedPruner, viewAliasToInput, reduceSinkOperatorsAddedByEnforceBucketingSorting,
@@ -1401,18 +1407,25 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
case HiveParser.TOK_INSERT_INTO:
String currentDatabase = SessionState.get().getCurrentDatabase();
String tab_name = getUnescapedName((ASTNode) ast.getChild(0).getChild(0), currentDatabase);
- qbp.addInsertIntoTable(tab_name);
+ qbp.addInsertIntoTable(tab_name, ast);
case HiveParser.TOK_DESTINATION:
ctx_1.dest = "insclause-" + ctx_1.nextNum;
ctx_1.nextNum++;
boolean isTmpFileDest = false;
if (ast.getChildCount() > 0 && ast.getChild(0) instanceof ASTNode) {
- ASTNode ch = (ASTNode)ast.getChild(0);
- if (ch.getToken().getType() == HiveParser.TOK_DIR
- && ch.getChildCount() > 0 && ch.getChild(0) instanceof ASTNode) {
- ch = (ASTNode)ch.getChild(0);
+ ASTNode ch = (ASTNode) ast.getChild(0);
+ if (ch.getToken().getType() == HiveParser.TOK_DIR && ch.getChildCount() > 0
+ && ch.getChild(0) instanceof ASTNode) {
+ ch = (ASTNode) ch.getChild(0);
isTmpFileDest = ch.getToken().getType() == HiveParser.TOK_TMP_FILE;
+ } else {
+ if (ast.getToken().getType() == HiveParser.TOK_DESTINATION
+ && ast.getChild(0).getType() == HiveParser.TOK_TAB) {
+ String fullTableName = getUnescapedName((ASTNode) ast.getChild(0).getChild(0),
+ SessionState.get().getCurrentDatabase());
+ qbp.getInsertOverwriteTables().put(fullTableName, ast);
+ }
}
}
@@ -6516,6 +6529,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
DynamicPartitionCtx dpCtx = null;
LoadTableDesc ltd = null;
ListBucketingCtx lbCtx = null;
+ Map<String, String> partSpec = null;
switch (dest_type.intValue()) {
case QBMetaData.DEST_TABLE: {
@@ -6531,7 +6545,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
ErrorMsg.INSERT_EXTERNAL_TABLE.getMsg(dest_tab.getTableName()));
}
- Map<String, String> partSpec = qbm.getPartSpecForAlias(dest);
+ partSpec = qbm.getPartSpecForAlias(dest);
dest_path = dest_tab.getPath();
// If the query here is an INSERT_INTO and the target is an immutable table,
@@ -6875,6 +6889,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
}
input = genConversionSelectOperator(dest, qb, input, table_desc, dpCtx);
+
inputRR = opParseCtx.get(input).getRowResolver();
ArrayList<ColumnInfo> vecCol = new ArrayList<ColumnInfo>();
@@ -7004,9 +7019,41 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
FileSinkOperator fso = (FileSinkOperator) output;
fso.getConf().setTable(dest_tab);
fsopToTable.put(fso, dest_tab);
+ // The following code collects column stats when hive.stats.autogather=true
+ // and the statement is an INSERT OVERWRITE or INSERT INTO a table
+ if (dest_tab != null && conf.getBoolVar(ConfVars.HIVESTATSAUTOGATHER)
+ && conf.getBoolVar(ConfVars.HIVESTATSCOLAUTOGATHER)
+ && ColumnStatsAutoGatherContext.canRunAutogatherStats(fso)) {
+ if (dest_type.intValue() == QBMetaData.DEST_TABLE) {
+ genAutoColumnStatsGatheringPipeline(qb, table_desc, partSpec, input, qb.getParseInfo()
+ .isInsertIntoTable(dest_tab.getDbName(), dest_tab.getTableName()));
+ } else if (dest_type.intValue() == QBMetaData.DEST_PARTITION) {
+ genAutoColumnStatsGatheringPipeline(qb, table_desc, dest_part.getSpec(), input, qb
+ .getParseInfo().isInsertIntoTable(dest_tab.getDbName(), dest_tab.getTableName()));
+
+ }
+ }
return output;
}
+ private void genAutoColumnStatsGatheringPipeline(QB qb, TableDesc table_desc,
+ Map<String, String> partSpec, Operator curr, boolean isInsertInto) throws SemanticException {
+ String tableName = table_desc.getTableName();
+ Table table = null;
+ try {
+ table = db.getTable(tableName);
+ } catch (HiveException e) {
+ throw new SemanticException(e.getMessage());
+ }
+ LOG.info("Generate an operator pipeline to autogather column stats for table " + tableName
+ + " in query " + ctx.getCmd());
+ ColumnStatsAutoGatherContext columnStatsAutoGatherContext = null;
+ columnStatsAutoGatherContext = new ColumnStatsAutoGatherContext(this, conf, curr, table, partSpec, isInsertInto);
+ columnStatsAutoGatherContext.insertAnalyzePipeline();
+ columnStatsAutoGatherContexts.add(columnStatsAutoGatherContext);
+ }
+
String fixCtasColumnName(String colName) {
return colName;
}
@@ -10689,7 +10736,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
ParseContext pCtx = new ParseContext(queryState, opToPartPruner, opToPartList, topOps,
new HashSet<JoinOperator>(joinContext.keySet()),
new HashSet<SMBMapJoinOperator>(smbMapJoinContext.keySet()),
- loadTableWork, loadFileWork, ctx, idToTableNameMap, destTableId, uCtx,
+ loadTableWork, loadFileWork, columnStatsAutoGatherContexts, ctx, idToTableNameMap, destTableId, uCtx,
listMapJoinOpsNoReducer, prunedPartitions, tabNameToTabObject, opToSamplePruner,
globalLimitCtx, nameToSplitSample, inputs, rootTasks, opToPartToSkewedPruner,
viewAliasToInput, reduceSinkOperatorsAddedByEnforceBucketingSorting,
@@ -12895,4 +12942,12 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
String.format("Warning: %s", msg));
}
+ public List<LoadFileDesc> getLoadFileWork() {
+ return loadFileWork;
+ }
+
+ public void setLoadFileWork(List<LoadFileDesc> loadFileWork) {
+ this.loadFileWork = loadFileWork;
+ }
+
}
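
The FileSink changes above only build the auto-gather pipeline when both
hive.stats.autogather and hive.stats.column.autogather are on and every output
column passes canRunAutogatherStats; the destination type then decides whether
the partition spec comes from the query (DEST_TABLE) or from the resolved
partition (DEST_PARTITION). The gate, reduced to plain Java with hypothetical
booleans standing in for the HiveConf and QBMetaData values:

    public class AutoGatherGateDemo {
      static final int DEST_TABLE = 1;
      static final int DEST_PARTITION = 2;

      static String decide(boolean statsAutogather, boolean colAutogather,
          boolean allColumnsSupported, int destType) {
        if (!(statsAutogather && colAutogather && allColumnsSupported)) {
          return "skip column-stats pipeline";
        }
        switch (destType) {
        case DEST_TABLE:
          return "gather using the query's partition spec";
        case DEST_PARTITION:
          return "gather using the resolved partition's spec";
        default:
          return "skip column-stats pipeline";
        }
      }

      public static void main(String[] args) {
        System.out.println(decide(true, true, true, DEST_TABLE));
        System.out.println(decide(true, false, true, DEST_PARTITION));
      }
    }
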
http://git-wip-us.apache.org/repos/asf/hive/blob/ec4b936e/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
index 4049f40..4b34ebf 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
@@ -20,15 +20,19 @@ package org.apache.hadoop.hive.ql.parse;
import java.io.Serializable;
import java.util.ArrayList;
+import java.util.Collection;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashSet;
+import java.util.LinkedList;
import java.util.List;
+import java.util.Queue;
import java.util.Set;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.common.HiveStatsUtils;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.MetaException;
@@ -251,15 +257,6 @@ public abstract class TaskCompiler {
generateTaskTree(rootTasks, pCtx, mvTask, inputs, outputs);
- /*
- * If the query was the result of analyze table column compute statistics rewrite, create
- * a column stats task instead of a fetch task to persist stats to the metastore.
- */
- if (isCStats) {
- genColumnStatsTask(pCtx.getAnalyzeRewrite(), loadTableWork, loadFileWork,
- rootTasks, outerQueryLimit);
- }
-
// For each task, set the key descriptor for the reducer
for (Task<? extends Serializable> rootTask : rootTasks) {
GenMapRedUtils.setKeyAndValueDescForTaskTree(rootTask);
@@ -273,6 +270,35 @@ public abstract class TaskCompiler {
optimizeTaskPlan(rootTasks, pCtx, ctx);
+ /*
+ * If the query was the result of analyze table column compute statistics rewrite, create
+ * a column stats task instead of a fetch task to persist stats to the metastore.
+ */
+ if (isCStats || !pCtx.getColumnStatsAutoGatherContexts().isEmpty()) {
+ Set<Task<? extends Serializable>> leafTasks = new LinkedHashSet<Task<? extends Serializable>>();
+ getLeafTasks(rootTasks, leafTasks);
+ if (isCStats) {
+ genColumnStatsTask(pCtx.getAnalyzeRewrite(), loadFileWork, leafTasks, outerQueryLimit, 0);
+ } else {
+ for (ColumnStatsAutoGatherContext columnStatsAutoGatherContext : pCtx
+ .getColumnStatsAutoGatherContexts()) {
+ if (!columnStatsAutoGatherContext.isInsertInto()) {
+ genColumnStatsTask(columnStatsAutoGatherContext.getAnalyzeRewrite(),
+ columnStatsAutoGatherContext.getLoadFileWork(), leafTasks, outerQueryLimit, 0);
+ } else {
+ int numBitVector;
+ try {
+ numBitVector = HiveStatsUtils.getNumBitVectorsForNDVEstimation(conf);
+ } catch (Exception e) {
+ throw new SemanticException(e.getMessage());
+ }
+ genColumnStatsTask(columnStatsAutoGatherContext.getAnalyzeRewrite(),
+ columnStatsAutoGatherContext.getLoadFileWork(), leafTasks, outerQueryLimit, numBitVector);
+ }
+ }
+ }
+ }
+
decideExecMode(rootTasks, ctx, globalLimitCtx);
if (pCtx.getQueryProperties().isCTAS() && !pCtx.getCreateTable().isMaterialization()) {
@@ -355,8 +381,9 @@ public abstract class TaskCompiler {
* @param qb
*/
@SuppressWarnings("unchecked")
- protected void genColumnStatsTask(AnalyzeRewriteContext analyzeRewrite, List<LoadTableDesc> loadTableWork,
- List<LoadFileDesc> loadFileWork, List<Task<? extends Serializable>> rootTasks, int outerQueryLimit) {
+ protected void genColumnStatsTask(AnalyzeRewriteContext analyzeRewrite,
+ List<LoadFileDesc> loadFileWork, Set<Task<? extends Serializable>> leafTasks,
+ int outerQueryLimit, int numBitVector) {
ColumnStatsTask cStatsTask = null;
ColumnStatsWork cStatsWork = null;
FetchWork fetch = null;
@@ -385,12 +412,12 @@ public abstract class TaskCompiler {
fetch = new FetchWork(loadFileWork.get(0).getSourcePath(), resultTab, outerQueryLimit);
ColumnStatsDesc cStatsDesc = new ColumnStatsDesc(tableName,
- colName, colType, isTblLevel);
+ colName, colType, isTblLevel, numBitVector);
cStatsWork = new ColumnStatsWork(fetch, cStatsDesc);
cStatsTask = (ColumnStatsTask) TaskFactory.get(cStatsWork, conf);
- // This is a column stats task. According to the semantic, there should be
- // only one MR task in the rootTask.
- rootTasks.get(0).addDependentTask(cStatsTask);
+ for (Task<? extends Serializable> tsk : leafTasks) {
+ tsk.addDependentTask(cStatsTask);
+ }
}
@@ -398,7 +425,7 @@ public abstract class TaskCompiler {
* Find all leaf tasks of the list of root tasks.
*/
protected void getLeafTasks(List<Task<? extends Serializable>> rootTasks,
- HashSet<Task<? extends Serializable>> leaves) {
+ Set<Task<? extends Serializable>> leaves) {
for (Task<? extends Serializable> root : rootTasks) {
getLeafTasks(root, leaves);
@@ -406,7 +433,7 @@ public abstract class TaskCompiler {
}
private void getLeafTasks(Task<? extends Serializable> task,
- HashSet<Task<? extends Serializable>> leaves) {
+ Set<Task<? extends Serializable>> leaves) {
if (task.getDependentTasks() == null) {
if (!leaves.contains(task)) {
leaves.add(task);
@@ -453,7 +480,7 @@ public abstract class TaskCompiler {
ParseContext clone = new ParseContext(queryState,
pCtx.getOpToPartPruner(), pCtx.getOpToPartList(), pCtx.getTopOps(),
pCtx.getJoinOps(), pCtx.getSmbMapJoinOps(),
- pCtx.getLoadTableWork(), pCtx.getLoadFileWork(), pCtx.getContext(),
+ pCtx.getLoadTableWork(), pCtx.getLoadFileWork(), pCtx.getColumnStatsAutoGatherContexts(), pCtx.getContext(),
pCtx.getIdToTableNameMap(), pCtx.getDestTableId(), pCtx.getUCtx(),
pCtx.getListMapJoinOpsNoReducer(),
pCtx.getPrunedPartitions(), pCtx.getTabNameToTabObject(), pCtx.getOpToSamplePruner(), pCtx.getGlobalLimitCtx(),
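
With the column-stats task generation moved after optimizeTaskPlan, TaskCompiler
attaches the ColumnStatsTask to every leaf of the optimized task tree rather
than assuming a single root MR task; getLeafTasks is a plain recursive walk. A
self-contained sketch of the walk and the attach step (toy Task type, not
Hive's):

    import java.util.ArrayList;
    import java.util.LinkedHashSet;
    import java.util.List;
    import java.util.Set;

    public class LeafTasksDemo {
      static class Task {
        final String name;
        final List<Task> dependents = new ArrayList<>();
        Task(String name) { this.name = name; }
        void addDependentTask(Task t) { dependents.add(t); }
      }

      // Collect every task with no dependents, depth-first.
      static void getLeafTasks(Task task, Set<Task> leaves) {
        if (task.dependents.isEmpty()) {
          leaves.add(task);
        } else {
          for (Task child : task.dependents) {
            getLeafTasks(child, leaves);
          }
        }
      }

      public static void main(String[] args) {
        Task root = new Task("map-reduce");
        Task move = new Task("move");
        root.addDependentTask(move);
        move.addDependentTask(new Task("basic-stats"));

        Set<Task> leaves = new LinkedHashSet<>();
        getLeafTasks(root, leaves);

        // Hang the column-stats task off every leaf, as the patch now does.
        Task cStatsTask = new Task("column-stats");
        for (Task leaf : leaves) {
          leaf.addDependentTask(cStatsTask);
          System.out.println(leaf.name + " -> " + cStatsTask.name);
        }
      }
    }
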
http://git-wip-us.apache.org/repos/asf/hive/blob/ec4b936e/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverSkewJoin.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverSkewJoin.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverSkewJoin.java
index 9934fdf..778d6f9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverSkewJoin.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverSkewJoin.java
@@ -51,7 +51,7 @@ public class ConditionalResolverSkewJoin implements ConditionalResolver, Seriali
// this map stores mapping from "big key dir" to its corresponding mapjoin
// task.
private HashMap<Path, Task<? extends Serializable>> dirToTaskMap;
- private Task<? extends Serializable> noSkewTask;
+ private List<Task<? extends Serializable>> noSkewTask;
/**
* For serialization use only.
@@ -61,7 +61,7 @@ public class ConditionalResolverSkewJoin implements ConditionalResolver, Seriali
public ConditionalResolverSkewJoinCtx(
HashMap<Path, Task<? extends Serializable>> dirToTaskMap,
- Task<? extends Serializable> noSkewTask) {
+ List<Task<? extends Serializable>> noSkewTask) {
super();
this.dirToTaskMap = dirToTaskMap;
this.noSkewTask = noSkewTask;
@@ -76,11 +76,11 @@ public class ConditionalResolverSkewJoin implements ConditionalResolver, Seriali
this.dirToTaskMap = dirToTaskMap;
}
- public Task<? extends Serializable> getNoSkewTask() {
+ public List<Task<? extends Serializable>> getNoSkewTask() {
return noSkewTask;
}
- public void setNoSkewTask(Task<? extends Serializable> noSkewTask) {
+ public void setNoSkewTask(List<Task<? extends Serializable>> noSkewTask) {
this.noSkewTask = noSkewTask;
}
}
@@ -121,7 +121,7 @@ public class ConditionalResolverSkewJoin implements ConditionalResolver, Seriali
e.printStackTrace();
}
if (resTsks.isEmpty() && ctx.getNoSkewTask() != null) {
- resTsks.add(ctx.getNoSkewTask());
+ resTsks.addAll(ctx.getNoSkewTask());
}
return resTsks;
}
http://git-wip-us.apache.org/repos/asf/hive/blob/ec4b936e/ql/src/test/queries/clientpositive/autoColumnStats_1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/autoColumnStats_1.q b/ql/src/test/queries/clientpositive/autoColumnStats_1.q
new file mode 100644
index 0000000..bb7252a
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/autoColumnStats_1.q
@@ -0,0 +1,192 @@
+set hive.stats.column.autogather=true;
+set hive.stats.fetch.column.stats=true;
+set hive.exec.dynamic.partition=true;
+set hive.exec.dynamic.partition.mode=nonstrict;
+set hive.auto.convert.join=true;
+set hive.join.emit.interval=2;
+set hive.auto.convert.join.noconditionaltask=true;
+set hive.auto.convert.join.noconditionaltask.size=10000;
+set hive.auto.convert.sortmerge.join.bigtable.selection.policy = org.apache.hadoop.hive.ql.optimizer.TableSizeBasedBigTableSelectorForAutoSMJ;
+set hive.optimize.bucketingsorting=false;
+
+drop table src_multi1;
+
+create table src_multi1 like src;
+
+insert overwrite table src_multi1 select * from src;
+
+explain extended select * from src_multi1;
+
+describe formatted src_multi1;
+
+drop table a;
+drop table b;
+create table a like src;
+create table b like src;
+
+from src
+insert overwrite table a select *
+insert overwrite table b select *;
+
+describe formatted a;
+describe formatted b;
+
+drop table a;
+drop table b;
+create table a like src;
+create table b like src;
+
+from src
+insert overwrite table a select *
+insert into table b select *;
+
+describe formatted a;
+describe formatted b;
+
+
+drop table src_multi2;
+
+create table src_multi2 like src;
+
+insert overwrite table src_multi2 select subq.key, src.value from (select * from src union select * from src1)subq join src on subq.key=src.key;
+
+describe formatted src_multi2;
+
+
+drop table nzhang_part14;
+
+create table if not exists nzhang_part14 (key string)
+ partitioned by (value string);
+
+insert overwrite table nzhang_part14 partition(value)
+select key, value from (
+ select * from (select 'k1' as key, cast(null as string) as value from src limit 2)a
+ union all
+ select * from (select 'k2' as key, '' as value from src limit 2)b
+ union all
+ select * from (select 'k3' as key, ' ' as value from src limit 2)c
+) T;
+
+explain select key from nzhang_part14;
+
+
+drop table src5;
+
+create table src5 as select key, value from src limit 5;
+
+insert overwrite table nzhang_part14 partition(value)
+select key, value from src5;
+
+explain select key from nzhang_part14;
+
+
+create table alter5 ( col1 string ) partitioned by (dt string);
+
+alter table alter5 add partition (dt='a') location 'parta';
+
+describe formatted alter5 partition (dt='a');
+
+insert overwrite table alter5 partition (dt='a') select key from src ;
+
+describe formatted alter5 partition (dt='a');
+
+explain select * from alter5 where dt='a';
+
+
+drop table src_stat_part;
+create table src_stat_part(key string, value string) partitioned by (partitionId int);
+
+insert overwrite table src_stat_part partition (partitionId=1)
+select * from src1 limit 5;
+
+describe formatted src_stat_part PARTITION(partitionId=1);
+
+insert overwrite table src_stat_part partition (partitionId=2)
+select * from src1;
+
+describe formatted src_stat_part PARTITION(partitionId=2);
+
+drop table srcbucket_mapjoin;
+CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+drop table tab_part;
+CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
+drop table srcbucket_mapjoin_part;
+CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
+
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08');
+
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+
+insert overwrite table tab_part partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin_part;
+
+describe formatted tab_part partition (ds='2008-04-08');
+
+CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+insert overwrite table tab partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin;
+
+describe formatted tab partition (ds='2008-04-08');
+
+drop table nzhang_part14;
+
+create table if not exists nzhang_part14 (key string, value string)
+ partitioned by (ds string, hr string);
+
+describe formatted nzhang_part14;
+
+insert overwrite table nzhang_part14 partition(ds, hr)
+select key, value, ds, hr from (
+ select * from (select 'k1' as key, cast(null as string) as value, '1' as ds, '2' as hr from src limit 2)a
+ union all
+ select * from (select 'k2' as key, '' as value, '1' as ds, '3' as hr from src limit 2)b
+ union all
+ select * from (select 'k3' as key, ' ' as value, '2' as ds, '1' as hr from src limit 2)c
+) T;
+
+desc formatted nzhang_part14 partition(ds='1', hr='3');
+
+
+INSERT OVERWRITE TABLE nzhang_part14 PARTITION (ds='2010-03-03', hr)
+SELECT key, value, hr FROM srcpart WHERE ds is not null and hr>10;
+
+desc formatted nzhang_part14 PARTITION(ds='2010-03-03', hr='12');
+
+
+drop table nzhang_part14;
+create table if not exists nzhang_part14 (key string, value string)
+partitioned by (ds string, hr string);
+
+INSERT OVERWRITE TABLE nzhang_part14 PARTITION (ds='2010-03-03', hr)
+SELECT key, value, hr FROM srcpart WHERE ds is not null and hr>10;
+
+desc formatted nzhang_part14 PARTITION(ds='2010-03-03', hr='12');
+
+drop table a;
+create table a (key string, value string)
+partitioned by (ds string, hr string);
+
+drop table b;
+create table b (key string, value string)
+partitioned by (ds string, hr string);
+
+drop table c;
+create table c (key string, value string)
+partitioned by (ds string, hr string);
+
+
+FROM srcpart
+INSERT OVERWRITE TABLE a PARTITION (ds='2010-03-11', hr) SELECT key, value, hr WHERE ds is not null and hr>10
+INSERT OVERWRITE TABLE b PARTITION (ds='2010-04-11', hr) SELECT key, value, hr WHERE ds is not null and hr>11
+INSERT OVERWRITE TABLE c PARTITION (ds='2010-05-11', hr) SELECT key, value, hr WHERE hr>0;
+
+explain select key from a;
+explain select value from b;
+explain select key from b;
+explain select value from c;
+explain select key from c;
+
http://git-wip-us.apache.org/repos/asf/hive/blob/ec4b936e/ql/src/test/queries/clientpositive/autoColumnStats_2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/autoColumnStats_2.q b/ql/src/test/queries/clientpositive/autoColumnStats_2.q
new file mode 100644
index 0000000..c1abcb1
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/autoColumnStats_2.q
@@ -0,0 +1,214 @@
+set hive.stats.column.autogather=true;
+set hive.stats.fetch.column.stats=true;
+set hive.exec.dynamic.partition=true;
+set hive.exec.dynamic.partition.mode=nonstrict;
+set hive.auto.convert.join=true;
+set hive.join.emit.interval=2;
+set hive.auto.convert.join.noconditionaltask=true;
+set hive.auto.convert.join.noconditionaltask.size=10000;
+set hive.auto.convert.sortmerge.join.bigtable.selection.policy = org.apache.hadoop.hive.ql.optimizer.TableSizeBasedBigTableSelectorForAutoSMJ;
+set hive.optimize.bucketingsorting=false;
+
+drop table src_multi1;
+
+create table src_multi1 like src;
+
+insert into table src_multi1 select * from src;
+
+explain extended select * from src_multi1;
+
+describe formatted src_multi1;
+
+drop table a;
+drop table b;
+create table a like src;
+create table b like src;
+
+from src
+insert into table a select *
+insert into table b select *;
+
+describe formatted a key;
+describe formatted b key;
+
+from src
+insert overwrite table a select *
+insert into table b select *;
+
+describe formatted a;
+describe formatted b;
+
+describe formatted b key;
+describe formatted b value;
+
+insert into table b select NULL, NULL from src limit 10;
+
+describe formatted b key;
+describe formatted b value;
+
+insert into table b(value) select key+100000 from src limit 10;
+
+describe formatted b key;
+describe formatted b value;
+
+drop table src_multi2;
+
+create table src_multi2 like src;
+
+insert into table src_multi2 select subq.key, src.value from (select * from src union select * from src1)subq join src on subq.key=src.key;
+
+describe formatted src_multi2;
+
+
+drop table nzhang_part14;
+
+create table if not exists nzhang_part14 (key string)
+ partitioned by (value string);
+
+insert into table nzhang_part14 partition(value)
+select key, value from (
+ select * from (select 'k1' as key, cast(null as string) as value from src limit 2)a
+ union all
+ select * from (select 'k2' as key, '' as value from src limit 2)b
+ union all
+ select * from (select 'k3' as key, ' ' as value from src limit 2)c
+) T;
+
+explain select key from nzhang_part14;
+
+
+drop table src5;
+
+create table src5 as select key, value from src limit 5;
+
+insert into table nzhang_part14 partition(value)
+select key, value from src5;
+
+explain select key from nzhang_part14;
+
+drop table alter5;
+
+create table alter5 ( col1 string ) partitioned by (dt string);
+
+alter table alter5 add partition (dt='a');
+
+describe formatted alter5 partition (dt='a');
+
+insert into table alter5 partition (dt='a') select key from src ;
+
+describe formatted alter5 partition (dt='a');
+
+explain select * from alter5 where dt='a';
+
+drop table alter5;
+
+create table alter5 ( col1 string ) partitioned by (dt string);
+
+alter table alter5 add partition (dt='a') location 'parta';
+
+describe formatted alter5 partition (dt='a');
+
+insert into table alter5 partition (dt='a') select key from src ;
+
+describe formatted alter5 partition (dt='a');
+
+explain select * from alter5 where dt='a';
+
+
+drop table src_stat_part;
+create table src_stat_part(key string, value string) partitioned by (partitionId int);
+
+insert into table src_stat_part partition (partitionId=1)
+select * from src1 limit 5;
+
+describe formatted src_stat_part PARTITION(partitionId=1);
+
+insert into table src_stat_part partition (partitionId=2)
+select * from src1;
+
+describe formatted src_stat_part PARTITION(partitionId=2);
+
+drop table srcbucket_mapjoin;
+CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+drop table tab_part;
+CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
+drop table srcbucket_mapjoin_part;
+CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
+
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08');
+
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+
+insert into table tab_part partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin_part;
+
+describe formatted tab_part partition (ds='2008-04-08');
+
+CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+insert into table tab partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin;
+
+describe formatted tab partition (ds='2008-04-08');
+
+drop table nzhang_part14;
+
+create table if not exists nzhang_part14 (key string, value string)
+ partitioned by (ds string, hr string);
+
+describe formatted nzhang_part14;
+
+insert into table nzhang_part14 partition(ds, hr)
+select key, value, ds, hr from (
+ select * from (select 'k1' as key, cast(null as string) as value, '1' as ds, '2' as hr from src limit 2)a
+ union all
+ select * from (select 'k2' as key, '' as value, '1' as ds, '3' as hr from src limit 2)b
+ union all
+ select * from (select 'k3' as key, ' ' as value, '2' as ds, '1' as hr from src limit 2)c
+) T;
+
+desc formatted nzhang_part14 partition(ds='1', hr='3');
+
+
+INSERT into TABLE nzhang_part14 PARTITION (ds='2010-03-03', hr)
+SELECT key, value, hr FROM srcpart WHERE ds is not null and hr>10;
+
+desc formatted nzhang_part14 PARTITION(ds='2010-03-03', hr='12');
+
+
+drop table nzhang_part14;
+create table if not exists nzhang_part14 (key string, value string)
+partitioned by (ds string, hr string);
+
+INSERT into TABLE nzhang_part14 PARTITION (ds='2010-03-03', hr)
+SELECT key, value, hr FROM srcpart WHERE ds is not null and hr>10;
+
+desc formatted nzhang_part14 PARTITION(ds='2010-03-03', hr='12');
+
+drop table a;
+create table a (key string, value string)
+partitioned by (ds string, hr string);
+
+drop table b;
+create table b (key string, value string)
+partitioned by (ds string, hr string);
+
+drop table c;
+create table c (key string, value string)
+partitioned by (ds string, hr string);
+
+
+FROM srcpart
+INSERT into TABLE a PARTITION (ds='2010-03-11', hr) SELECT key, value, hr WHERE ds is not null and hr>10
+INSERT into TABLE b PARTITION (ds='2010-04-11', hr) SELECT key, value, hr WHERE ds is not null and hr>11
+INSERT into TABLE c PARTITION (ds='2010-05-11', hr) SELECT key, value, hr WHERE hr>0;
+
+explain select key from a;
+explain select value from b;
+explain select key from b;
+explain select value from c;
+explain select key from c;
+
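Note: the three-way multi-insert above writes dynamic partitions into a, b and c in a single job, and with column-stat autogather enabled (as elsewhere in this patch) each target partition gets column stats computed as part of the insert. A minimal sketch, not part of the patch, of inspecting those per-partition stats, assuming the hr=11/12 partitions that srcpart produces:

desc formatted a partition(ds='2010-03-11', hr='11') key;
desc formatted c partition(ds='2010-05-11', hr='12') value;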
[6/7] hive git commit: HIVE-13566: Auto-gather column stats - phase 1
(Pengcheng Xiong, reviewed by Ashutosh Chauhan)
Posted by px...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ec4b936e/ql/src/test/queries/clientpositive/autoColumnStats_3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/autoColumnStats_3.q b/ql/src/test/queries/clientpositive/autoColumnStats_3.q
new file mode 100644
index 0000000..2ddd981
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/autoColumnStats_3.q
@@ -0,0 +1,67 @@
+set hive.stats.column.autogather=false;
+set hive.stats.fetch.column.stats=true;
+set hive.exec.dynamic.partition=true;
+set hive.exec.dynamic.partition.mode=nonstrict;
+set hive.auto.convert.join=true;
+set hive.join.emit.interval=2;
+set hive.auto.convert.join.noconditionaltask=true;
+set hive.auto.convert.join.noconditionaltask.size=10000;
+set hive.auto.convert.sortmerge.join.bigtable.selection.policy = org.apache.hadoop.hive.ql.optimizer.TableSizeBasedBigTableSelectorForAutoSMJ;
+set hive.optimize.bucketingsorting=false;
+
+drop table src_multi1;
+
+create table src_multi1 like src;
+
+analyze table src_multi1 compute statistics for columns key;
+
+describe formatted src_multi1;
+
+set hive.stats.column.autogather=true;
+
+insert into table src_multi1 select * from src;
+
+describe formatted src_multi1;
+
+
+set hive.stats.column.autogather=false;
+
+drop table nzhang_part14;
+
+create table if not exists nzhang_part14 (key string, value string)
+ partitioned by (ds string, hr string);
+
+describe formatted nzhang_part14;
+
+insert into table nzhang_part14 partition(ds, hr)
+select key, value, ds, hr from (
+ select * from (select 'k1' as key, cast(null as string) as value, '1' as ds, '2' as hr from src limit 2)a
+ union all
+ select * from (select 'k2' as key, '' as value, '1' as ds, '3' as hr from src limit 2)b
+ union all
+ select * from (select 'k3' as key, ' ' as value, '2' as ds, '1' as hr from src limit 2)c
+) T;
+
+desc formatted nzhang_part14 partition(ds='1', hr='3');
+
+analyze table nzhang_part14 partition(ds='1', hr='3') compute statistics for columns value;
+
+desc formatted nzhang_part14 partition(ds='1', hr='3');
+
+desc formatted nzhang_part14 partition(ds='2', hr='1');
+
+set hive.stats.column.autogather=true;
+
+insert into table nzhang_part14 partition(ds, hr)
+select key, value, ds, hr from (
+ select * from (select 'k1' as key, cast(null as string) as value, '1' as ds, '2' as hr from src limit 2)a
+ union all
+ select * from (select 'k2' as key, '' as value, '1' as ds, '3' as hr from src limit 2)b
+ union all
+ select * from (select 'k3' as key, ' ' as value, '2' as ds, '1' as hr from src limit 2)c
+) T;
+
+desc formatted nzhang_part14 partition(ds='1', hr='3');
+
+desc formatted nzhang_part14 partition(ds='2', hr='1');
+
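autoColumnStats_3.q deliberately contrasts the two paths: with hive.stats.column.autogather=false the inserts leave new partitions without column stats until an explicit ANALYZE, while the re-run with autogather=true collects them during the insert itself. A minimal sketch, not part of the patch, of the manual equivalent for the partition the script leaves unanalyzed:

analyze table nzhang_part14 partition(ds='2', hr='1') compute statistics for columns key, value;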
http://git-wip-us.apache.org/repos/asf/hive/blob/ec4b936e/ql/src/test/queries/clientpositive/autoColumnStats_4.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/autoColumnStats_4.q b/ql/src/test/queries/clientpositive/autoColumnStats_4.q
new file mode 100644
index 0000000..9780a75
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/autoColumnStats_4.q
@@ -0,0 +1,20 @@
+set hive.stats.column.autogather=true;
+set hive.mapred.mode=nonstrict;
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+
+create table acid_dtt(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
+
+desc formatted acid_dtt;
+
+explain insert into table acid_dtt select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 10;
+
+insert into table acid_dtt select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 10;
+
+desc formatted acid_dtt;
+
+delete from acid_dtt where b = '0ruyd6Y50JpdGRf6HqD' or b = '2uLyD28144vklju213J1mr';
+
+desc formatted acid_dtt;
+
+
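autoColumnStats_4.q exercises autogather on an ACID table (transactional, bucketed ORC), including a DELETE after the stats-gathering insert. A minimal sketch, not part of the patch, of inspecting the column-level stats the insert produced, using the a/b columns from the CREATE TABLE above:

desc formatted acid_dtt a;
desc formatted acid_dtt b;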
http://git-wip-us.apache.org/repos/asf/hive/blob/ec4b936e/ql/src/test/queries/clientpositive/autoColumnStats_5.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/autoColumnStats_5.q b/ql/src/test/queries/clientpositive/autoColumnStats_5.q
new file mode 100644
index 0000000..d59099c
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/autoColumnStats_5.q
@@ -0,0 +1,47 @@
+set hive.stats.column.autogather=true;
+set hive.mapred.mode=nonstrict;
+set hive.cli.print.header=true;
+SET hive.exec.schema.evolution=true;
+SET hive.vectorized.execution.enabled=false;
+set hive.fetch.task.conversion=none;
+set hive.exec.dynamic.partition.mode=nonstrict;
+
+-- SORT_QUERY_RESULTS
+--
+-- FILE VARIATION: TEXT, Non-Vectorized, MapWork, Partitioned
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... STATIC INSERT
+--
+CREATE TABLE partitioned1(a INT, b STRING) PARTITIONED BY(part INT) STORED AS TEXTFILE;
+
+explain insert into table partitioned1 partition(part=1) values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original');
+
+insert into table partitioned1 partition(part=1) values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original');
+
+desc formatted partitioned1 partition(part=1);
+
+desc formatted partitioned1 partition(part=1) a;
+
+-- Table-Non-Cascade ADD COLUMNS ...
+alter table partitioned1 add columns(c int, d string);
+
+desc formatted partitioned1 partition(part=1);
+
+explain insert into table partitioned1 partition(part=2) values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty');
+
+insert into table partitioned1 partition(part=2) values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty');
+
+desc formatted partitioned1 partition(part=2);
+
+desc formatted partitioned1 partition(part=2) c;
+
+explain insert into table partitioned1 partition(part=1) values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two hundred');
+
+insert into table partitioned1 partition(part=1) values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two hundred');
+
+desc formatted partitioned1 partition(part=1);
+
+desc formatted partitioned1 partition(part=1) a;
+
+desc formatted partitioned1 partition(part=1) c;
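Since the ADD COLUMNS above is non-cascading, only partitions written after the ALTER carry stats for the new columns; part=1 picks up c/d stats only once the final insert re-touches it. A minimal sketch, not part of the patch, checking the other new column on the post-ALTER partition:

desc formatted partitioned1 partition(part=2) d;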
http://git-wip-us.apache.org/repos/asf/hive/blob/ec4b936e/ql/src/test/queries/clientpositive/autoColumnStats_6.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/autoColumnStats_6.q b/ql/src/test/queries/clientpositive/autoColumnStats_6.q
new file mode 100644
index 0000000..45e5daa
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/autoColumnStats_6.q
@@ -0,0 +1,41 @@
+set hive.stats.column.autogather=true;
+set hive.mapred.mode=nonstrict;
+set hive.explain.user=false;
+set hive.merge.orcfile.stripe.level=true;
+set hive.exec.dynamic.partition=true;
+set hive.exec.dynamic.partition.mode=nonstrict;
+set hive.merge.sparkfiles=true;
+
+DROP TABLE orcfile_merge2a;
+
+CREATE TABLE orcfile_merge2a (key INT, value STRING)
+ PARTITIONED BY (one string, two string, three string)
+ STORED AS ORC;
+
+EXPLAIN INSERT OVERWRITE TABLE orcfile_merge2a PARTITION (one='1', two, three)
+ SELECT key, value, PMOD(HASH(key), 10) as two,
+ PMOD(HASH(value), 10) as three
+ FROM src;
+
+INSERT OVERWRITE TABLE orcfile_merge2a PARTITION (one='1', two, three)
+ SELECT key, value, PMOD(HASH(key), 10) as two,
+ PMOD(HASH(value), 10) as three
+ FROM src;
+
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge2a/one=1/two=0/three=2/;
+
+SELECT SUM(HASH(c)) FROM (
+ SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
+ FROM orcfile_merge2a
+) t;
+
+set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
+
+SELECT SUM(HASH(c)) FROM (
+ SELECT TRANSFORM(key, value, '1', PMOD(HASH(key), 10),
+ PMOD(HASH(value), 10)) USING 'tr \t _' AS (c)
+ FROM src
+) t;
+
+DROP TABLE orcfile_merge2a;
+
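autoColumnStats_6.q combines autogather with stripe-level ORC merge on a dynamically partitioned insert; the two TRANSFORM checksums verify that the merged output still matches the source data. A minimal sketch, not part of the patch and only meaningful before the final DROP TABLE, of listing the dynamic partitions the insert created:

show partitions orcfile_merge2a;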
http://git-wip-us.apache.org/repos/asf/hive/blob/ec4b936e/ql/src/test/queries/clientpositive/autoColumnStats_7.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/autoColumnStats_7.q b/ql/src/test/queries/clientpositive/autoColumnStats_7.q
new file mode 100644
index 0000000..2227685
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/autoColumnStats_7.q
@@ -0,0 +1,19 @@
+set hive.stats.column.autogather=true;
+set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
+set hive.map.aggr=false;
+set hive.groupby.skewindata=true;
+
+-- Taken from groupby2.q
+CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE;
+CREATE TEMPORARY TABLE src_temp AS SELECT * FROM src;
+
+explain FROM src_temp
+INSERT OVERWRITE TABLE dest_g2 SELECT substr(src_temp.key,1,1), count(DISTINCT substr(src_temp.value,5)), concat(substr(src_temp.key,1,1),sum(substr(src_temp.value,5))) GROUP BY substr(src_temp.key,1,1);
+
+FROM src_temp
+INSERT OVERWRITE TABLE dest_g2 SELECT substr(src_temp.key,1,1), count(DISTINCT substr(src_temp.value,5)), concat(substr(src_temp.key,1,1),sum(substr(src_temp.value,5))) GROUP BY substr(src_temp.key,1,1);
+
+SELECT dest_g2.* FROM dest_g2;
+
+DROP TABLE dest_g2;
+DROP TABLE src_temp;
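autoColumnStats_7.q forces the two-stage skewed group-by plan (hive.groupby.skewindata=true with map-side aggregation off) while autogathering stats on dest_g2. A minimal sketch, not part of the patch and to be run before the DROP TABLE above, of inspecting the gathered stats on the aggregate columns:

desc formatted dest_g2 c1;
desc formatted dest_g2 c2;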
http://git-wip-us.apache.org/repos/asf/hive/blob/ec4b936e/ql/src/test/queries/clientpositive/autoColumnStats_8.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/autoColumnStats_8.q b/ql/src/test/queries/clientpositive/autoColumnStats_8.q
new file mode 100644
index 0000000..42d070a
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/autoColumnStats_8.q
@@ -0,0 +1,27 @@
+set hive.stats.column.autogather=true;
+-- SORT_QUERY_RESULTS
+
+show partitions srcpart;
+
+create table if not exists nzhang_part8 like srcpart;
+describe extended nzhang_part8;
+
+set hive.merge.mapfiles=false;
+set hive.exec.dynamic.partition=true;
+set hive.exec.dynamic.partition.mode=nonstrict;
+
+explain extended
+from srcpart
+insert overwrite table nzhang_part8 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08'
+insert overwrite table nzhang_part8 partition(ds='2008-12-31', hr) select key, value, hr where ds > '2008-04-08';
+
+from srcpart
+insert overwrite table nzhang_part8 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08'
+insert overwrite table nzhang_part8 partition(ds='2008-12-31', hr) select key, value, hr where ds > '2008-04-08';
+
+show partitions nzhang_part8;
+
+select * from nzhang_part8 where ds is not null and hr is not null;
+
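The multi-insert above mixes a fully dynamic (ds, hr) partition spec with a partly static one (ds='2008-12-31', hr), so autogather must attribute column stats to partitions resolved only at runtime. A minimal sketch, not part of the patch, of spot-checking one partition of each kind, with hr=11/12 coming from srcpart:

desc formatted nzhang_part8 partition(ds='2008-04-08', hr='11') key;
desc formatted nzhang_part8 partition(ds='2008-12-31', hr='12') value;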
http://git-wip-us.apache.org/repos/asf/hive/blob/ec4b936e/ql/src/test/queries/clientpositive/autoColumnStats_9.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/autoColumnStats_9.q b/ql/src/test/queries/clientpositive/autoColumnStats_9.q
new file mode 100644
index 0000000..85f85bc
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/autoColumnStats_9.q
@@ -0,0 +1,22 @@
+set hive.stats.column.autogather=true;
+set hive.mapred.mode=nonstrict;
+set hive.explain.user=false;
+set hive.optimize.skewjoin = true;
+set hive.skewjoin.key = 2;
+
+-- SORT_QUERY_RESULTS
+
+CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE;
+
+EXPLAIN
+FROM src src1 JOIN src src2 ON (src1.key = src2.key)
+INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value;
+
+FROM src src1 JOIN src src2 ON (src1.key = src2.key)
+INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value;
+
+desc formatted dest_j1;
+
+desc formatted dest_j1 key;
+
+desc formatted dest_j1 value;
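autoColumnStats_9.q lowers hive.skewjoin.key to 2 so that repeated join keys take the runtime skew-join path, verifying that stats are still autogathered when rows reach dest_j1 through the conditional skew task. Besides desc formatted, the recorded stats also surface as table parameters; a minimal sketch, not part of the patch:

show tblproperties dest_j1;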
http://git-wip-us.apache.org/repos/asf/hive/blob/ec4b936e/ql/src/test/results/clientpositive/autoColumnStats_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/autoColumnStats_1.q.out b/ql/src/test/results/clientpositive/autoColumnStats_1.q.out
new file mode 100644
index 0000000..e290e52
--- /dev/null
+++ b/ql/src/test/results/clientpositive/autoColumnStats_1.q.out
@@ -0,0 +1,1379 @@
+PREHOOK: query: drop table src_multi1
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table src_multi1
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table src_multi1 like src
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_multi1
+POSTHOOK: query: create table src_multi1 like src
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_multi1
+PREHOOK: query: insert overwrite table src_multi1 select * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@src_multi1
+POSTHOOK: query: insert overwrite table src_multi1 select * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@src_multi1
+POSTHOOK: Lineage: src_multi1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: explain extended select * from src_multi1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended select * from src_multi1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ TableScan
+ alias: src_multi1
+ Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+ GatherStats: false
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+ ListSink
+
+PREHOOK: query: describe formatted src_multi1
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@src_multi1
+POSTHOOK: query: describe formatted src_multi1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@src_multi1
+# col_name data_type comment
+
+key string default
+value string default
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"}
+ numFiles 1
+ numRows 500
+ rawDataSize 5312
+ totalSize 5812
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: drop table a
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table a
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: drop table b
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table b
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table a like src
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@a
+POSTHOOK: query: create table a like src
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@a
+PREHOOK: query: create table b like src
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@b
+POSTHOOK: query: create table b like src
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@b
+PREHOOK: query: from src
+insert overwrite table a select *
+insert overwrite table b select *
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@a
+PREHOOK: Output: default@b
+POSTHOOK: query: from src
+insert overwrite table a select *
+insert overwrite table b select *
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@a
+POSTHOOK: Output: default@b
+POSTHOOK: Lineage: a.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: a.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: b.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: b.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: describe formatted a
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@a
+POSTHOOK: query: describe formatted a
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@a
+# col_name data_type comment
+
+key string default
+value string default
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"}
+ numFiles 1
+ numRows 500
+ rawDataSize 5312
+ totalSize 5812
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: describe formatted b
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@b
+POSTHOOK: query: describe formatted b
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@b
+# col_name data_type comment
+
+key string default
+value string default
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"}
+ numFiles 1
+ numRows 500
+ rawDataSize 5312
+ totalSize 5812
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: drop table a
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@a
+PREHOOK: Output: default@a
+POSTHOOK: query: drop table a
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@a
+POSTHOOK: Output: default@a
+PREHOOK: query: drop table b
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@b
+PREHOOK: Output: default@b
+POSTHOOK: query: drop table b
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@b
+POSTHOOK: Output: default@b
+PREHOOK: query: create table a like src
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@a
+POSTHOOK: query: create table a like src
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@a
+PREHOOK: query: create table b like src
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@b
+POSTHOOK: query: create table b like src
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@b
+PREHOOK: query: from src
+insert overwrite table a select *
+insert into table b select *
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@a
+PREHOOK: Output: default@b
+POSTHOOK: query: from src
+insert overwrite table a select *
+insert into table b select *
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@a
+POSTHOOK: Output: default@b
+POSTHOOK: Lineage: a.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: a.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: b.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: b.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: describe formatted a
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@a
+POSTHOOK: query: describe formatted a
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@a
+# col_name data_type comment
+
+key string default
+value string default
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"}
+ numFiles 1
+ numRows 500
+ rawDataSize 5312
+ totalSize 5812
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: describe formatted b
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@b
+POSTHOOK: query: describe formatted b
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@b
+# col_name data_type comment
+
+key string default
+value string default
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"}
+ numFiles 1
+ numRows 500
+ rawDataSize 5312
+ totalSize 5812
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: drop table src_multi2
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table src_multi2
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table src_multi2 like src
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_multi2
+POSTHOOK: query: create table src_multi2 like src
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_multi2
+PREHOOK: query: insert overwrite table src_multi2 select subq.key, src.value from (select * from src union select * from src1)subq join src on subq.key=src.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@src1
+PREHOOK: Output: default@src_multi2
+POSTHOOK: query: insert overwrite table src_multi2 select subq.key, src.value from (select * from src union select * from src1)subq join src on subq.key=src.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@src1
+POSTHOOK: Output: default@src_multi2
+POSTHOOK: Lineage: src_multi2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src1)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: describe formatted src_multi2
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@src_multi2
+POSTHOOK: query: describe formatted src_multi2
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@src_multi2
+# col_name data_type comment
+
+key string default
+value string default
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"}
+ numFiles 1
+ numRows 508
+ rawDataSize 5400
+ totalSize 5908
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: drop table nzhang_part14
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table nzhang_part14
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table if not exists nzhang_part14 (key string)
+ partitioned by (value string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@nzhang_part14
+POSTHOOK: query: create table if not exists nzhang_part14 (key string)
+ partitioned by (value string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@nzhang_part14
+PREHOOK: query: insert overwrite table nzhang_part14 partition(value)
+select key, value from (
+ select * from (select 'k1' as key, cast(null as string) as value from src limit 2)a
+ union all
+ select * from (select 'k2' as key, '' as value from src limit 2)b
+ union all
+ select * from (select 'k3' as key, ' ' as value from src limit 2)c
+) T
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@nzhang_part14
+POSTHOOK: query: insert overwrite table nzhang_part14 partition(value)
+select key, value from (
+ select * from (select 'k1' as key, cast(null as string) as value from src limit 2)a
+ union all
+ select * from (select 'k2' as key, '' as value from src limit 2)b
+ union all
+ select * from (select 'k3' as key, ' ' as value from src limit 2)c
+) T
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@nzhang_part14@value=
+POSTHOOK: Output: default@nzhang_part14@value=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Lineage: nzhang_part14 PARTITION(value= ).key EXPRESSION []
+POSTHOOK: Lineage: nzhang_part14 PARTITION(value=__HIVE_DEFAULT_PARTITION__).key EXPRESSION []
+PREHOOK: query: explain select key from nzhang_part14
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select key from nzhang_part14
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ TableScan
+ alias: nzhang_part14
+ Statistics: Num rows: 6 Data size: 516 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 6 Data size: 516 Basic stats: COMPLETE Column stats: COMPLETE
+ ListSink
+
+PREHOOK: query: drop table src5
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table src5
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table src5 as select key, value from src limit 5
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src5
+POSTHOOK: query: create table src5 as select key, value from src limit 5
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src5
+POSTHOOK: Lineage: src5.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src5.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert overwrite table nzhang_part14 partition(value)
+select key, value from src5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src5
+PREHOOK: Output: default@nzhang_part14
+POSTHOOK: query: insert overwrite table nzhang_part14 partition(value)
+select key, value from src5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src5
+POSTHOOK: Output: default@nzhang_part14@value=val_165
+POSTHOOK: Output: default@nzhang_part14@value=val_238
+POSTHOOK: Output: default@nzhang_part14@value=val_27
+POSTHOOK: Output: default@nzhang_part14@value=val_311
+POSTHOOK: Output: default@nzhang_part14@value=val_86
+POSTHOOK: Lineage: nzhang_part14 PARTITION(value=val_165).key SIMPLE [(src5)src5.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part14 PARTITION(value=val_238).key SIMPLE [(src5)src5.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part14 PARTITION(value=val_27).key SIMPLE [(src5)src5.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part14 PARTITION(value=val_311).key SIMPLE [(src5)src5.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part14 PARTITION(value=val_86).key SIMPLE [(src5)src5.FieldSchema(name:key, type:string, comment:null), ]
+PREHOOK: query: explain select key from nzhang_part14
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select key from nzhang_part14
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ TableScan
+ alias: nzhang_part14
+ Statistics: Num rows: 11 Data size: 946 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 11 Data size: 946 Basic stats: COMPLETE Column stats: COMPLETE
+ ListSink
+
+PREHOOK: query: create table alter5 ( col1 string ) partitioned by (dt string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@alter5
+POSTHOOK: query: create table alter5 ( col1 string ) partitioned by (dt string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@alter5
+PREHOOK: query: alter table alter5 add partition (dt='a') location 'parta'
+PREHOOK: type: ALTERTABLE_ADDPARTS
+#### A masked pattern was here ####
+PREHOOK: Output: default@alter5
+POSTHOOK: query: alter table alter5 add partition (dt='a') location 'parta'
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+#### A masked pattern was here ####
+POSTHOOK: Output: default@alter5
+POSTHOOK: Output: default@alter5@dt=a
+PREHOOK: query: describe formatted alter5 partition (dt='a')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@alter5
+POSTHOOK: query: describe formatted alter5 partition (dt='a')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@alter5
+# col_name data_type comment
+
+col1 string
+
+# Partition Information
+# col_name data_type comment
+
+dt string
+
+# Detailed Partition Information
+Partition Value: [a]
+Database: default
+Table: alter5
+#### A masked pattern was here ####
+Partition Parameters:
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: insert overwrite table alter5 partition (dt='a') select key from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@alter5@dt=a
+POSTHOOK: query: insert overwrite table alter5 partition (dt='a') select key from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@alter5@dt=a
+POSTHOOK: Lineage: alter5 PARTITION(dt=a).col1 SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: describe formatted alter5 partition (dt='a')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@alter5
+POSTHOOK: query: describe formatted alter5 partition (dt='a')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@alter5
+# col_name data_type comment
+
+col1 string
+
+# Partition Information
+# col_name data_type comment
+
+dt string
+
+# Detailed Partition Information
+Partition Value: [a]
+Database: default
+Table: alter5
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"col1\":\"true\"},\"BASIC_STATS\":\"true\"}
+ numFiles 1
+ numRows 500
+ rawDataSize 1406
+ totalSize 1906
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: explain select * from alter5 where dt='a'
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from alter5 where dt='a'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ TableScan
+ alias: alter5
+ Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: col1 (type: string), 'a' (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 86000 Basic stats: COMPLETE Column stats: COMPLETE
+ ListSink
+
+PREHOOK: query: drop table src_stat_part
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table src_stat_part
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table src_stat_part(key string, value string) partitioned by (partitionId int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_stat_part
+POSTHOOK: query: create table src_stat_part(key string, value string) partitioned by (partitionId int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_stat_part
+PREHOOK: query: insert overwrite table src_stat_part partition (partitionId=1)
+select * from src1 limit 5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src1
+PREHOOK: Output: default@src_stat_part@partitionid=1
+POSTHOOK: query: insert overwrite table src_stat_part partition (partitionId=1)
+select * from src1 limit 5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src1
+POSTHOOK: Output: default@src_stat_part@partitionid=1
+POSTHOOK: Lineage: src_stat_part PARTITION(partitionid=1).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_stat_part PARTITION(partitionid=1).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: describe formatted src_stat_part PARTITION(partitionId=1)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@src_stat_part
+POSTHOOK: query: describe formatted src_stat_part PARTITION(partitionId=1)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@src_stat_part
+# col_name data_type comment
+
+key string
+value string
+
+# Partition Information
+# col_name data_type comment
+
+partitionid int
+
+# Detailed Partition Information
+Partition Value: [1]
+Database: default
+Table: src_stat_part
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"}
+ numFiles 1
+ numRows 5
+ rawDataSize 38
+ totalSize 43
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: insert overwrite table src_stat_part partition (partitionId=2)
+select * from src1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src1
+PREHOOK: Output: default@src_stat_part@partitionid=2
+POSTHOOK: query: insert overwrite table src_stat_part partition (partitionId=2)
+select * from src1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src1
+POSTHOOK: Output: default@src_stat_part@partitionid=2
+POSTHOOK: Lineage: src_stat_part PARTITION(partitionid=2).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_stat_part PARTITION(partitionid=2).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: describe formatted src_stat_part PARTITION(partitionId=2)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@src_stat_part
+POSTHOOK: query: describe formatted src_stat_part PARTITION(partitionId=2)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@src_stat_part
+# col_name data_type comment
+
+key string
+value string
+
+# Partition Information
+# col_name data_type comment
+
+partitionid int
+
+# Detailed Partition Information
+Partition Value: [2]
+Database: default
+Table: src_stat_part
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"}
+ numFiles 1
+ numRows 25
+ rawDataSize 191
+ totalSize 216
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: drop table srcbucket_mapjoin
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table srcbucket_mapjoin
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@srcbucket_mapjoin
+POSTHOOK: query: CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@srcbucket_mapjoin
+PREHOOK: query: drop table tab_part
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table tab_part
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tab_part
+POSTHOOK: query: CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tab_part
+PREHOOK: query: drop table srcbucket_mapjoin_part
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table srcbucket_mapjoin_part
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@srcbucket_mapjoin_part
+POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@srcbucket_mapjoin_part
+PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin
+POSTHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part
+POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: query: insert overwrite table tab_part partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcbucket_mapjoin_part
+PREHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: Output: default@tab_part@ds=2008-04-08
+POSTHOOK: query: insert overwrite table tab_part partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcbucket_mapjoin_part
+POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: Output: default@tab_part@ds=2008-04-08
+POSTHOOK: Lineage: tab_part PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin_part)srcbucket_mapjoin_part.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: tab_part PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin_part)srcbucket_mapjoin_part.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: describe formatted tab_part partition (ds='2008-04-08')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@tab_part
+POSTHOOK: query: describe formatted tab_part partition (ds='2008-04-08')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@tab_part
+# col_name data_type comment
+
+key int
+value string
+
+# Partition Information
+# col_name data_type comment
+
+ds string
+
+# Detailed Partition Information
+Partition Value: [2008-04-08]
+Database: default
+Table: tab_part
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"}
+ numFiles 4
+ numRows 500
+ rawDataSize 5312
+ totalSize 5812
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: 4
+Bucket Columns: [key]
+Sort Columns: [Order(col:key, order:1)]
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tab
+POSTHOOK: query: CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tab
+PREHOOK: query: insert overwrite table tab partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcbucket_mapjoin
+PREHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08
+PREHOOK: Output: default@tab@ds=2008-04-08
+POSTHOOK: query: insert overwrite table tab partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcbucket_mapjoin
+POSTHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08
+POSTHOOK: Output: default@tab@ds=2008-04-08
+POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: describe formatted tab partition (ds='2008-04-08')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@tab
+POSTHOOK: query: describe formatted tab partition (ds='2008-04-08')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@tab
+# col_name data_type comment
+
+key int
+value string
+
+# Partition Information
+# col_name data_type comment
+
+ds string
+
+# Detailed Partition Information
+Partition Value: [2008-04-08]
+Database: default
+Table: tab
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"}
+ numFiles 2
+ numRows 242
+ rawDataSize 2566
+ totalSize 2808
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: 2
+Bucket Columns: [key]
+Sort Columns: [Order(col:key, order:1)]
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: drop table nzhang_part14
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@nzhang_part14
+PREHOOK: Output: default@nzhang_part14
+POSTHOOK: query: drop table nzhang_part14
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@nzhang_part14
+POSTHOOK: Output: default@nzhang_part14
+PREHOOK: query: create table if not exists nzhang_part14 (key string, value string)
+ partitioned by (ds string, hr string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@nzhang_part14
+POSTHOOK: query: create table if not exists nzhang_part14 (key string, value string)
+ partitioned by (ds string, hr string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@nzhang_part14
+PREHOOK: query: describe formatted nzhang_part14
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@nzhang_part14
+POSTHOOK: query: describe formatted nzhang_part14
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@nzhang_part14
+# col_name data_type comment
+
+key string
+value string
+
+# Partition Information
+# col_name data_type comment
+
+ds string
+hr string
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: insert overwrite table nzhang_part14 partition(ds, hr)
+select key, value, ds, hr from (
+ select * from (select 'k1' as key, cast(null as string) as value, '1' as ds, '2' as hr from src limit 2)a
+ union all
+ select * from (select 'k2' as key, '' as value, '1' as ds, '3' as hr from src limit 2)b
+ union all
+ select * from (select 'k3' as key, ' ' as value, '2' as ds, '1' as hr from src limit 2)c
+) T
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@nzhang_part14
+POSTHOOK: query: insert overwrite table nzhang_part14 partition(ds, hr)
+select key, value, ds, hr from (
+ select * from (select 'k1' as key, cast(null as string) as value, '1' as ds, '2' as hr from src limit 2)a
+ union all
+ select * from (select 'k2' as key, '' as value, '1' as ds, '3' as hr from src limit 2)b
+ union all
+ select * from (select 'k3' as key, ' ' as value, '2' as ds, '1' as hr from src limit 2)c
+) T
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@nzhang_part14@ds=1/hr=2
+POSTHOOK: Output: default@nzhang_part14@ds=1/hr=3
+POSTHOOK: Output: default@nzhang_part14@ds=2/hr=1
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=1,hr=2).key EXPRESSION []
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=1,hr=2).value EXPRESSION []
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=1,hr=3).key EXPRESSION []
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=1,hr=3).value EXPRESSION []
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2,hr=1).key EXPRESSION []
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2,hr=1).value EXPRESSION []
+PREHOOK: query: desc formatted nzhang_part14 partition(ds='1', hr='3')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@nzhang_part14
+POSTHOOK: query: desc formatted nzhang_part14 partition(ds='1', hr='3')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@nzhang_part14
+# col_name data_type comment
+
+key string
+value string
+
+# Partition Information
+# col_name data_type comment
+
+ds string
+hr string
+
+# Detailed Partition Information
+Partition Value: [1, 3]
+Database: default
+Table: nzhang_part14
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"}
+ numFiles 1
+ numRows 2
+ rawDataSize 6
+ totalSize 8
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: INSERT OVERWRITE TABLE nzhang_part14 PARTITION (ds='2010-03-03', hr)
+SELECT key, value, hr FROM srcpart WHERE ds is not null and hr>10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+PREHOOK: Output: default@nzhang_part14@ds=2010-03-03
+POSTHOOK: query: INSERT OVERWRITE TABLE nzhang_part14 PARTITION (ds='2010-03-03', hr)
+SELECT key, value, hr FROM srcpart WHERE ds is not null and hr>10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+POSTHOOK: Output: default@nzhang_part14@ds=2010-03-03/hr=11
+POSTHOOK: Output: default@nzhang_part14@ds=2010-03-03/hr=12
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2010-03-03,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2010-03-03,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2010-03-03,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2010-03-03,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: desc formatted nzhang_part14 PARTITION(ds='2010-03-03', hr='12')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@nzhang_part14
+POSTHOOK: query: desc formatted nzhang_part14 PARTITION(ds='2010-03-03', hr='12')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@nzhang_part14
+# col_name data_type comment
+
+key string
+value string
+
+# Partition Information
+# col_name data_type comment
+
+ds string
+hr string
+
+# Detailed Partition Information
+Partition Value: [2010-03-03, 12]
+Database: default
+Table: nzhang_part14
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"}
+ numFiles 1
+ numRows 1000
+ rawDataSize 10624
+ totalSize 11624
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: drop table nzhang_part14
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@nzhang_part14
+PREHOOK: Output: default@nzhang_part14
+POSTHOOK: query: drop table nzhang_part14
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@nzhang_part14
+POSTHOOK: Output: default@nzhang_part14
+PREHOOK: query: create table if not exists nzhang_part14 (key string, value string)
+partitioned by (ds string, hr string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@nzhang_part14
+POSTHOOK: query: create table if not exists nzhang_part14 (key string, value string)
+partitioned by (ds string, hr string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@nzhang_part14
+PREHOOK: query: INSERT OVERWRITE TABLE nzhang_part14 PARTITION (ds='2010-03-03', hr)
+SELECT key, value, hr FROM srcpart WHERE ds is not null and hr>10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+PREHOOK: Output: default@nzhang_part14@ds=2010-03-03
+POSTHOOK: query: INSERT OVERWRITE TABLE nzhang_part14 PARTITION (ds='2010-03-03', hr)
+SELECT key, value, hr FROM srcpart WHERE ds is not null and hr>10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+POSTHOOK: Output: default@nzhang_part14@ds=2010-03-03/hr=11
+POSTHOOK: Output: default@nzhang_part14@ds=2010-03-03/hr=12
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2010-03-03,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2010-03-03,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2010-03-03,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2010-03-03,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: desc formatted nzhang_part14 PARTITION(ds='2010-03-03', hr='12')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@nzhang_part14
+POSTHOOK: query: desc formatted nzhang_part14 PARTITION(ds='2010-03-03', hr='12')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@nzhang_part14
+# col_name data_type comment
+
+key string
+value string
+
+# Partition Information
+# col_name data_type comment
+
+ds string
+hr string
+
+# Detailed Partition Information
+Partition Value: [2010-03-03, 12]
+Database: default
+Table: nzhang_part14
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"}
+ numFiles 1
+ numRows 1000
+ rawDataSize 10624
+ totalSize 11624
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: drop table a
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@a
+PREHOOK: Output: default@a
+POSTHOOK: query: drop table a
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@a
+POSTHOOK: Output: default@a
+PREHOOK: query: create table a (key string, value string)
+partitioned by (ds string, hr string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@a
+POSTHOOK: query: create table a (key string, value string)
+partitioned by (ds string, hr string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@a
+PREHOOK: query: drop table b
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@b
+PREHOOK: Output: default@b
+POSTHOOK: query: drop table b
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@b
+POSTHOOK: Output: default@b
+PREHOOK: query: create table b (key string, value string)
+partitioned by (ds string, hr string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@b
+POSTHOOK: query: create table b (key string, value string)
+partitioned by (ds string, hr string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@b
+PREHOOK: query: drop table c
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table c
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table c (key string, value string)
+partitioned by (ds string, hr string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@c
+POSTHOOK: query: create table c (key string, value string)
+partitioned by (ds string, hr string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@c
+PREHOOK: query: FROM srcpart
+INSERT OVERWRITE TABLE a PARTITION (ds='2010-03-11', hr) SELECT key, value, hr WHERE ds is not null and hr>10
+INSERT OVERWRITE TABLE b PARTITION (ds='2010-04-11', hr) SELECT key, value, hr WHERE ds is not null and hr>11
+INSERT OVERWRITE TABLE c PARTITION (ds='2010-05-11', hr) SELECT key, value, hr WHERE hr>0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+PREHOOK: Output: default@a@ds=2010-03-11
+PREHOOK: Output: default@b@ds=2010-04-11
+PREHOOK: Output: default@c@ds=2010-05-11
+POSTHOOK: query: FROM srcpart
+INSERT OVERWRITE TABLE a PARTITION (ds='2010-03-11', hr) SELECT key, value, hr WHERE ds is not null and hr>10
+INSERT OVERWRITE TABLE b PARTITION (ds='2010-04-11', hr) SELECT key, value, hr WHERE ds is not null and hr>11
+INSERT OVERWRITE TABLE c PARTITION (ds='2010-05-11', hr) SELECT key, value, hr WHERE hr>0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+POSTHOOK: Output: default@a@ds=2010-03-11/hr=11
+POSTHOOK: Output: default@a@ds=2010-03-11/hr=12
+POSTHOOK: Output: default@b@ds=2010-04-11/hr=12
+POSTHOOK: Output: default@c@ds=2010-05-11/hr=11
+POSTHOOK: Output: default@c@ds=2010-05-11/hr=12
+POSTHOOK: Lineage: a PARTITION(ds=2010-03-11,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: a PARTITION(ds=2010-03-11,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: a PARTITION(ds=2010-03-11,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: a PARTITION(ds=2010-03-11,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: b PARTITION(ds=2010-04-11,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: b PARTITION(ds=2010-04-11,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: c PARTITION(ds=2010-05-11,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: c PARTITION(ds=2010-05-11,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: c PARTITION(ds=2010-05-11,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: c PARTITION(ds=2010-05-11,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: explain select key from a
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select key from a
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+ ListSink
+
+PREHOOK: query: explain select value from b
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select value from b
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 1000 Data size: 91000 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: value (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 1000 Data size: 91000 Basic stats: COMPLETE Column stats: COMPLETE
+ ListSink
+
+PREHOOK: query: explain select key from b
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select key from b
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: COMPLETE
+ ListSink
+
+PREHOOK: query: explain select value from c
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select value from c
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ TableScan
+ alias: c
+ Statistics: Num rows: 2000 Data size: 182000 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: value (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 2000 Data size: 182000 Basic stats: COMPLETE Column stats: COMPLETE
+ ListSink
+
+PREHOOK: query: explain select key from c
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select key from c
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ TableScan
+ alias: c
+ Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+ ListSink
+
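A note before the [2/7] diff: the autoColumnStats_9 output above is notable in that every EXPLAIN over tables a, b, and c reports "Column stats: COMPLETE" although no ANALYZE statement was ever issued; the column statistics were gathered as a side effect of the INSERT OVERWRITE statements. A minimal standalone sketch of that behavior (hypothetical table name t; hive.stats.column.autogather is the flag this patch introduces, which these q-tests presumably enable in their setup):

set hive.stats.autogather=true;
set hive.stats.column.autogather=true;

create table t (key string, value string) partitioned by (ds string, hr string);

insert overwrite table t partition (ds='2010-03-03', hr)
select key, value, hr from srcpart where ds is not null and hr > 10;

-- COLUMN_STATS_ACCURATE should now list key and value for each written partition
desc formatted t partition (ds='2010-03-03', hr='12');

-- and the planner can report "Column stats: COMPLETE" with no explicit ANALYZE
explain select key from t;
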
[2/7] hive git commit: HIVE-13566: Auto-gather column stats - phase 1
(Pengcheng Xiong, reviewed by Ashutosh Chauhan)
Posted by px...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ec4b936e/ql/src/test/results/clientpositive/autoColumnStats_8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/autoColumnStats_8.q.out b/ql/src/test/results/clientpositive/autoColumnStats_8.q.out
new file mode 100644
index 0000000..5b74d2d
--- /dev/null
+++ b/ql/src/test/results/clientpositive/autoColumnStats_8.q.out
@@ -0,0 +1,2624 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+show partitions srcpart
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@srcpart
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+show partitions srcpart
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@srcpart
+ds=2008-04-08/hr=11
+ds=2008-04-08/hr=12
+ds=2008-04-09/hr=11
+ds=2008-04-09/hr=12
+PREHOOK: query: create table if not exists nzhang_part8 like srcpart
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@nzhang_part8
+POSTHOOK: query: create table if not exists nzhang_part8 like srcpart
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@nzhang_part8
+PREHOOK: query: describe extended nzhang_part8
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@nzhang_part8
+POSTHOOK: query: describe extended nzhang_part8
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@nzhang_part8
+key string default
+value string default
+ds string
+hr string
+
+# Partition Information
+# col_name data_type comment
+
+ds string
+hr string
+
+#### A masked pattern was here ####
+PREHOOK: query: explain extended
+from srcpart
+insert overwrite table nzhang_part8 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08'
+insert overwrite table nzhang_part8 partition(ds='2008-12-31', hr) select key, value, hr where ds > '2008-04-08'
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended
+from srcpart
+insert overwrite table nzhang_part8 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08'
+insert overwrite table nzhang_part8 partition(ds='2008-12-31', hr) select key, value, hr where ds > '2008-04-08'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-2 is a root stage
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+ Stage-6 depends on stages: Stage-3, Stage-4, Stage-5
+ Stage-7 depends on stages: Stage-3, Stage-4, Stage-5
+ Stage-1 depends on stages: Stage-2
+ Stage-4 depends on stages: Stage-1
+ Stage-5 depends on stages: Stage-2
+
+STAGE PLANS:
+ Stage: Stage-2
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: srcpart
+ Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: (ds <= '2008-04-08') (type: boolean)
+ Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string), ds (type: string), hr (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ GlobalTableId: 1
+#### A masked pattern was here ####
+ NumFilesPerFileSink: 1
+ Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ bucket_count -1
+ columns key,value
+ columns.comments 'default','default'
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.nzhang_part8
+ partition_columns ds/hr
+ partition_columns.types string:string
+ serialization.ddl struct nzhang_part8 { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.nzhang_part8
+ TotalFiles: 1
+ GatherStats: true
+ MultiFileSpray: false
+ Select Operator
+ expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
+ outputColumnNames: key, value, ds, hr
+ Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: compute_stats(key, 16), compute_stats(value, 16)
+ keys: ds (type: string), hr (type: string)
+ mode: hash
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string), _col1 (type: string)
+ null sort order: aa
+ sort order: ++
+ Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
+ Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
+ tag: -1
+ value expressions: _col2 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:string,numbitvectors:int>), _col3 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:string,numbitvectors:int>)
+ auto parallelism: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: (ds > '2008-04-08') (type: boolean)
+ Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string), hr (type: string)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ GlobalTableId: 2
+#### A masked pattern was here ####
+ NumFilesPerFileSink: 1
+ Static Partition Specification: ds=2008-12-31/
+ Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ bucket_count -1
+ columns key,value
+ columns.comments 'default','default'
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.nzhang_part8
+ partition_columns ds/hr
+ partition_columns.types string:string
+ serialization.ddl struct nzhang_part8 { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.nzhang_part8
+ TotalFiles: 1
+ GatherStats: true
+ MultiFileSpray: false
+ Select Operator
+ expressions: _col0 (type: string), _col1 (type: string), '2008-12-31' (type: string), _col2 (type: string)
+ outputColumnNames: key, value, ds, hr
+ Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: compute_stats(key, 16), compute_stats(value, 16)
+ keys: '2008-12-31' (type: string), hr (type: string)
+ mode: hash
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ GlobalTableId: 0
+#### A masked pattern was here ####
+ NumFilesPerFileSink: 1
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ properties:
+ columns _col0,_col1,_col2,_col3
+ columns.types string,string,struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:string,numbitvectors:int>,struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:string,numbitvectors:int>
+ escape.delim \
+ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+ serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+ TotalFiles: 1
+ GatherStats: false
+ MultiFileSpray: false
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: hr=11
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-08
+ hr 11
+ properties:
+ COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+ bucket_count -1
+ columns key,value
+ columns.comments 'default','default'
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.srcpart
+ numFiles 1
+ numRows 500
+ partition_columns ds/hr
+ partition_columns.types string:string
+ rawDataSize 5312
+ serialization.ddl struct srcpart { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 5812
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ bucket_count -1
+ columns key,value
+ columns.comments 'default','default'
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.srcpart
+ partition_columns ds/hr
+ partition_columns.types string:string
+ serialization.ddl struct srcpart { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.srcpart
+ name: default.srcpart
+#### A masked pattern was here ####
+ Partition
+ base file name: hr=12
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-08
+ hr 12
+ properties:
+ COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+ bucket_count -1
+ columns key,value
+ columns.comments 'default','default'
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.srcpart
+ numFiles 1
+ numRows 500
+ partition_columns ds/hr
+ partition_columns.types string:string
+ rawDataSize 5312
+ serialization.ddl struct srcpart { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 5812
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ bucket_count -1
+ columns key,value
+ columns.comments 'default','default'
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.srcpart
+ partition_columns ds/hr
+ partition_columns.types string:string
+ serialization.ddl struct srcpart { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.srcpart
+ name: default.srcpart
+#### A masked pattern was here ####
+ Partition
+ base file name: hr=11
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-09
+ hr 11
+ properties:
+ COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+ bucket_count -1
+ columns key,value
+ columns.comments 'default','default'
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.srcpart
+ numFiles 1
+ numRows 500
+ partition_columns ds/hr
+ partition_columns.types string:string
+ rawDataSize 5312
+ serialization.ddl struct srcpart { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 5812
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ bucket_count -1
+ columns key,value
+ columns.comments 'default','default'
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.srcpart
+ partition_columns ds/hr
+ partition_columns.types string:string
+ serialization.ddl struct srcpart { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.srcpart
+ name: default.srcpart
+#### A masked pattern was here ####
+ Partition
+ base file name: hr=12
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-09
+ hr 12
+ properties:
+ COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+ bucket_count -1
+ columns key,value
+ columns.comments 'default','default'
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.srcpart
+ numFiles 1
+ numRows 500
+ partition_columns ds/hr
+ partition_columns.types string:string
+ rawDataSize 5312
+ serialization.ddl struct srcpart { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 5812
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ bucket_count -1
+ columns key,value
+ columns.comments 'default','default'
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.srcpart
+ partition_columns ds/hr
+ partition_columns.types string:string
+ serialization.ddl struct srcpart { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.srcpart
+ name: default.srcpart
+ Truncated Path -> Alias:
+ /srcpart/ds=2008-04-08/hr=11 [srcpart]
+ /srcpart/ds=2008-04-08/hr=12 [srcpart]
+ /srcpart/ds=2008-04-09/hr=11 [srcpart]
+ /srcpart/ds=2008-04-09/hr=12 [srcpart]
+ Needs Tagging: false
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
+ keys: KEY._col0 (type: string), KEY._col1 (type: string)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col2 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:string>), _col3 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:string>), _col0 (type: string), _col1 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ GlobalTableId: 0
+#### A masked pattern was here ####
+ NumFilesPerFileSink: 1
+ Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ properties:
+ columns _col0,_col1,_col2,_col3
+ columns.types struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:string>:struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:string>:string:string
+ escape.delim \
+ hive.serialization.extend.additional.nesting.levels true
+ serialization.escape.crlf true
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ TotalFiles: 1
+ GatherStats: false
+ MultiFileSpray: false
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ ds
+ hr
+ replace: true
+#### A masked pattern was here ####
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ bucket_count -1
+ columns key,value
+ columns.comments 'default','default'
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.nzhang_part8
+ partition_columns ds/hr
+ partition_columns.types string:string
+ serialization.ddl struct nzhang_part8 { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.nzhang_part8
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+#### A masked pattern was here ####
+
+ Stage: Stage-6
+ Column Stats Work
+ Column Stats Desc:
+ Columns: key, value
+ Column Types: string, string
+ Table: default.nzhang_part8
+ Is Table Level Stats: false
+
+ Stage: Stage-7
+ Column Stats Work
+ Column Stats Desc:
+ Columns: key, value
+ Column Types: string, string
+ Table: default.nzhang_part8
+ Is Table Level Stats: false
+
+ Stage: Stage-1
+ Move Operator
+ tables:
+ partition:
+ ds 2008-12-31
+ hr
+ replace: true
+#### A masked pattern was here ####
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ bucket_count -1
+ columns key,value
+ columns.comments 'default','default'
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.nzhang_part8
+ partition_columns ds/hr
+ partition_columns.types string:string
+ serialization.ddl struct nzhang_part8 { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.nzhang_part8
+
+ Stage: Stage-4
+ Stats-Aggr Operator
+#### A masked pattern was here ####
+
+ Stage: Stage-5
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ GatherStats: false
+ Reduce Output Operator
+ key expressions: '2008-12-31' (type: string), _col1 (type: string)
+ null sort order: aa
+ sort order: ++
+ Map-reduce partition columns: '2008-12-31' (type: string), _col1 (type: string)
+ Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
+ tag: -1
+ value expressions: _col2 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:string,numbitvectors:int>), _col3 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:string,numbitvectors:int>)
+ auto parallelism: false
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: -mr-10004
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ properties:
+ columns _col0,_col1,_col2,_col3
+ columns.types string,string,struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:string,numbitvectors:int>,struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:string,numbitvectors:int>
+ escape.delim \
+ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+ serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ properties:
+ columns _col0,_col1,_col2,_col3
+ columns.types string,string,struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:string,numbitvectors:int>,struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:string,numbitvectors:int>
+ escape.delim \
+ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+ serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+ Truncated Path -> Alias:
+#### A masked pattern was here ####
+ Needs Tagging: false
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
+ keys: '2008-12-31' (type: string), KEY._col1 (type: string)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col2 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:string>), _col3 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:string>), '2008-12-31' (type: string), _col1 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ GlobalTableId: 0
+#### A masked pattern was here ####
+ NumFilesPerFileSink: 1
+ Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ properties:
+ columns _col0,_col1,_col2,_col3
+ columns.types struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:string>:struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:string>:string:string
+ escape.delim \
+ hive.serialization.extend.additional.nesting.levels true
+ serialization.escape.crlf true
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ TotalFiles: 1
+ GatherStats: false
+ MultiFileSpray: false
+
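(Sketch, not part of the golden file: the plan above shows where the auto-gather work lands. Each branch of the multi-insert grows a Group By Operator over compute_stats(key, 16) and compute_stats(value, 16) keyed by the partition columns, and those feed the two Column Stats Work stages, Stage-6 and Stage-7, one per insert branch, each marked "Is Table Level Stats: false" because the stats are written per partition. Assuming standard HiveQL ANALYZE syntax, the manual equivalent that these stages fold into the INSERT itself would be:

analyze table nzhang_part8 partition (ds, hr) compute statistics for columns;

run once per insert target after the data lands.)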
+PREHOOK: query: from srcpart
+insert overwrite table nzhang_part8 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08'
+insert overwrite table nzhang_part8 partition(ds='2008-12-31', hr) select key, value, hr where ds > '2008-04-08'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+PREHOOK: Output: default@nzhang_part8
+PREHOOK: Output: default@nzhang_part8@ds=2008-12-31
+POSTHOOK: query: from srcpart
+insert overwrite table nzhang_part8 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08'
+insert overwrite table nzhang_part8 partition(ds='2008-12-31', hr) select key, value, hr where ds > '2008-04-08'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+POSTHOOK: Output: default@nzhang_part8@ds=2008-04-08/hr=11
+POSTHOOK: Output: default@nzhang_part8@ds=2008-04-08/hr=12
+POSTHOOK: Output: default@nzhang_part8@ds=2008-12-31/hr=11
+POSTHOOK: Output: default@nzhang_part8@ds=2008-12-31/hr=12
+POSTHOOK: Lineage: nzhang_part8 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_part8 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_part8 PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_part8 PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_part8 PARTITION(ds=2008-12-31,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_part8 PARTITION(ds=2008-12-31,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_part8 PARTITION(ds=2008-12-31,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_part8 PARTITION(ds=2008-12-31,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: show partitions nzhang_part8
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@nzhang_part8
+POSTHOOK: query: show partitions nzhang_part8
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@nzhang_part8
+ds=2008-04-08/hr=11
+ds=2008-04-08/hr=12
+ds=2008-12-31/hr=11
+ds=2008-12-31/hr=12
+PREHOOK: query: select * from nzhang_part8 where ds is not null and hr is not null
+PREHOOK: type: QUERY
+PREHOOK: Input: default@nzhang_part8
+PREHOOK: Input: default@nzhang_part8@ds=2008-04-08/hr=11
+PREHOOK: Input: default@nzhang_part8@ds=2008-04-08/hr=12
+PREHOOK: Input: default@nzhang_part8@ds=2008-12-31/hr=11
+PREHOOK: Input: default@nzhang_part8@ds=2008-12-31/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: select * from nzhang_part8 where ds is not null and hr is not null
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@nzhang_part8
+POSTHOOK: Input: default@nzhang_part8@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@nzhang_part8@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@nzhang_part8@ds=2008-12-31/hr=11
+POSTHOOK: Input: default@nzhang_part8@ds=2008-12-31/hr=12
+#### A masked pattern was here ####
+0 val_0 2008-04-08 11
+0 val_0 2008-04-08 11
+0 val_0 2008-04-08 11
+0 val_0 2008-04-08 12
+0 val_0 2008-04-08 12
+0 val_0 2008-04-08 12
+0 val_0 2008-12-31 11
+0 val_0 2008-12-31 11
+0 val_0 2008-12-31 11
+0 val_0 2008-12-31 12
+0 val_0 2008-12-31 12
+0 val_0 2008-12-31 12
+10 val_10 2008-04-08 11
+10 val_10 2008-04-08 12
+10 val_10 2008-12-31 11
+10 val_10 2008-12-31 12
+100 val_100 2008-04-08 11
+100 val_100 2008-04-08 11
+100 val_100 2008-04-08 12
+100 val_100 2008-04-08 12
+100 val_100 2008-12-31 11
+100 val_100 2008-12-31 11
+100 val_100 2008-12-31 12
+100 val_100 2008-12-31 12
+103 val_103 2008-04-08 11
+103 val_103 2008-04-08 11
+103 val_103 2008-04-08 12
+103 val_103 2008-04-08 12
+103 val_103 2008-12-31 11
+103 val_103 2008-12-31 11
+103 val_103 2008-12-31 12
+103 val_103 2008-12-31 12
+104 val_104 2008-04-08 11
+104 val_104 2008-04-08 11
+104 val_104 2008-04-08 12
+104 val_104 2008-04-08 12
+104 val_104 2008-12-31 11
+104 val_104 2008-12-31 11
+104 val_104 2008-12-31 12
+104 val_104 2008-12-31 12
+105 val_105 2008-04-08 11
+105 val_105 2008-04-08 12
+105 val_105 2008-12-31 11
+105 val_105 2008-12-31 12
+11 val_11 2008-04-08 11
+11 val_11 2008-04-08 12
+11 val_11 2008-12-31 11
+11 val_11 2008-12-31 12
+111 val_111 2008-04-08 11
+111 val_111 2008-04-08 12
+111 val_111 2008-12-31 11
+111 val_111 2008-12-31 12
+113 val_113 2008-04-08 11
+113 val_113 2008-04-08 11
+113 val_113 2008-04-08 12
+113 val_113 2008-04-08 12
+113 val_113 2008-12-31 11
+113 val_113 2008-12-31 11
+113 val_113 2008-12-31 12
+113 val_113 2008-12-31 12
+114 val_114 2008-04-08 11
+114 val_114 2008-04-08 12
+114 val_114 2008-12-31 11
+114 val_114 2008-12-31 12
+116 val_116 2008-04-08 11
+116 val_116 2008-04-08 12
+116 val_116 2008-12-31 11
+116 val_116 2008-12-31 12
+118 val_118 2008-04-08 11
+118 val_118 2008-04-08 11
+118 val_118 2008-04-08 12
+118 val_118 2008-04-08 12
+118 val_118 2008-12-31 11
+118 val_118 2008-12-31 11
+118 val_118 2008-12-31 12
+118 val_118 2008-12-31 12
+119 val_119 2008-04-08 11
+119 val_119 2008-04-08 11
+119 val_119 2008-04-08 11
+119 val_119 2008-04-08 12
+119 val_119 2008-04-08 12
+119 val_119 2008-04-08 12
+119 val_119 2008-12-31 11
+119 val_119 2008-12-31 11
+119 val_119 2008-12-31 11
+119 val_119 2008-12-31 12
+119 val_119 2008-12-31 12
+119 val_119 2008-12-31 12
+12 val_12 2008-04-08 11
+12 val_12 2008-04-08 11
+12 val_12 2008-04-08 12
+12 val_12 2008-04-08 12
+12 val_12 2008-12-31 11
+12 val_12 2008-12-31 11
+12 val_12 2008-12-31 12
+12 val_12 2008-12-31 12
+120 val_120 2008-04-08 11
+120 val_120 2008-04-08 11
+120 val_120 2008-04-08 12
+120 val_120 2008-04-08 12
+120 val_120 2008-12-31 11
+120 val_120 2008-12-31 11
+120 val_120 2008-12-31 12
+120 val_120 2008-12-31 12
+125 val_125 2008-04-08 11
+125 val_125 2008-04-08 11
+125 val_125 2008-04-08 12
+125 val_125 2008-04-08 12
+125 val_125 2008-12-31 11
+125 val_125 2008-12-31 11
+125 val_125 2008-12-31 12
+125 val_125 2008-12-31 12
+126 val_126 2008-04-08 11
+126 val_126 2008-04-08 12
+126 val_126 2008-12-31 11
+126 val_126 2008-12-31 12
+128 val_128 2008-04-08 11
+128 val_128 2008-04-08 11
+128 val_128 2008-04-08 11
+128 val_128 2008-04-08 12
+128 val_128 2008-04-08 12
+128 val_128 2008-04-08 12
+128 val_128 2008-12-31 11
+128 val_128 2008-12-31 11
+128 val_128 2008-12-31 11
+128 val_128 2008-12-31 12
+128 val_128 2008-12-31 12
+128 val_128 2008-12-31 12
+129 val_129 2008-04-08 11
+129 val_129 2008-04-08 11
+129 val_129 2008-04-08 12
+129 val_129 2008-04-08 12
+129 val_129 2008-12-31 11
+129 val_129 2008-12-31 11
+129 val_129 2008-12-31 12
+129 val_129 2008-12-31 12
+131 val_131 2008-04-08 11
+131 val_131 2008-04-08 12
+131 val_131 2008-12-31 11
+131 val_131 2008-12-31 12
+133 val_133 2008-04-08 11
+133 val_133 2008-04-08 12
+133 val_133 2008-12-31 11
+133 val_133 2008-12-31 12
+134 val_134 2008-04-08 11
+134 val_134 2008-04-08 11
+134 val_134 2008-04-08 12
+134 val_134 2008-04-08 12
+134 val_134 2008-12-31 11
+134 val_134 2008-12-31 11
+134 val_134 2008-12-31 12
+134 val_134 2008-12-31 12
+136 val_136 2008-04-08 11
+136 val_136 2008-04-08 12
+136 val_136 2008-12-31 11
+136 val_136 2008-12-31 12
+137 val_137 2008-04-08 11
+137 val_137 2008-04-08 11
+137 val_137 2008-04-08 12
+137 val_137 2008-04-08 12
+137 val_137 2008-12-31 11
+137 val_137 2008-12-31 11
+137 val_137 2008-12-31 12
+137 val_137 2008-12-31 12
+138 val_138 2008-04-08 11
+138 val_138 2008-04-08 11
+138 val_138 2008-04-08 11
+138 val_138 2008-04-08 11
+138 val_138 2008-04-08 12
+138 val_138 2008-04-08 12
+138 val_138 2008-04-08 12
+138 val_138 2008-04-08 12
+138 val_138 2008-12-31 11
+138 val_138 2008-12-31 11
+138 val_138 2008-12-31 11
+138 val_138 2008-12-31 11
+138 val_138 2008-12-31 12
+138 val_138 2008-12-31 12
+138 val_138 2008-12-31 12
+138 val_138 2008-12-31 12
+143 val_143 2008-04-08 11
+143 val_143 2008-04-08 12
+143 val_143 2008-12-31 11
+143 val_143 2008-12-31 12
+145 val_145 2008-04-08 11
+145 val_145 2008-04-08 12
+145 val_145 2008-12-31 11
+145 val_145 2008-12-31 12
+146 val_146 2008-04-08 11
+146 val_146 2008-04-08 11
+146 val_146 2008-04-08 12
+146 val_146 2008-04-08 12
+146 val_146 2008-12-31 11
+146 val_146 2008-12-31 11
+146 val_146 2008-12-31 12
+146 val_146 2008-12-31 12
+149 val_149 2008-04-08 11
+149 val_149 2008-04-08 11
+149 val_149 2008-04-08 12
+149 val_149 2008-04-08 12
+149 val_149 2008-12-31 11
+149 val_149 2008-12-31 11
+149 val_149 2008-12-31 12
+149 val_149 2008-12-31 12
+15 val_15 2008-04-08 11
+15 val_15 2008-04-08 11
+15 val_15 2008-04-08 12
+15 val_15 2008-04-08 12
+15 val_15 2008-12-31 11
+15 val_15 2008-12-31 11
+15 val_15 2008-12-31 12
+15 val_15 2008-12-31 12
+150 val_150 2008-04-08 11
+150 val_150 2008-04-08 12
+150 val_150 2008-12-31 11
+150 val_150 2008-12-31 12
+152 val_152 2008-04-08 11
+152 val_152 2008-04-08 11
+152 val_152 2008-04-08 12
+152 val_152 2008-04-08 12
+152 val_152 2008-12-31 11
+152 val_152 2008-12-31 11
+152 val_152 2008-12-31 12
+152 val_152 2008-12-31 12
+153 val_153 2008-04-08 11
+153 val_153 2008-04-08 12
+153 val_153 2008-12-31 11
+153 val_153 2008-12-31 12
+155 val_155 2008-04-08 11
+155 val_155 2008-04-08 12
+155 val_155 2008-12-31 11
+155 val_155 2008-12-31 12
+156 val_156 2008-04-08 11
+156 val_156 2008-04-08 12
+156 val_156 2008-12-31 11
+156 val_156 2008-12-31 12
+157 val_157 2008-04-08 11
+157 val_157 2008-04-08 12
+157 val_157 2008-12-31 11
+157 val_157 2008-12-31 12
+158 val_158 2008-04-08 11
+158 val_158 2008-04-08 12
+158 val_158 2008-12-31 11
+158 val_158 2008-12-31 12
+160 val_160 2008-04-08 11
+160 val_160 2008-04-08 12
+160 val_160 2008-12-31 11
+160 val_160 2008-12-31 12
+162 val_162 2008-04-08 11
+162 val_162 2008-04-08 12
+162 val_162 2008-12-31 11
+162 val_162 2008-12-31 12
+163 val_163 2008-04-08 11
+163 val_163 2008-04-08 12
+163 val_163 2008-12-31 11
+163 val_163 2008-12-31 12
+164 val_164 2008-04-08 11
+164 val_164 2008-04-08 11
+164 val_164 2008-04-08 12
+164 val_164 2008-04-08 12
+164 val_164 2008-12-31 11
+164 val_164 2008-12-31 11
+164 val_164 2008-12-31 12
+164 val_164 2008-12-31 12
+165 val_165 2008-04-08 11
+165 val_165 2008-04-08 11
+165 val_165 2008-04-08 12
+165 val_165 2008-04-08 12
+165 val_165 2008-12-31 11
+165 val_165 2008-12-31 11
+165 val_165 2008-12-31 12
+165 val_165 2008-12-31 12
+166 val_166 2008-04-08 11
+166 val_166 2008-04-08 12
+166 val_166 2008-12-31 11
+166 val_166 2008-12-31 12
+167 val_167 2008-04-08 11
+167 val_167 2008-04-08 11
+167 val_167 2008-04-08 11
+167 val_167 2008-04-08 12
+167 val_167 2008-04-08 12
+167 val_167 2008-04-08 12
+167 val_167 2008-12-31 11
+167 val_167 2008-12-31 11
+167 val_167 2008-12-31 11
+167 val_167 2008-12-31 12
+167 val_167 2008-12-31 12
+167 val_167 2008-12-31 12
+168 val_168 2008-04-08 11
+168 val_168 2008-04-08 12
+168 val_168 2008-12-31 11
+168 val_168 2008-12-31 12
+169 val_169 2008-04-08 11
+169 val_169 2008-04-08 11
+169 val_169 2008-04-08 11
+169 val_169 2008-04-08 11
+169 val_169 2008-04-08 12
+169 val_169 2008-04-08 12
+169 val_169 2008-04-08 12
+169 val_169 2008-04-08 12
+169 val_169 2008-12-31 11
+169 val_169 2008-12-31 11
+169 val_169 2008-12-31 11
+169 val_169 2008-12-31 11
+169 val_169 2008-12-31 12
+169 val_169 2008-12-31 12
+169 val_169 2008-12-31 12
+169 val_169 2008-12-31 12
+17 val_17 2008-04-08 11
+17 val_17 2008-04-08 12
+17 val_17 2008-12-31 11
+17 val_17 2008-12-31 12
+170 val_170 2008-04-08 11
+170 val_170 2008-04-08 12
+170 val_170 2008-12-31 11
+170 val_170 2008-12-31 12
+172 val_172 2008-04-08 11
+172 val_172 2008-04-08 11
+172 val_172 2008-04-08 12
+172 val_172 2008-04-08 12
+172 val_172 2008-12-31 11
+172 val_172 2008-12-31 11
+172 val_172 2008-12-31 12
+172 val_172 2008-12-31 12
+174 val_174 2008-04-08 11
+174 val_174 2008-04-08 11
+174 val_174 2008-04-08 12
+174 val_174 2008-04-08 12
+174 val_174 2008-12-31 11
+174 val_174 2008-12-31 11
+174 val_174 2008-12-31 12
+174 val_174 2008-12-31 12
+175 val_175 2008-04-08 11
+175 val_175 2008-04-08 11
+175 val_175 2008-04-08 12
+175 val_175 2008-04-08 12
+175 val_175 2008-12-31 11
+175 val_175 2008-12-31 11
+175 val_175 2008-12-31 12
+175 val_175 2008-12-31 12
+176 val_176 2008-04-08 11
+176 val_176 2008-04-08 11
+176 val_176 2008-04-08 12
+176 val_176 2008-04-08 12
+176 val_176 2008-12-31 11
+176 val_176 2008-12-31 11
+176 val_176 2008-12-31 12
+176 val_176 2008-12-31 12
+177 val_177 2008-04-08 11
+177 val_177 2008-04-08 12
+177 val_177 2008-12-31 11
+177 val_177 2008-12-31 12
+178 val_178 2008-04-08 11
+178 val_178 2008-04-08 12
+178 val_178 2008-12-31 11
+178 val_178 2008-12-31 12
+179 val_179 2008-04-08 11
+179 val_179 2008-04-08 11
+179 val_179 2008-04-08 12
+179 val_179 2008-04-08 12
+179 val_179 2008-12-31 11
+179 val_179 2008-12-31 11
+179 val_179 2008-12-31 12
+179 val_179 2008-12-31 12
+18 val_18 2008-04-08 11
+18 val_18 2008-04-08 11
+18 val_18 2008-04-08 12
+18 val_18 2008-04-08 12
+18 val_18 2008-12-31 11
+18 val_18 2008-12-31 11
+18 val_18 2008-12-31 12
+18 val_18 2008-12-31 12
+180 val_180 2008-04-08 11
+180 val_180 2008-04-08 12
+180 val_180 2008-12-31 11
+180 val_180 2008-12-31 12
+181 val_181 2008-04-08 11
+181 val_181 2008-04-08 12
+181 val_181 2008-12-31 11
+181 val_181 2008-12-31 12
+183 val_183 2008-04-08 11
+183 val_183 2008-04-08 12
+183 val_183 2008-12-31 11
+183 val_183 2008-12-31 12
+186 val_186 2008-04-08 11
+186 val_186 2008-04-08 12
+186 val_186 2008-12-31 11
+186 val_186 2008-12-31 12
+187 val_187 2008-04-08 11
+187 val_187 2008-04-08 11
+187 val_187 2008-04-08 11
+187 val_187 2008-04-08 12
+187 val_187 2008-04-08 12
+187 val_187 2008-04-08 12
+187 val_187 2008-12-31 11
+187 val_187 2008-12-31 11
+187 val_187 2008-12-31 11
+187 val_187 2008-12-31 12
+187 val_187 2008-12-31 12
+187 val_187 2008-12-31 12
+189 val_189 2008-04-08 11
+189 val_189 2008-04-08 12
+189 val_189 2008-12-31 11
+189 val_189 2008-12-31 12
+19 val_19 2008-04-08 11
+19 val_19 2008-04-08 12
+19 val_19 2008-12-31 11
+19 val_19 2008-12-31 12
+190 val_190 2008-04-08 11
+190 val_190 2008-04-08 12
+190 val_190 2008-12-31 11
+190 val_190 2008-12-31 12
+191 val_191 2008-04-08 11
+191 val_191 2008-04-08 11
+191 val_191 2008-04-08 12
+191 val_191 2008-04-08 12
+191 val_191 2008-12-31 11
+191 val_191 2008-12-31 11
+191 val_191 2008-12-31 12
+191 val_191 2008-12-31 12
+192 val_192 2008-04-08 11
+192 val_192 2008-04-08 12
+192 val_192 2008-12-31 11
+192 val_192 2008-12-31 12
+193 val_193 2008-04-08 11
+193 val_193 2008-04-08 11
+193 val_193 2008-04-08 11
+193 val_193 2008-04-08 12
+193 val_193 2008-04-08 12
+193 val_193 2008-04-08 12
+193 val_193 2008-12-31 11
+193 val_193 2008-12-31 11
+193 val_193 2008-12-31 11
+193 val_193 2008-12-31 12
+193 val_193 2008-12-31 12
+193 val_193 2008-12-31 12
+194 val_194 2008-04-08 11
+194 val_194 2008-04-08 12
+194 val_194 2008-12-31 11
+194 val_194 2008-12-31 12
+195 val_195 2008-04-08 11
+195 val_195 2008-04-08 11
+195 val_195 2008-04-08 12
+195 val_195 2008-04-08 12
+195 val_195 2008-12-31 11
+195 val_195 2008-12-31 11
+195 val_195 2008-12-31 12
+195 val_195 2008-12-31 12
+196 val_196 2008-04-08 11
+196 val_196 2008-04-08 12
+196 val_196 2008-12-31 11
+196 val_196 2008-12-31 12
+197 val_197 2008-04-08 11
+197 val_197 2008-04-08 11
+197 val_197 2008-04-08 12
+197 val_197 2008-04-08 12
+197 val_197 2008-12-31 11
+197 val_197 2008-12-31 11
+197 val_197 2008-12-31 12
+197 val_197 2008-12-31 12
+199 val_199 2008-04-08 11
+199 val_199 2008-04-08 11
+199 val_199 2008-04-08 11
+199 val_199 2008-04-08 12
+199 val_199 2008-04-08 12
+199 val_199 2008-04-08 12
+199 val_199 2008-12-31 11
+199 val_199 2008-12-31 11
+199 val_199 2008-12-31 11
+199 val_199 2008-12-31 12
+199 val_199 2008-12-31 12
+199 val_199 2008-12-31 12
+2 val_2 2008-04-08 11
+2 val_2 2008-04-08 12
+2 val_2 2008-12-31 11
+2 val_2 2008-12-31 12
+20 val_20 2008-04-08 11
+20 val_20 2008-04-08 12
+20 val_20 2008-12-31 11
+20 val_20 2008-12-31 12
+200 val_200 2008-04-08 11
+200 val_200 2008-04-08 11
+200 val_200 2008-04-08 12
+200 val_200 2008-04-08 12
+200 val_200 2008-12-31 11
+200 val_200 2008-12-31 11
+200 val_200 2008-12-31 12
+200 val_200 2008-12-31 12
+201 val_201 2008-04-08 11
+201 val_201 2008-04-08 12
+201 val_201 2008-12-31 11
+201 val_201 2008-12-31 12
+202 val_202 2008-04-08 11
+202 val_202 2008-04-08 12
+202 val_202 2008-12-31 11
+202 val_202 2008-12-31 12
+203 val_203 2008-04-08 11
+203 val_203 2008-04-08 11
+203 val_203 2008-04-08 12
+203 val_203 2008-04-08 12
+203 val_203 2008-12-31 11
+203 val_203 2008-12-31 11
+203 val_203 2008-12-31 12
+203 val_203 2008-12-31 12
+205 val_205 2008-04-08 11
+205 val_205 2008-04-08 11
+205 val_205 2008-04-08 12
+205 val_205 2008-04-08 12
+205 val_205 2008-12-31 11
+205 val_205 2008-12-31 11
+205 val_205 2008-12-31 12
+205 val_205 2008-12-31 12
+207 val_207 2008-04-08 11
+207 val_207 2008-04-08 11
+207 val_207 2008-04-08 12
+207 val_207 2008-04-08 12
+207 val_207 2008-12-31 11
+207 val_207 2008-12-31 11
+207 val_207 2008-12-31 12
+207 val_207 2008-12-31 12
+208 val_208 2008-04-08 11
+208 val_208 2008-04-08 11
+208 val_208 2008-04-08 11
+208 val_208 2008-04-08 12
+208 val_208 2008-04-08 12
+208 val_208 2008-04-08 12
+208 val_208 2008-12-31 11
+208 val_208 2008-12-31 11
+208 val_208 2008-12-31 11
+208 val_208 2008-12-31 12
+208 val_208 2008-12-31 12
+208 val_208 2008-12-31 12
+209 val_209 2008-04-08 11
+209 val_209 2008-04-08 11
+209 val_209 2008-04-08 12
+209 val_209 2008-04-08 12
+209 val_209 2008-12-31 11
+209 val_209 2008-12-31 11
+209 val_209 2008-12-31 12
+209 val_209 2008-12-31 12
+213 val_213 2008-04-08 11
+213 val_213 2008-04-08 11
+213 val_213 2008-04-08 12
+213 val_213 2008-04-08 12
+213 val_213 2008-12-31 11
+213 val_213 2008-12-31 11
+213 val_213 2008-12-31 12
+213 val_213 2008-12-31 12
+214 val_214 2008-04-08 11
+214 val_214 2008-04-08 12
+214 val_214 2008-12-31 11
+214 val_214 2008-12-31 12
+216 val_216 2008-04-08 11
+216 val_216 2008-04-08 11
+216 val_216 2008-04-08 12
+216 val_216 2008-04-08 12
+216 val_216 2008-12-31 11
+216 val_216 2008-12-31 11
+216 val_216 2008-12-31 12
+216 val_216 2008-12-31 12
+217 val_217 2008-04-08 11
+217 val_217 2008-04-08 11
+217 val_217 2008-04-08 12
+217 val_217 2008-04-08 12
+217 val_217 2008-12-31 11
+217 val_217 2008-12-31 11
+217 val_217 2008-12-31 12
+217 val_217 2008-12-31 12
+218 val_218 2008-04-08 11
+218 val_218 2008-04-08 12
+218 val_218 2008-12-31 11
+218 val_218 2008-12-31 12
+219 val_219 2008-04-08 11
+219 val_219 2008-04-08 11
+219 val_219 2008-04-08 12
+219 val_219 2008-04-08 12
+219 val_219 2008-12-31 11
+219 val_219 2008-12-31 11
+219 val_219 2008-12-31 12
+219 val_219 2008-12-31 12
+221 val_221 2008-04-08 11
+221 val_221 2008-04-08 11
+221 val_221 2008-04-08 12
+221 val_221 2008-04-08 12
+221 val_221 2008-12-31 11
+221 val_221 2008-12-31 11
+221 val_221 2008-12-31 12
+221 val_221 2008-12-31 12
+222 val_222 2008-04-08 11
+222 val_222 2008-04-08 12
+222 val_222 2008-12-31 11
+222 val_222 2008-12-31 12
+223 val_223 2008-04-08 11
+223 val_223 2008-04-08 11
+223 val_223 2008-04-08 12
+223 val_223 2008-04-08 12
+223 val_223 2008-12-31 11
+223 val_223 2008-12-31 11
+223 val_223 2008-12-31 12
+223 val_223 2008-12-31 12
+224 val_224 2008-04-08 11
+224 val_224 2008-04-08 11
+224 val_224 2008-04-08 12
+224 val_224 2008-04-08 12
+224 val_224 2008-12-31 11
+224 val_224 2008-12-31 11
+224 val_224 2008-12-31 12
+224 val_224 2008-12-31 12
+226 val_226 2008-04-08 11
+226 val_226 2008-04-08 12
+226 val_226 2008-12-31 11
+226 val_226 2008-12-31 12
+228 val_228 2008-04-08 11
+228 val_228 2008-04-08 12
+228 val_228 2008-12-31 11
+228 val_228 2008-12-31 12
+229 val_229 2008-04-08 11
+229 val_229 2008-04-08 11
+229 val_229 2008-04-08 12
+229 val_229 2008-04-08 12
+229 val_229 2008-12-31 11
+229 val_229 2008-12-31 11
+229 val_229 2008-12-31 12
+229 val_229 2008-12-31 12
+230 val_230 2008-04-08 11
+230 val_230 2008-04-08 11
+230 val_230 2008-04-08 11
+230 val_230 2008-04-08 11
+230 val_230 2008-04-08 11
+230 val_230 2008-04-08 12
+230 val_230 2008-04-08 12
+230 val_230 2008-04-08 12
+230 val_230 2008-04-08 12
+230 val_230 2008-04-08 12
+230 val_230 2008-12-31 11
+230 val_230 2008-12-31 11
+230 val_230 2008-12-31 11
+230 val_230 2008-12-31 11
+230 val_230 2008-12-31 11
+230 val_230 2008-12-31 12
+230 val_230 2008-12-31 12
+230 val_230 2008-12-31 12
+230 val_230 2008-12-31 12
+230 val_230 2008-12-31 12
+233 val_233 2008-04-08 11
+233 val_233 2008-04-08 11
+233 val_233 2008-04-08 12
+233 val_233 2008-04-08 12
+233 val_233 2008-12-31 11
+233 val_233 2008-12-31 11
+233 val_233 2008-12-31 12
+233 val_233 2008-12-31 12
+235 val_235 2008-04-08 11
+235 val_235 2008-04-08 12
+235 val_235 2008-12-31 11
+235 val_235 2008-12-31 12
+237 val_237 2008-04-08 11
+237 val_237 2008-04-08 11
+237 val_237 2008-04-08 12
+237 val_237 2008-04-08 12
+237 val_237 2008-12-31 11
+237 val_237 2008-12-31 11
+237 val_237 2008-12-31 12
+237 val_237 2008-12-31 12
+238 val_238 2008-04-08 11
+238 val_238 2008-04-08 11
+238 val_238 2008-04-08 12
+238 val_238 2008-04-08 12
+238 val_238 2008-12-31 11
+238 val_238 2008-12-31 11
+238 val_238 2008-12-31 12
+238 val_238 2008-12-31 12
+239 val_239 2008-04-08 11
+239 val_239 2008-04-08 11
+239 val_239 2008-04-08 12
+239 val_239 2008-04-08 12
+239 val_239 2008-12-31 11
+239 val_239 2008-12-31 11
+239 val_239 2008-12-31 12
+239 val_239 2008-12-31 12
+24 val_24 2008-04-08 11
+24 val_24 2008-04-08 11
+24 val_24 2008-04-08 12
+24 val_24 2008-04-08 12
+24 val_24 2008-12-31 11
+24 val_24 2008-12-31 11
+24 val_24 2008-12-31 12
+24 val_24 2008-12-31 12
+241 val_241 2008-04-08 11
+241 val_241 2008-04-08 12
+241 val_241 2008-12-31 11
+241 val_241 2008-12-31 12
+242 val_242 2008-04-08 11
+242 val_242 2008-04-08 11
+242 val_242 2008-04-08 12
+242 val_242 2008-04-08 12
+242 val_242 2008-12-31 11
+242 val_242 2008-12-31 11
+242 val_242 2008-12-31 12
+242 val_242 2008-12-31 12
+244 val_244 2008-04-08 11
+244 val_244 2008-04-08 12
+244 val_244 2008-12-31 11
+244 val_244 2008-12-31 12
+247 val_247 2008-04-08 11
+247 val_247 2008-04-08 12
+247 val_247 2008-12-31 11
+247 val_247 2008-12-31 12
+248 val_248 2008-04-08 11
+248 val_248 2008-04-08 12
+248 val_248 2008-12-31 11
+248 val_248 2008-12-31 12
+249 val_249 2008-04-08 11
+249 val_249 2008-04-08 12
+249 val_249 2008-12-31 11
+249 val_249 2008-12-31 12
+252 val_252 2008-04-08 11
+252 val_252 2008-04-08 12
+252 val_252 2008-12-31 11
+252 val_252 2008-12-31 12
+255 val_255 2008-04-08 11
+255 val_255 2008-04-08 11
+255 val_255 2008-04-08 12
+255 val_255 2008-04-08 12
+255 val_255 2008-12-31 11
+255 val_255 2008-12-31 11
+255 val_255 2008-12-31 12
+255 val_255 2008-12-31 12
+256 val_256 2008-04-08 11
+256 val_256 2008-04-08 11
+256 val_256 2008-04-08 12
+256 val_256 2008-04-08 12
+256 val_256 2008-12-31 11
+256 val_256 2008-12-31 11
+256 val_256 2008-12-31 12
+256 val_256 2008-12-31 12
+257 val_257 2008-04-08 11
+257 val_257 2008-04-08 12
+257 val_257 2008-12-31 11
+257 val_257 2008-12-31 12
+258 val_258 2008-04-08 11
+258 val_258 2008-04-08 12
+258 val_258 2008-12-31 11
+258 val_258 2008-12-31 12
+26 val_26 2008-04-08 11
+26 val_26 2008-04-08 11
+26 val_26 2008-04-08 12
+26 val_26 2008-04-08 12
+26 val_26 2008-12-31 11
+26 val_26 2008-12-31 11
+26 val_26 2008-12-31 12
+26 val_26 2008-12-31 12
+260 val_260 2008-04-08 11
+260 val_260 2008-04-08 12
+260 val_260 2008-12-31 11
+260 val_260 2008-12-31 12
+262 val_262 2008-04-08 11
+262 val_262 2008-04-08 12
+262 val_262 2008-12-31 11
+262 val_262 2008-12-31 12
+263 val_263 2008-04-08 11
+263 val_263 2008-04-08 12
+263 val_263 2008-12-31 11
+263 val_263 2008-12-31 12
+265 val_265 2008-04-08 11
+265 val_265 2008-04-08 11
+265 val_265 2008-04-08 12
+265 val_265 2008-04-08 12
+265 val_265 2008-12-31 11
+265 val_265 2008-12-31 11
+265 val_265 2008-12-31 12
+265 val_265 2008-12-31 12
+266 val_266 2008-04-08 11
+266 val_266 2008-04-08 12
+266 val_266 2008-12-31 11
+266 val_266 2008-12-31 12
+27 val_27 2008-04-08 11
+27 val_27 2008-04-08 12
+27 val_27 2008-12-31 11
+27 val_27 2008-12-31 12
+272 val_272 2008-04-08 11
+272 val_272 2008-04-08 11
+272 val_272 2008-04-08 12
+272 val_272 2008-04-08 12
+272 val_272 2008-12-31 11
+272 val_272 2008-12-31 11
+272 val_272 2008-12-31 12
+272 val_272 2008-12-31 12
+273 val_273 2008-04-08 11
+273 val_273 2008-04-08 11
+273 val_273 2008-04-08 11
+273 val_273 2008-04-08 12
+273 val_273 2008-04-08 12
+273 val_273 2008-04-08 12
+273 val_273 2008-12-31 11
+273 val_273 2008-12-31 11
+273 val_273 2008-12-31 11
+273 val_273 2008-12-31 12
+273 val_273 2008-12-31 12
+273 val_273 2008-12-31 12
+274 val_274 2008-04-08 11
+274 val_274 2008-04-08 12
+274 val_274 2008-12-31 11
+274 val_274 2008-12-31 12
+275 val_275 2008-04-08 11
+275 val_275 2008-04-08 12
+275 val_275 2008-12-31 11
+275 val_275 2008-12-31 12
+277 val_277 2008-04-08 11
+277 val_277 2008-04-08 11
+277 val_277 2008-04-08 11
+277 val_277 2008-04-08 11
+277 val_277 2008-04-08 12
+277 val_277 2008-04-08 12
+277 val_277 2008-04-08 12
+277 val_277 2008-04-08 12
+277 val_277 2008-12-31 11
+277 val_277 2008-12-31 11
+277 val_277 2008-12-31 11
+277 val_277 2008-12-31 11
+277 val_277 2008-12-31 12
+277 val_277 2008-12-31 12
+277 val_277 2008-12-31 12
+277 val_277 2008-12-31 12
+278 val_278 2008-04-08 11
+278 val_278 2008-04-08 11
+278 val_278 2008-04-08 12
+278 val_278 2008-04-08 12
+278 val_278 2008-12-31 11
+278 val_278 2008-12-31 11
+278 val_278 2008-12-31 12
+278 val_278 2008-12-31 12
+28 val_28 2008-04-08 11
+28 val_28 2008-04-08 12
+28 val_28 2008-12-31 11
+28 val_28 2008-12-31 12
+280 val_280 2008-04-08 11
+280 val_280 2008-04-08 11
+280 val_280 2008-04-08 12
+280 val_280 2008-04-08 12
+280 val_280 2008-12-31 11
+280 val_280 2008-12-31 11
+280 val_280 2008-12-31 12
+280 val_280 2008-12-31 12
+281 val_281 2008-04-08 11
+281 val_281 2008-04-08 11
+281 val_281 2008-04-08 12
+281 val_281 2008-04-08 12
+281 val_281 2008-12-31 11
+281 val_281 2008-12-31 11
+281 val_281 2008-12-31 12
+281 val_281 2008-12-31 12
+282 val_282 2008-04-08 11
+282 val_282 2008-04-08 11
+282 val_282 2008-04-08 12
+282 val_282 2008-04-08 12
+282 val_282 2008-12-31 11
+282 val_282 2008-12-31 11
+282 val_282 2008-12-31 12
+282 val_282 2008-12-31 12
+283 val_283 2008-04-08 11
+283 val_283 2008-04-08 12
+283 val_283 2008-12-31 11
+283 val_283 2008-12-31 12
+284 val_284 2008-04-08 11
+284 val_284 2008-04-08 12
+284 val_284 2008-12-31 11
+284 val_284 2008-12-31 12
+285 val_285 2008-04-08 11
+285 val_285 2008-04-08 12
+285 val_285 2008-12-31 11
+285 val_285 2008-12-31 12
+286 val_286 2008-04-08 11
+286 val_286 2008-04-08 12
+286 val_286 2008-12-31 11
+286 val_286 2008-12-31 12
+287 val_287 2008-04-08 11
+287 val_287 2008-04-08 12
+287 val_287 2008-12-31 11
+287 val_287 2008-12-31 12
+288 val_288 2008-04-08 11
+288 val_288 2008-04-08 11
+288 val_288 2008-04-08 12
+288 val_288 2008-04-08 12
+288 val_288 2008-12-31 11
+288 val_288 2008-12-31 11
+288 val_288 2008-12-31 12
+288 val_288 2008-12-31 12
+289 val_289 2008-04-08 11
+289 val_289 2008-04-08 12
+289 val_289 2008-12-31 11
+289 val_289 2008-12-31 12
+291 val_291 2008-04-08 11
+291 val_291 2008-04-08 12
+291 val_291 2008-12-31 11
+291 val_291 2008-12-31 12
+292 val_292 2008-04-08 11
+292 val_292 2008-04-08 12
+292 val_292 2008-12-31 11
+292 val_292 2008-12-31 12
+296 val_296 2008-04-08 11
+296 val_296 2008-04-08 12
+296 val_296 2008-12-31 11
+296 val_296 2008-12-31 12
+298 val_298 2008-04-08 11
+298 val_298 2008-04-08 11
+298 val_298 2008-04-08 11
+298 val_298 2008-04-08 12
+298 val_298 2008-04-08 12
+298 val_298 2008-04-08 12
+298 val_298 2008-12-31 11
+298 val_298 2008-12-31 11
+298 val_298 2008-12-31 11
+298 val_298 2008-12-31 12
+298 val_298 2008-12-31 12
+298 val_298 2008-12-31 12
+30 val_30 2008-04-08 11
+30 val_30 2008-04-08 12
+30 val_30 2008-12-31 11
+30 val_30 2008-12-31 12
+302 val_302 2008-04-08 11
+302 val_302 2008-04-08 12
+302 val_302 2008-12-31 11
+302 val_302 2008-12-31 12
+305 val_305 2008-04-08 11
+305 val_305 2008-04-08 12
+305 val_305 2008-12-31 11
+305 val_305 2008-12-31 12
+306 val_306 2008-04-08 11
+306 val_306 2008-04-08 12
+306 val_306 2008-12-31 11
+306 val_306 2008-12-31 12
+307 val_307 2008-04-08 11
+307 val_307 2008-04-08 11
+307 val_307 2008-04-08 12
+307 val_307 2008-04-08 12
+307 val_307 2008-12-31 11
+307 val_307 2008-12-31 11
+307 val_307 2008-12-31 12
+307 val_307 2008-12-31 12
+308 val_308 2008-04-08 11
+308 val_308 2008-04-08 12
+308 val_308 2008-12-31 11
+308 val_308 2008-12-31 12
+309 val_309 2008-04-08 11
+309 val_309 2008-04-08 11
+309 val_309 2008-04-08 12
+309 val_309 2008-04-08 12
+309 val_309 2008-12-31 11
+309 val_309 2008-12-31 11
+309 val_309 2008-12-31 12
+309 val_309 2008-12-31 12
+310 val_310 2008-04-08 11
+310 val_310 2008-04-08 12
+310 val_310 2008-12-31 11
+310 val_310 2008-12-31 12
+311 val_311 2008-04-08 11
+311 val_311 2008-04-08 11
+311 val_311 2008-04-08 11
+311 val_311 2008-04-08 12
+311 val_311 2008-04-08 12
+311 val_311 2008-04-08 12
+311 val_311 2008-12-31 11
+311 val_311 2008-12-31 11
+311 val_311 2008-12-31 11
+311 val_311 2008-12-31 12
+311 val_311 2008-12-31 12
+311 val_311 2008-12-31 12
+315 val_315 2008-04-08 11
+315 val_315 2008-04-08 12
+315 val_315 2008-12-31 11
+315 val_315 2008-12-31 12
+316 val_316 2008-04-08 11
+316 val_316 2008-04-08 11
+316 val_316 2008-04-08 11
+316 val_316 2008-04-08 12
+316 val_316 2008-04-08 12
+316 val_316 2008-04-08 12
+316 val_316 2008-12-31 11
+316 val_316 2008-12-31 11
+316 val_316 2008-12-31 11
+316 val_316 2008-12-31 12
+316 val_316 2008-12-31 12
+316 val_316 2008-12-31 12
+317 val_317 2008-04-08 11
+317 val_317 2008-04-08 11
+317 val_317 2008-04-08 12
+317 val_317 2008-04-08 12
+317 val_317 2008-12-31 11
+317 val_317 2008-12-31 11
+317 val_317 2008-12-31 12
+317 val_317 2008-12-31 12
+318 val_318 2008-04-08 11
+318 val_318 2008-04-08 11
+318 val_318 2008-04-08 11
+318 val_318 2008-04-08 12
+318 val_318 2008-04-08 12
+318 val_318 2008-04-08 12
+318 val_318 2008-12-31 11
+318 val_318 2008-12-31 11
+318 val_318 2008-12-31 11
+318 val_318 2008-12-31 12
+318 val_318 2008-12-31 12
+318 val_318 2008-12-31 12
+321 val_321 2008-04-08 11
+321 val_321 2008-04-08 11
+321 val_321 2008-04-08 12
+321 val_321 2008-04-08 12
+321 val_321 2008-12-31 11
+321 val_321 2008-12-31 11
+321 val_321 2008-12-31 12
+321 val_321 2008-12-31 12
+322 val_322 2008-04-08 11
+322 val_322 2008-04-08 11
+322 val_322 2008-04-08 12
+322 val_322 2008-04-08 12
+322 val_322 2008-12-31 11
+322 val_322 2008-12-31 11
+322 val_322 2008-12-31 12
+322 val_322 2008-12-31 12
+323 val_323 2008-04-08 11
+323 val_323 2008-04-08 12
+323 val_323 2008-12-31 11
+323 val_323 2008-12-31 12
+325 val_325 2008-04-08 11
+325 val_325 2008-04-08 11
+325 val_325 2008-04-08 12
+325 val_325 2008-04-08 12
+325 val_325 2008-12-31 11
+325 val_325 2008-12-31 11
+325 val_325 2008-12-31 12
+325 val_325 2008-12-31 12
+327 val_327 2008-04-08 11
+327 val_327 2008-04-08 11
+327 val_327 2008-04-08 11
+327 val_327 2008-04-08 12
+327 val_327 2008-04-08 12
+327 val_327 2008-04-08 12
+327 val_327 2008-12-31 11
+327 val_327 2008-12-31 11
+327 val_327 2008-12-31 11
+327 val_327 2008-12-31 12
+327 val_327 2008-12-31 12
+327 val_327 2008-12-31 12
+33 val_33 2008-04-08 11
+33 val_33 2008-04-08 12
+33 val_33 2008-12-31 11
+33 val_33 2008-12-31 12
+331 val_331 2008-04-08 11
+331 val_331 2008-04-08 11
+331 val_331 2008-04-08 12
+331 val_331 2008-04-08 12
+331 val_331 2008-12-31 11
+331 val_331 2008-12-31 11
+331 val_331 2008-12-31 12
+331 val_331 2008-12-31 12
+332 val_332 2008-04-08 11
+332 val_332 2008-04-08 12
+332 val_332 2008-12-31 11
+332 val_332 2008-12-31 12
+333 val_333 2008-04-08 11
+333 val_333 2008-04-08 11
+333 val_333 2008-04-08 12
+333 val_333 2008-04-08 12
+333 val_333 2008-12-31 11
+333 val_333 2008-12-31 11
+333 val_333 2008-12-31 12
+333 val_333 2008-12-31 12
+335 val_335 2008-04-08 11
+335 val_335 2008-04-08 12
+335 val_335 2008-12-31 11
+335 val_335 2008-12-31 12
+336 val_336 2008-04-08 11
+336 val_336 2008-04-08 12
+336 val_336 2008-12-31 11
+336 val_336 2008-12-31 12
+338 val_338 2008-04-08 11
+338 val_338 2008-04-08 12
+338 val_338 2008-12-31 11
+338 val_338 2008-12-31 12
+339 val_339 2008-04-08 11
+339 val_339 2008-04-08 12
+339 val_339 2008-12-31 11
+339 val_339 2008-12-31 12
+34 val_34 2008-04-08 11
+34 val_34 2008-04-08 12
+34 val_34 2008-12-31 11
+34 val_34 2008-12-31 12
+341 val_341 2008-04-08 11
+341 val_341 2008-04-08 12
+341 val_341 2008-12-31 11
+341 val_341 2008-12-31 12
+342 val_342 2008-04-08 11
+342 val_342 2008-04-08 11
+342 val_342 2008-04-08 12
+342 val_342 2008-04-08 12
+342 val_342 2008-12-31 11
+342 val_342 2008-12-31 11
+342 val_342 2008-12-31 12
+342 val_342 2008-12-31 12
+344 val_344 2008-04-08 11
+344 val_344 2008-04-08 11
+344 val_344 2008-04-08 12
+344 val_344 2008-04-08 12
+344 val_344 2008-12-31 11
+344 val_344 2008-12-31 11
+344 val_344 2008-12-31 12
+344 val_344 2008-12-31 12
+345 val_345 2008-04-08 11
+345 val_345 2008-04-08 12
+345 val_345 2008-12-31 11
+345 val_345 2008-12-31 12
+348 val_348 2008-04-08 11
+348 val_348 2008-04-08 11
+348 val_348 2008-04-08 11
+348 val_348 2008-04-08 11
+348 val_348 2008-04-08 11
+348 val_348 2008-04-08 12
+348 val_348 2008-04-08 12
+348 val_348 2008-04-08 12
+348 val_348 2008-04-08 12
+348 val_348 2008-04-08 12
+348 val_348 2008-12-31 11
+348 val_348 2008-12-31 11
+348 val_348 2008-12-31 11
+348 val_348 2008-12-31 11
+348 val_348 2008-12-31 11
+348 val_348 2008-12-31 12
+348 val_348 2008-12-31 12
+348 val_348 2008-12-31 12
+348 val_348 2008-12-31 12
+348 val_348 2008-12-31 12
+35 val_35 2008-04-08 11
+35 val_35 2008-04-08 11
+35 val_35 2008-04-08 11
+35 val_35 2008-04-08 12
+35 val_35 2008-04-08 12
+35 val_35 2008-04-08 12
+35 val_35 2008-12-31 11
+35 val_35 2008-12-31 11
+35 val_35 2008-12-31 11
+35 val_35 2008-12-31 12
+35 val_35 2008-12-31 12
+35 val_35 2008-12-31 12
+351 val_351 2008-04-08 11
+351 val_351 2008-04-08 12
+351 val_351 2008-12-31 11
+351 val_351 2008-12-31 12
+353 val_353 2008-04-08 11
+353 val_353 2008-04-08 11
+353 val_353 2008-04-08 12
+353 val_353 2008-04-08 12
+353 val_353 2008-12-31 11
+353 val_353 2008-12-31 11
+353 val_353 2008-12-31 12
+353 val_353 2008-12-31 12
+356 val_356 2008-04-08 11
+356 val_356 2008-04-08 12
+356 val_356 2008-12-31 11
+356 val_356 2008-12-31 12
+360 val_360 2008-04-08 11
+360 val_360 2008-04-08 12
+360 val_360 2008-12-31 11
+360 val_360 2008-12-31 12
+362 val_362 2008-04-08 11
+362 val_362 2008-04-08 12
+362 val_362 2008-12-31 11
+362 val_362 2008-12-31 12
+364 val_364 2008-04-08 11
+364 val_364 2008-04-08 12
+364 val_364 2008-12-31 11
+364 val_364 2008-12-31 12
+365 val_365 2008-04-08 11
+365 val_365 2008-04-08 12
+365 val_365 2008-12-31 11
+365 val_365 2008-12-31 12
+366 val_366 2008-04-08 11
+366 val_366 2008-04-08 12
+366 val_366 2008-12-31 11
+366 val_366 2008-12-31 12
+367 val_367 2008-04-08 11
+367 val_367 2008-04-08 11
+367 val_367 2008-04-08 12
+367 val_367 2008-04-08 12
+367 val_367 2008-12-31 11
+367 val_367 2008-12-31 11
+367 val_367 2008-12-31 12
+367 val_367 2008-12-31 12
+368 val_368 2008-04-08 11
+368 val_368 2008-04-08 12
+368 val_368 2008-12-31 11
+368 val_368 2008-12-31 12
+369 val_369 2008-04-08 11
+369 val_369 2008-04-08 11
+369 val_369 2008-04-08 11
+369 val_369 2008-04-08 12
+369 val_369 2008-04-08 12
+369 val_369 2008-04-08 12
+369 val_369 2008-12-31 11
+369 val_369 2008-12-31 11
+369 val_369 2008-12-31 11
+369 val_369 2008-12-31 12
+369 val_369 2008-12-31 12
+369 val_369 2008-12-31 12
+37 val_37 2008-04-08 11
+37 val_37 2008-04-08 11
+37 val_37 2008-04-08 12
+37 val_37 2008-04-08 12
+37 val_37 2008-12-31 11
+37 val_37 2008-12-31 11
+37 val_37 2008-12-31 12
+37 val_37 2008-12-31 12
+373 val_373 2008-04-08 11
+373 val_373 2008-04-08 12
+373 val_373 2008-12-31 11
+373 val_373 2008-12-31 12
+374 val_374 2008-04-08 11
+374 val_374 2008-04-08 12
+374 val_374 2008-12-31 11
+374 val_374 2008-12-31 12
+375 val_375 2008-04-08 11
+375 val_375 2008-04-08 12
+375 val_375 2008-12-31 11
+375 val_375 2008-12-31 12
+377 val_377 2008-04-08 11
+377 val_377 2008-04-08 12
+377 val_377 2008-12-31 11
+377 val_377 2008-12-31 12
+378 val_378 2008-04-08 11
+378 val_378 2008-04-08 12
+378 val_378 2008-12-31 11
+378 val_378 2008-12-31 12
+379 val_379 2008-04-08 11
+379 val_379 2008-04-08 12
+379 val_379 2008-12-31 11
+379 val_379 2008-12-31 12
+382 val_382 2008-04-08 11
+382 val_382 2008-04-08 11
+382 val_382 2008-04-08 12
+382 val_382 2008-04-08 12
+382 val_382 2008-12-31 11
+382 val_382 2008-12-31 11
+382 val_382 2008-12-31 12
+382 val_382 2008-12-31 12
+384 val_384 2008-04-08 11
+384 val_384 2008-04-08 11
+384 val_384 2008-04-08 11
+384 val_384 2008-04-08 12
+384 val_384 2008-04-08 12
+384 val_384 2008-04-08 12
+384 val_384 2008-12-31 11
+384 val_384 2008-12-31 11
+384 val_384 2008-12-31 11
+384 val_384 2008-12-31 12
+384 val_384 2008-12-31 12
+384 val_384 2008-12-31 12
+386 val_386 2008-04-08 11
+386 val_386 2008-04-08 12
+386 val_386 2008-12-31 11
+386 val_386 2008-12-31 12
+389 val_389 2008-04-08 11
+389 val_389 2008-04-08 12
+389 val_389 2008-12-31 11
+389 val_389 2008-12-31 12
+392 val_392 2008-04-08 11
+392 val_392 2008-04-08 12
+392 val_392 2008-12-31 11
+392 val_392 2008-12-31 12
+393 val_393 2008-04-08 11
+393 val_393 2008-04-08 12
+393 val_393 2008-12-31 11
+393 val_393 2008-12-31 12
+394 val_394 2008-04-08 11
+394 val_394 2008-04-08 12
+394 val_394 2008-12-31 11
+394 val_394 2008-12-31 12
+395 val_395 2008-04-08 11
+395 val_395 2008-04-08 11
+395 val_395 2008-04-08 12
+395 val_395 2008-04-08 12
+395 val_395 2008-12-31 11
+395 val_395 2008-12-31 11
+395 val_395 2008-12-31 12
+395 val_395 2008-12-31 12
+396 val_396 2008-04-08 11
+396 val_396 2008-04-08 11
+396 val_396 2008-04-08 11
+396 val_396 2008-04-08 12
+396 val_396 2008-04-08 12
+396 val_396 2008-04-08 12
+396 val_396 2008-12-31 11
+396 val_396 2008-12-31 11
+396 val_396 2008-12-31 11
+396 val_396 2008-12-31 12
+396 val_396 2008-12-31 12
+396 val_396 2008-12-31 12
+397 val_397 2008-04-08 11
+397 val_397 2008-04-08 11
+397 val_397 2008-04-08 12
+397 val_397 2008-04-08 12
+397 val_397 2008-12-31 11
+397 val_397 2008-12-31 11
+397 val_397 2008-12-31 12
+397 val_397 2008-12-31 12
+399 val_399 2008-04-08 11
+399 val_399 2008-04-08 11
+399 val_399 2008-04-08 12
+399 val_399 2008-04-08 12
+399 val_399 2008-12-31 11
+399 val_399 2008-12-31 11
+399 val_399 2008-12-31 12
+399 val_399 2008-12-31 12
+4 val_4 2008-04-08 11
+4 val_4 2008-04-08 12
+4 val_4 2008-12-31 11
+4 val_4 2008-12-31 12
+400 val_400 2008-04-08 11
+400 val_400 2008-04-08 12
+400 val_400 2008-12-31 11
+400 val_400 2008-12-31 12
+401 val_401 2008-04-08 11
+401 val_401 2008-04-08 11
+401 val_401 2008-04-08 11
+401 val_401 2008-04-08 11
+401 val_401 2008-04-08 11
+401 val_401 2008-04-08 12
+401 val_401 2008-04-08 12
+401 val_401 2008-04-08 12
+401 val_401 2008-04-08 12
+401 val_401 2008-04-08 12
+401 val_401 2008-12-31 11
+401 val_401 2008-12-31 11
+401 val_401 2008-12-31 11
+401 val_401 2008-12-31 11
+401 val_401 2008-12-31 11
+401 val_401 2008-12-31 12
+401 val_401 2008-12-31 12
+401 val_401 2008-12-31 12
+401 val_401 2008-12-31 12
+401 val_401 2008-12-31 12
+402 val_402 2008-04-08 11
+402 val_402 2008-04-08 12
+402 val_402 2008-12-31 11
+402 val_402 2008-12-31 12
+403 val_403 2008-04-08 11
+403 val_403 2008-04-08 11
+403 val_403 2008-04-08 11
+403 val_403 2008-04-08 12
+403 val_403 2008-04-08 12
+403 val_403 2008-04-08 12
+403 val_403 2008-12-31 11
+403 val_403 2008-12-31 11
+403 val_403 2008-12-31 11
+403 val_403 2008-12-31 12
+403 val_403 2008-12-31 12
+403 val_403 2008-12-31 12
+404 val_404 2008-04-08 11
+404 val_404 2008-04-08 11
+404 val_404 2008-04-08 12
+404 val_404 2008-04-08 12
+404 val_404 2008-12-31 11
+404 val_404 2008-12-31 11
+404 val_404 2008-12-31 12
+404 val_404 2008-12-31 12
+406 val_406 2008-04-08 11
+406 val_406 2008-04-08 11
+406 val_406 2008-04-08 11
+406 val_406 2008-04-08 11
+406 val_406 2008-04-08 12
+406 val_406 2008-04-08 12
+406 val_406 2008-04-08 12
+406 val_406 2008-04-08 12
+406 val_406 2008-12-31 11
+406 val_406 2008-12-31 11
+406 val_406 2008-12-31 11
+406 val_406 2008-12-31 11
+406 val_406 2008-12-31 12
+406 val_406 2008-12-31 12
+406 val_406 2008-12-31 12
+406 val_406 2008-12-31 12
+407 val_407 2008-04-08 11
+407 val_407 2008-04-08 12
+407 val_407 2008-12-31 11
+407 val_407 2008-12-31 12
+409 val_409 2008-04-08 11
+409 val_409 2008-04-08 11
+409 val_409 2008-04-08 11
+409 val_409 2008-04-08 12
+409 val_409 2008-04-08 12
+409 val_409 2008-04-08 12
+409 val_409 2008-12-31 11
+409 val_409 2008-12-31 11
+409 val_409 2008-12-31 11
+409 val_409 2008-12-31 12
+409 val_409 2008-12-31 12
+409 val_409 2008-12-31 12
+41 val_41 2008-04-08 11
+41 val_41 2008-04-08 12
+41 val_41 2008-12-31 11
+41 val_41 2008-12-31 12
+411 val_411 2008-04-08 11
+411 val_411 2008-04-08 12
+411 val_411 2008-12-31 11
+411 val_411 2008-12-31 12
+413 val_413 2008-04-08 11
+413 val_413 2008-04-08 11
+413 val_413 2008-04-08 12
+413 val_413 2008-04-08 12
+413 val_413 2008-12-31 11
+413 val_413 2008-12-31 11
+413 val_413 2008-12-31 12
+413 val_413 2008-12-31 12
+414 val_414 2008-04-08 11
+414 val_414 2008-04-08 11
+414 val_414 2008-04-08 12
+414 val_414 2008-04-08 12
+414 val_414 2008-12-31 11
+414 val_414 2008-12-31 11
+414 val_414 2008-12-31 12
+414 val_414 2008-12-31 12
+417 val_417 2008-04-08 11
+417 val_417 2008-04-08 11
+417 val_417 2008-04-08 11
+417 val_417 2008-04-08 12
+417 val_417 2008-04-08 12
+417 val_417 2008-04-08 12
+417 val_417 2008-12-31 11
+417 val_417 2008-12-31 11
+417 val_417 2008-12-31 11
+417 val_417 2008-12-31 12
+417 val_417 2008-12-31 12
+417 val_417 2008-12-31 12
+418 val_418 2008-04-08 11
+418 val_418 2008-04-08 12
+418 val_418 2008-12-31 11
+418 val_418 2008-12-31 12
+419 val_419 2008-04-08 11
+419 val_419 2008-04-08 12
+419 val_419 2008-12-31 11
+419 val_419 2008-12-31 12
+42 val_42 2008-04-08 11
+42 val_42 2008-04-08 11
+42 val_42 2008-04-08 12
+42 val_42 2008-04-08 12
+42 val_42 2008-12-31 11
+42 val_42 2008-12-31 11
+42 val_42 2008-12-31 12
+42 val_42 2008-12-31 12
+421 val_421 2008-04-08 11
+421 val_421 2008-04-08 12
+421 val_421 2008-12-31 11
+421 val_421 2008-12-31 12
+424 val_424 2008-04-08 11
+424 val_424 2008-04-08 11
+424 val_424 2008-04-08 12
+424 val_424 2008-04-08 12
+424 val_424 2008-12-31 11
+424 val_424 2008-12-31 11
+424 val_424 2008-12-31 12
+424 val_424 2008-12-31 12
+427 val_427 2008-04-08 11
+427 val_427 2008-04-08 12
+427 val_427 2008-12-31 11
+427 val_427 2008-12-31 12
+429 val_429 2008-04-08 11
+429 val_429 2008-04-08 11
+429 val_429 2008-04-08 12
+429 val_429 2008-04-08 12
+429 val_429 2008-12-31 11
+429 val_429 2008-12-31 11
+429 val_429 2008-12-31 12
+429 val_429 2008-12-31 12
+43 val_43 2008-04-08 11
+43 val_43 2008-04-08 12
+43 val_43 2008-12-31 11
+43 val_43 2008-12-31 12
+430 val_430 2008-04-08 11
+430 val_430 2008-04-08 11
+430 val_430 2008-04-08 11
+430 val_430 2008-04-08 12
+430 val_430 2008-04-08 12
+430 val_430 2008-04-08 12
+430 val_430 2008-12-31 11
+430 val_430 2008-12-31 11
+430 val_430 2008-12-31 11
+430 val_430 2008-12-31 12
+430 val_430 2008-12-31 12
+430 val_430 2008-12-31 12
+431 val_431 2008-04-08 11
+431 val_431 2008-04-08 11
+431 val_431 2008-04-08 11
+431 val_431 2008-04-08 12
+431 val_431 2008-04-08 12
+431 val_431 2008-04-08 12
+431 val_431 2008-12-31 11
+431 val_431 2008-12-31 11
+431 val_431 2008-12-31 11
+431 val_431 2008-12-31 12
+431 val_431 2008-12-31 12
+431 val_431 2008-12-31 12
+432 val_432 2008-04-08 11
+432 val_432 2008-04-08 12
+432 val_432 2008-12-31 11
+432 val_432 2008-12-31 12
+435 val_435 2008-04-08 11
+435 val_435 2008-04-08 12
+435 val_435 2008-12-31 11
+435 val_435 2008-12-31 12
+436 val_436 2008-04-08 11
+436 val_436 2008-04-08 12
+436 val_436 2008-12-31 11
+436 val_436 2008-12-31 12
+437 val_437 2008-04-08 11
+437 val_437 2008-04-08 12
+437 val_437 2008-12-31 11
+437 val_437 2008-12-31 12
+438 val_438 2008-04-08 11
+438 val_438 2008-04-08 11
+438 val_438 2008-04-08 11
+438 val_438 2008-04-08 12
+438 val_438 2008-04-08 12
+438 val_438 2008-04-08 12
+438 val_438 2008-12-31 11
+438 val_438 2008-12-31 11
+438 val_438 2008-12-31 11
+438 val_438 2008-12-31 12
+438 val_438 2008-12-31 12
+438 val_438 2008-12-31 12
+439 val_439 2008-04-08 11
+439 val_439 2008-04-08 11
+439 val_439 2008-04-08 12
+439 val_439 2008-04-08 12
+439 val_439 2008-12-31 11
+439 val_439 2008-12-31 11
+439 val_439 2008-12-31 12
+439 val_439 2008-12-31 12
+44 val_44 2008-04-08 11
+44 val_44 2008-04-08 12
+44 val_44 2008-12-31 11
+44 val_44 2008-12-31 12
+443 val_443 2008-04-08 11
+443 val_443 2008-04-08 12
+443 val_443 2008-12-31 11
+443 val_443 2008-12-31 12
+444 val_444 2008-04-08 11
+444 val_444 2008-04-08 12
+444 val_444 2008-12-31 11
+444 val_444 2008-12-31 12
+446 val_446 2008-04-08 11
+446 val_446 2008-04-08 12
+446 val_446 2008-12-31 11
+446 val_446 2008-12-31 12
+448 val_448 2008-04-08 11
+448 val_448 2008-04-08 12
+448 val_448 2008-12-31 11
+448 val_448 2008-12-31 12
+449 val_449 2008-04-08 11
+449 val_449 2008-04-08 12
+449 val_449 2008-12-31 11
+449 val_449 2008-12-31 12
+452 val_452 2008-04-08 11
+452 val_452 2008-04-08 12
+452 val_452 2008-12-31 11
+452 val_452 2008-12-31 12
+453 val_453 2008-04-08 11
+453 val_453 2008-04-08 12
+453 val_453 2008-12-31 11
+453 val_453 2008-12-31 12
+454 val_454 2008-04-08 11
+454 val_454 2008-04-08 11
+454 val_454 2008-04-08 11
+454 val_454 2008-04-08 12
+454 val_454 2008-04-08 12
+454 val_454 2008-04-08 12
+454 val_454 2008-12-31 11
+454 val_454 2008-12-31 11
+454 val_454 2008-12-31 11
+454 val_454 2008-12-31 12
+454 val_454 2008-12-31 12
+454 val_454 2008-12-31 12
+455 val_455 2008-04-08 11
+455 val_455 2008-04-08 12
+455 val_455 2008-12-31 11
+455 val_455 2008-12-31 12
+457 val_457 2008-04-08 11
+457 val_457 2008-04-08 12
+457 val_457 2008-12-31 11
+457 val_457 2008-12-31 12
+458 val_458 2008-04-08 11
+458 val_458 2008-04-08 11
+458 val_458 2008-04-08 12
+458 val_458 2008-04-08 12
+458 val_458 2008-12-31 11
+458 val_458 2008-12-31 11
+458 val_458 2008-12-31 12
+458 val_458 2008-12-31 12
+459 val_459 2008-04-08 11
+459 val_459 2008-04-08 11
+459 val_459 2008-04-08 12
+459 val_459 2008-04-08 12
+459 val_459 2008-12-31 11
+459 val_459 2008-12-31 11
+459 val_459 2008-12-31 12
+459 val_459 2008-12-31 12
+460 val_460 2008-04-08 11
+460 val_460 2008-04-08 12
+460 val_460 2008-12-31 11
+460 val_460 2008-12-31 12
+462 val_462 2008-04-08 11
+462 val_462 2008-04-08 11
+462 val_462 2008-04-08 12
+462 val_462 2008-04-08 12
+462 val_462 2008-12-31 11
+462 val_462 2008-12-31 11
+462 val_462 2008-12-31 12
+462 val_462 2008-12-31 12
+463 val_463 2008-04-08 11
+463 val_463 2008-04-08 11
+463 val_463 2008-04-08 12
+463 val_463 2008-04-08 12
+463 val_463 2008-12-31 11
+463 val_463 2008-12-31 11
+463 val_463 2008-12-31 12
+463 val_463 2008-12-31 12
+466 val_466 2008-04-08 11
+466 val_466 2008-04-08 11
+466 val_466 2008-04-08 11
+466 val_466 2008-04-08 12
+466 val_466 2008-04-08 12
+466 val_466 2008-04-08 12
+466 val_466 2008-12-31 11
+466 val_466 2008-12-31 11
+466 val_466 2008-12-31 11
+466 val_466 2008-12-31 12
+466 val_466 2008-12-31 12
+466 val_466 2008-12-31 12
+467 val_467 2008-04-08 11
+467 val_467 2008-04-08 12
+467 val_467 2008-12-31 11
+467 val_467 2008-12-31 12
+468 val_468 2008-04-08 11
+468 val_468 2008-04-08 11
+468 val_468 2008-04-08 11
+468 val_468 2008-04-08 11
+468 val_468 2008-04-08 12
+468 val_468 2008-04-08 12
+468 val_468 2008-04-08 12
+468 val_468 2008-04-08 12
+468 val_468 2008-12-31 11
+468 val_468 2008-12-31 11
+468 val_468 2008-12-31 11
+468 val_468 2008-12-31 11
+468 val_468 2008-12-31 12
+468 val_468 2008-12-31 12
+468 val_468 2008-12-31 12
+468 val_468 2008-12-31 12
+469 val_469 2008-04-08 11
+469 val_469 2008-04-08 11
+469 val_469 2008-04-08 11
+469 val_469 2008-04-08 11
+469 val_469 2008-04-08 11
+469 val_469 2008-04-08 12
+469 val_469 2008-04-08 12
+469 val_469 2008-04-08 12
+469 val_469 2008-04-08 12
+469 val_469 2008-04-08 12
+469 val_469 2008-12-31 11
+469 val_469 2008-12-31 11
+469 val_469 2008-12-31 11
+469 val_469 2008-12-31 11
+469 val_469 2008-12-31 11
+469 val_469 2008-12-31 12
+469 val_469 2008-12-31 12
+469 val_469 2008-12-31 12
+469 val_469 2008-12-31 12
+469 val_469 2008-12-31 12
+47 val_47 2008-04-08 11
+47 val_47 2008-04-08 12
+47 val_47 2008-12-31 11
+47 val_47 2008-12-31 12
+470 val_470 2008-04-08 11
+470 val_470 2008-04-08 12
+470 val_470 2008-12-31 11
+470 val_470 2008-12-31 12
+472 val_472 2008-04-08 11
+472 val_472 2008-04-08 12
+472 val_472 2008-12-31 11
+472 val_472 2008-12-31 12
+475 val_475 2008-04-08 11
+475 val_475 2008-04-08 12
+475 val_475 2008-12-31 11
+475 val_475 2008-12-31 12
+477 val_477 2008-04-08 11
+477 val_477 2008-04-08 12
+477 val_477 2008-12-31 11
+477 val_477 2008-12-31 12
+478 val_478 2008-04-08 11
+478 val_478 2008-04-08 11
+478 val_478 2008-04-08 12
+478 val_478 2008-04-08 12
+478 val_478 2008-12-31 11
+478 val_478 2008-12-31 11
+478 val_478 2008-12-31 12
+478 val_478 2008-12-31 12
+479 val_479 2008-04-08 11
+479 val_479 2008-04-08 12
+479 val_479 2008-12-31 11
+479 val_479 2008-12-31 12
+480 val_480 2008-04-08 11
+480 val_480 2008-04-08 11
+480 val_480 2008-04-08 11
+480 val_480 2008-04-08 12
+480 val_480 2008-04-08 12
+480 val_480 2008-04-08 12
+480 val_480 2008-12-31 11
+480 val_480 2008-12-31 11
+480 val_480 2008-12-31 11
+480 val_480 2008-12-31 12
+480 val_480 2008-12-31 12
+480 val_480 2008-12-31 12
+481 val_481 2008-04-08 11
+481 val_481 2008-04-08 12
+481 val_481 2008-12-31 11
+481 val_481 2008-12-31 12
+482 val_482 2008-04-08 11
+482 val_482 2008-04-08 12
+482 val_482 2008-12-31 11
+482 val_482 2008-12-31 12
+483 val_483 2008-04-08 11
+483 val_483 2008-04-08 12
+483 val_483 2008-12-31 11
+483 val_483 2008-12-31 12
+484 val_484 2008-04-08 11
+484 val_484 2008-04-08 12
+484 val_484 2008-12-31 11
+484 val_484 2008-12-31 12
+485 val_485 2008-04-08 11
+485 val_485 2008-04-08 12
+485 val_485 2008-12-31 11
+485 val_485 2008-12-31 12
+487 val_487 2008-04-08 11
+487 val_487 2008-04-08 12
+487 val_487 2008-12-31 11
+487 val_487 2008-12-31 12
+489 val_489 2008-04-08 11
+489 val_489 2008-04-08 11
+489 val_489 2008-04-08 11
+489 val_489 2008-04-08 11
+489 val_489 2008-04-08 12
+489 val_489 2008-04-08 12
+489 val_489 2008-04-08 12
+489 val_489 2008-04-08 12
+489 val_489 2008-12-31 11
+489 val_489 2008-12-31 11
+489 val_489 2008-12-31 11
+489 val_489 2008-12-31 11
+489 val_489 2008-12-31 12
+489 val_489 2008-12-31 12
+489 val_489 2008-12-31 12
+489 val_489 2008-12-31 12
+490 val_490 2008-04-08 11
+490 val_490 2008-04-08 12
+490 val_490 2008-12-31 11
+490 val_490 2008-12-31 12
+491 val_491 2008-04-08 11
+491 val_491 2008-04-08 12
+491 val_491 2008-12-31 11
+491 val_491 2008-12-31 12
+492 val_492 2008-04-08 11
+492 val_492 2008-04-08 11
+492 val_492 2008-04-08 12
+492 val_492 2008-04-08 12
+492 val_492 2008-12-31 11
+492 val_492 2008-12-31 11
+492 val_492 2008-12-31 12
+492 val_492 2008-12-31 12
+493 val_493 2008-04-08 11
+493 val_493 2008-04-08 12
+493 val_493 2008-12-31 11
+493 val_493 2008-12-31 12
+494 val_494 2008-04-08 11
+494 val_494 2008-04-08 12
+494 val_494 2008-12-31 11
+494 val_494 2008-12-31 12
+495 val_495 2008-04-08 11
+495 val_495 2008-04-08 12
+495 val_495 2008-12-31 11
+495 val_495 2008-12-31 12
+496 val_496 2008-04-08 11
+496 val_496 2008-04-08 12
+496 val_496 2008-12-31 11
+496 val_496 2008-12-31 12
+497 val_497 2008-04-08 11
+497 val_497 2008-04-08 12
+497 val_497 2008-12-31 11
+497 val_497 2008-12-31 12
+498 val_498 2008-04-08 11
+498 val_498 2008-04-08 11
+498 val_498 2008-04-08 11
+498 val_498 2008-04-08 12
+498 val_498 2008-04-08 12
+498 val_498 2008-04-08 12
+498 val_498 2008-12-31 11
+498 val_498 2008-12-31 11
+498 val_498 2008-12-31 11
+498 val_498 2008-12-31 12
+498 val_498 2008-12-31 12
+498 val_498 2008-12-31 12
+5 val_5 2008-04-08 11
+5 val_5 2008-04-08 11
+5 val_5 2008-04-08 11
+5 val_5 2008-04-08 12
+5 val_5 2008-04-08 12
+5 val_5 2008-04-08 12
+5 val_5 2008-12-31 11
+5 val_5 2008-12-31 11
+5 val_5 2008-12-31 11
+5 val_5 2008-12-31 12
+5 val_5 2008-12-31 12
+5 val_5 2008-12-31 12
+51 val_51 2008-04-08 11
+51 val_51 2008-04-08 11
+51 val_51 2008-04-08 12
+51 val_51 2008-04-08 12
+51 val_51 2008-12-31 11
+51 val_51 2008-12-31 11
+51 val_51 2008-12-31 12
+51 val_51 2008-12-31 12
+53 val_53 2008-04-08 11
+53 val_53 2008-04-08 12
+53 val_53 2008-12-31 11
+53 val_53 2008-12-31 12
+54 val_54 2008-04-08 11
+54 val_54 2008-04-08 12
+54 val_54 2008-12-31 11
+54 val_54 2008-12-31 12
+57 val_57 2008-04-08 11
+57 val_57 2008-04-08 12
+57 val_57 2008-12-31 11
+57 val_57 2008-12-31 12
+58 val_58 2008-04-08 11
+58 val_58 2008-04-08 11
+58 val_58 2008-04-08 12
+58 val_58 2008-04-08 12
+58 val_58 2008-12-31 11
+58 val_58 2008-12-31 11
+58 val_58 2008-12-31 12
+58 val_58 2008-12-31 12
+64 val_64 2008-04-08 11
+64 val_64 2008-04-08 12
+64 val_64 2008-12-31 11
+64 val_64 2008-12-31 12
+65 val_65 2008-04-08 11
+65 val_65 2008-04-08 12
+65 val_65 2008-12-31 11
+65 val_65 2008-12-31 12
+66 val_66 2008-04-08 11
+66 val_66 2008-04-08 12
+66 val_66 2008-12-31 11
+66 val_66 2008-12-31 12
+67 val_67 2008-04-08 11
+67 val_67 2008-04-08 11
+67 val_67 2008-04-08 12
+67 val_67 2008-04-08 12
+67 val_67 2008-12-31 11
+67 val_67 2008-12-31 11
+67 val_67 2008-12-31 12
+67 val_67 2008-12-31 12
+69 val_69 2008-04-08 11
+69 val_69 2008-04-08 12
+69 val_69 2008-12-31 11
+69 val_69 2008-12-31 12
+70 val_70 2008-04-08 11
+70 val_70 2008-04-08 11
+70 val_70 2008-04-08 11
+70 val_70 2008-04-08 12
+70 val_70 2008-04-08 12
+70 val_70 2008-04-08 12
+70 val_70 2008-12-31 11
+70 val_70 2008-12-31 11
+70 val_70 2008-12-31 11
+70 val_70 2008-12-31 12
+70 val_70 2008-12-31 12
+70 val_70 2008-12-31 12
+72 val_72 2008-04-08 11
+72 val_72 2008-04-08 11
+72 val_72 2008-04-08 12
+72 val_72 2008-04-08 12
+72 val_72 2008-12-31 11
+72 val_72 2008-12-31 11
+72 val_72 2008-12-31 12
+72 val_72 2008-12-31 12
+74 val_74 2008-04-08 11
+74 val_74 2008-04-08 12
+74 val_74 2008-12-31 11
+74 val_74 2008-12-31 12
+76 val_76 2008-04-08 11
+76 val_76 2008-04-08 11
+76 val_76 2008-04-08 12
+76 val_76 2008-04-08 12
+76 val_76 2008-12-31 11
+76 val_76 2008-12-31 11
+76 val_76 2008-12-31 12
+76 val_76 2008-12-31 12
+77 val_77 2008-04-08 11
+77 val_77 2008-04-08 12
+77 val_77 2008-12-31 11
+77 val_77 2008-12-31 12
+78 val_78 2008-04-08 11
+78 val_78 2008-04-08 12
+78 val_78 2008-12-31 11
+78 val_78 2008-12-31 12
+8 val_8 2008-04-08 11
+8 val_8 2008-04-08 12
+8 val_8 2008-12-31 11
+8 val_8 2008-12-31 12
+80 val_80 2008-04-08 11
+80 val_80 2008-04-08 12
+80 val_80 2008-12-31 11
+80 val_80 2008-12-31 12
+82 val_82 2008-04-08 11
+82 val_82 2008-04-08 12
+82 val_82 2008-12-31 11
+82 val_82 2008-12-31 12
+83 val_83 2008-04-08 11
+83 val_83 2008-04-08 11
+83 val_83 2008-04-08 12
+83 val_83 2008-04-08 12
+83 val_83 2008-12-31 11
+83 val_83 2008-12-31 11
+83 val_83 2008-12-31 12
+83 val_83 2008-12-31 12
+84 val_84 2008-04-08 11
+84 val_84 2008-04-08 11
+84 val_84 2008-04-08 12
+84 val_84 2008-04-08 12
+84 val_84 2008-12-31 11
+84 val_84 2008-12-31 11
+84 val_84 2008-12-31 12
+84 val_84 2008-12-31 12
+85 val_85 2008-04-08 11
+85 val_85 2008-04-08 12
+85 val_85 2008-12-31 11
+85 val_85 2008-12-31 12
+86 val_86 2008-04-08 11
+86 val_86 2008-04-08 12
+86 val_86 2008-12-31 11
+86 val_86 2008-12-31 12
+87 val_87 2008-04-08 11
+87 val_87 2008-04-08 12
+87 val_87 2008-12-31 11
+87 val_87 2008-12-31 12
+9 val_9 2008-04-08 11
+9 val_9 2008-04-08 12
+9 val_9 2008-12-31 11
+9 val_9 2008-12-31 12
+90 val_90 2008-04-08 11
+90 val_90 2008-04-08 11
+90 val_90 2008-04-08 11
+90 val_90 2008-04-08 12
+90 val_90 2008-04-08 12
+90 val_90 2008-04-08 12
+90 val_90 2008-12-31 11
+90 val_90 2008-12-31 11
+90 val_90 2008-12-31 11
+90 val_90 2008-12-31 12
+90 val_90 2008-12-31 12
+90 val_90 2008-12-31 12
+92 val_92 2008-04-08 11
+92 val_92 2008-04-08 12
+92 val_92 2008-12-31 11
+92 val_92 2008-12-31 12
+95 val_95 2008-04-08 11
+95 val_95 2008-04-08 11
+95 val_95 2008-04-08 12
+95 val_95 2008-04-08 12
+95 val_95 2008-12-31 11
+95 val_95 2008-12-31 11
+95 val_95 2008-12-31 12
+95 val_95 2008-12-31 12
+96 val_96 2008-04-08 11
+96 val_96 2008-04-08 12
+96 val_96 2008-12-31 11
+96 val_96 2008-12-31 12
+97 val_97 2008-04-08 11
+97 val_97 2008-04-08 11
+97 val_97 2008-04-08 12
+97 val_97 2008-04-08 12
+97 val_97 2008-12-31 11
+97 val_97 2008-12-31 11
+97 val_97 2008-12-31 12
+97 val_97 2008-12-31 12
+98 val_98 2008-04-08 11
+98 val_98 2008-04-08 11
+98 val_98 2008-04-08 12
+98 val_98 2008-04-08 12
+98 val_98 2008-12-31 11
+98 val_98 2008-12-31 11
+98 val_98 2008-12-31 12
+98 val_98 2008-12-31 12
http://git-wip-us.apache.org/repos/asf/hive/blob/ec4b936e/ql/src/test/results/clientpositive/autoColumnStats_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/autoColumnStats_2.q.out b/ql/src/test/results/clientpositive/autoColumnStats_2.q.out
new file mode 100644
index 0000000..a76bf5f
--- /dev/null
+++ b/ql/src/test/results/clientpositive/autoColumnStats_2.q.out
@@ -0,0 +1,1500 @@
+PREHOOK: query: drop table src_multi1
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table src_multi1
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table src_multi1 like src
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_multi1
+POSTHOOK: query: create table src_multi1 like src
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_multi1
+PREHOOK: query: insert into table src_multi1 select * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@src_multi1
+POSTHOOK: query: insert into table src_multi1 select * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@src_multi1
+POSTHOOK: Lineage: src_multi1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: explain extended select * from src_multi1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended select * from src_multi1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ TableScan
+ alias: src_multi1
+ Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+ GatherStats: false
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+ ListSink
+
+PREHOOK: query: describe formatted src_multi1
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@src_multi1
+POSTHOOK: query: describe formatted src_multi1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@src_multi1
+# col_name data_type comment
+
+key string default
+value string default
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"}
+ numFiles 1
+ numRows 500
+ rawDataSize 5312
+ totalSize 5812
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: drop table a
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table a
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: drop table b
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table b
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table a like src
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@a
+POSTHOOK: query: create table a like src
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@a
+PREHOOK: query: create table b like src
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@b
+POSTHOOK: query: create table b like src
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@b
+PREHOOK: query: from src
+insert into table a select *
+insert into table b select *
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@a
+PREHOOK: Output: default@b
+POSTHOOK: query: from src
+insert into table a select *
+insert into table b select *
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@a
+POSTHOOK: Output: default@b
+POSTHOOK: Lineage: a.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: a.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: b.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: b.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: describe formatted a key
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@a
+POSTHOOK: query: describe formatted a key
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@a
+# col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment
+
+key string 0 205 2.812 3 from deserializer
+PREHOOK: query: describe formatted b key
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@b
+POSTHOOK: query: describe formatted b key
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@b
+# col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment
+
+key string 0 205 2.812 3 from deserializer
+PREHOOK: query: from src
+insert overwrite table a select *
+insert into table b select *
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@a
+PREHOOK: Output: default@b
+POSTHOOK: query: from src
+insert overwrite table a select *
+insert into table b select *
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@a
+POSTHOOK: Output: default@b
+POSTHOOK: Lineage: a.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: a.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: b.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: b.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: describe formatted a
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@a
+POSTHOOK: query: describe formatted a
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@a
+# col_name data_type comment
+
+key string default
+value string default
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"}
+ numFiles 1
+ numRows 500
+ rawDataSize 5312
+ totalSize 5812
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: describe formatted b
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@b
+POSTHOOK: query: describe formatted b
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@b
+# col_name data_type comment
+
+key string default
+value string default
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"}
+ numFiles 2
+ numRows 1000
+ rawDataSize 10624
+ totalSize 11624
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: describe formatted b key
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@b
+POSTHOOK: query: describe formatted b key
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@b
+# col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment
+
+key string 0 205 2.812 3 from deserializer
+PREHOOK: query: describe formatted b value
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@b
+POSTHOOK: query: describe formatted b value
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@b
+# col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment
+
+value string 0 214 6.812 7 from deserializer
+PREHOOK: query: insert into table b select NULL, NULL from src limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@b
+POSTHOOK: query: insert into table b select NULL, NULL from src limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@b
+POSTHOOK: Lineage: b.key SIMPLE []
+POSTHOOK: Lineage: b.value SIMPLE []
+PREHOOK: query: describe formatted b key
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@b
+POSTHOOK: query: describe formatted b key
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@b
+# col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment
+
+key string 10 205 2.812 3 from deserializer
+PREHOOK: query: describe formatted b value
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@b
+POSTHOOK: query: describe formatted b value
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@b
+# col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment
+
+value string 10 214 6.812 7 from deserializer
+PREHOOK: query: insert into table b(value) select key+100000 from src limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@b
+POSTHOOK: query: insert into table b(value) select key+100000 from src limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@b
+POSTHOOK: Lineage: b.key SIMPLE []
+POSTHOOK: Lineage: b.value EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: describe formatted b key
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@b
+POSTHOOK: query: describe formatted b key
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@b
+# col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment
+
+key string 20 205 2.812 3 from deserializer
+PREHOOK: query: describe formatted b value
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@b
+POSTHOOK: query: describe formatted b value
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@b
+# col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment
+
+value string 10 214 8.0 8 from deserializer
+PREHOOK: query: drop table src_multi2
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table src_multi2
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table src_multi2 like src
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_multi2
+POSTHOOK: query: create table src_multi2 like src
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_multi2
+PREHOOK: query: insert into table src_multi2 select subq.key, src.value from (select * from src union select * from src1)subq join src on subq.key=src.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@src1
+PREHOOK: Output: default@src_multi2
+POSTHOOK: query: insert into table src_multi2 select subq.key, src.value from (select * from src union select * from src1)subq join src on subq.key=src.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@src1
+POSTHOOK: Output: default@src_multi2
+POSTHOOK: Lineage: src_multi2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src1)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: describe formatted src_multi2
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@src_multi2
+POSTHOOK: query: describe formatted src_multi2
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@src_multi2
+# col_name data_type comment
+
+key string default
+value string default
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"}
+ numFiles 1
+ numRows 508
+ rawDataSize 5400
+ totalSize 5908
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: drop table nzhang_part14
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table nzhang_part14
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table if not exists nzhang_part14 (key string)
+ partitioned by (value string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@nzhang_part14
+POSTHOOK: query: create table if not exists nzhang_part14 (key string)
+ partitioned by (value string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@nzhang_part14
+PREHOOK: query: insert into table nzhang_part14 partition(value)
+select key, value from (
+ select * from (select 'k1' as key, cast(null as string) as value from src limit 2)a
+ union all
+ select * from (select 'k2' as key, '' as value from src limit 2)b
+ union all
+ select * from (select 'k3' as key, ' ' as value from src limit 2)c
+) T
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@nzhang_part14
+POSTHOOK: query: insert into table nzhang_part14 partition(value)
+select key, value from (
+ select * from (select 'k1' as key, cast(null as string) as value from src limit 2)a
+ union all
+ select * from (select 'k2' as key, '' as value from src limit 2)b
+ union all
+ select * from (select 'k3' as key, ' ' as value from src limit 2)c
+) T
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@nzhang_part14@value=
+POSTHOOK: Output: default@nzhang_part14@value=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Lineage: nzhang_part14 PARTITION(value= ).key EXPRESSION []
+POSTHOOK: Lineage: nzhang_part14 PARTITION(value=__HIVE_DEFAULT_PARTITION__).key EXPRESSION []
+PREHOOK: query: explain select key from nzhang_part14
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select key from nzhang_part14
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ TableScan
+ alias: nzhang_part14
+ Statistics: Num rows: 6 Data size: 516 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 6 Data size: 516 Basic stats: COMPLETE Column stats: COMPLETE
+ ListSink
+
+PREHOOK: query: drop table src5
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table src5
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table src5 as select key, value from src limit 5
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src5
+POSTHOOK: query: create table src5 as select key, value from src limit 5
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src5
+POSTHOOK: Lineage: src5.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src5.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert into table nzhang_part14 partition(value)
+select key, value from src5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src5
+PREHOOK: Output: default@nzhang_part14
+POSTHOOK: query: insert into table nzhang_part14 partition(value)
+select key, value from src5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src5
+POSTHOOK: Output: default@nzhang_part14@value=val_165
+POSTHOOK: Output: default@nzhang_part14@value=val_238
+POSTHOOK: Output: default@nzhang_part14@value=val_27
+POSTHOOK: Output: default@nzhang_part14@value=val_311
+POSTHOOK: Output: default@nzhang_part14@value=val_86
+POSTHOOK: Lineage: nzhang_part14 PARTITION(value=val_165).key SIMPLE [(src5)src5.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part14 PARTITION(value=val_238).key SIMPLE [(src5)src5.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part14 PARTITION(value=val_27).key SIMPLE [(src5)src5.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part14 PARTITION(value=val_311).key SIMPLE [(src5)src5.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part14 PARTITION(value=val_86).key SIMPLE [(src5)src5.FieldSchema(name:key, type:string, comment:null), ]
+PREHOOK: query: explain select key from nzhang_part14
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select key from nzhang_part14
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ TableScan
+ alias: nzhang_part14
+ Statistics: Num rows: 11 Data size: 946 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 11 Data size: 946 Basic stats: COMPLETE Column stats: COMPLETE
+ ListSink
+
+PREHOOK: query: drop table alter5
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table alter5
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table alter5 ( col1 string ) partitioned by (dt string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@alter5
+POSTHOOK: query: create table alter5 ( col1 string ) partitioned by (dt string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@alter5
+PREHOOK: query: alter table alter5 add partition (dt='a')
+PREHOOK: type: ALTERTABLE_ADDPARTS
+PREHOOK: Output: default@alter5
+POSTHOOK: query: alter table alter5 add partition (dt='a')
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: Output: default@alter5
+POSTHOOK: Output: default@alter5@dt=a
+PREHOOK: query: describe formatted alter5 partition (dt='a')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@alter5
+POSTHOOK: query: describe formatted alter5 partition (dt='a')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@alter5
+# col_name data_type comment
+
+col1 string
+
+# Partition Information
+# col_name data_type comment
+
+dt string
+
+# Detailed Partition Information
+Partition Value: [a]
+Database: default
+Table: alter5
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"}
+ numFiles 0
+ numRows 0
+ rawDataSize 0
+ totalSize 0
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: insert into table alter5 partition (dt='a') select key from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@alter5@dt=a
+POSTHOOK: query: insert into table alter5 partition (dt='a') select key from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@alter5@dt=a
+POSTHOOK: Lineage: alter5 PARTITION(dt=a).col1 SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: describe formatted alter5 partition (dt='a')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@alter5
+POSTHOOK: query: describe formatted alter5 partition (dt='a')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@alter5
+# col_name data_type comment
+
+col1 string
+
+# Partition Information
+# col_name data_type comment
+
+dt string
+
+# Detailed Partition Information
+Partition Value: [a]
+Database: default
+Table: alter5
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"col1\":\"true\"},\"BASIC_STATS\":\"true\"}
+ numFiles 1
+ numRows 500
+ rawDataSize 1406
+ totalSize 1906
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: explain select * from alter5 where dt='a'
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from alter5 where dt='a'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ TableScan
+ alias: alter5
+ Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: col1 (type: string), 'a' (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 86000 Basic stats: COMPLETE Column stats: COMPLETE
+ ListSink
+
+PREHOOK: query: drop table alter5
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@alter5
+PREHOOK: Output: default@alter5
+POSTHOOK: query: drop table alter5
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@alter5
+POSTHOOK: Output: default@alter5
+PREHOOK: query: create table alter5 ( col1 string ) partitioned by (dt string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@alter5
+POSTHOOK: query: create table alter5 ( col1 string ) partitioned by (dt string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@alter5
+PREHOOK: query: alter table alter5 add partition (dt='a') location 'parta'
+PREHOOK: type: ALTERTABLE_ADDPARTS
+#### A masked pattern was here ####
+PREHOOK: Output: default@alter5
+POSTHOOK: query: alter table alter5 add partition (dt='a') location 'parta'
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+#### A masked pattern was here ####
+POSTHOOK: Output: default@alter5
+POSTHOOK: Output: default@alter5@dt=a
+PREHOOK: query: describe formatted alter5 partition (dt='a')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@alter5
+POSTHOOK: query: describe formatted alter5 partition (dt='a')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@alter5
+# col_name data_type comment
+
+col1 string
+
+# Partition Information
+# col_name data_type comment
+
+dt string
+
+# Detailed Partition Information
+Partition Value: [a]
+Database: default
+Table: alter5
+#### A masked pattern was here ####
+Partition Parameters:
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: insert into table alter5 partition (dt='a') select key from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@alter5@dt=a
+POSTHOOK: query: insert into table alter5 partition (dt='a') select key from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@alter5@dt=a
+POSTHOOK: Lineage: alter5 PARTITION(dt=a).col1 SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: describe formatted alter5 partition (dt='a')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@alter5
+POSTHOOK: query: describe formatted alter5 partition (dt='a')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@alter5
+# col_name data_type comment
+
+col1 string
+
+# Partition Information
+# col_name data_type comment
+
+dt string
+
+# Detailed Partition Information
+Partition Value: [a]
+Database: default
+Table: alter5
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"col1\":\"true\"}}
+ numFiles 1
+ totalSize 1906
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: explain select * from alter5 where dt='a'
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from alter5 where dt='a'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ TableScan
+ alias: alter5
+ Statistics: Num rows: 19 Data size: 1653 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: col1 (type: string), 'a' (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 19 Data size: 3268 Basic stats: COMPLETE Column stats: COMPLETE
+ ListSink
+
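In this second run the partition was added with an explicit location before the insert, and the describe output above records COLUMN_STATS only, with no BASIC_STATS and no numRows or rawDataSize. Lacking a row count, the planner apparently estimates 19 rows from the 1906-byte totalSize instead of the true 500. If basic stats are wanted, they can be recomputed with standard HiveQL (an illustration, not part of the captured test):

  -- repopulate numRows/rawDataSize for the partition
  analyze table alter5 partition (dt='a') compute statistics;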
+PREHOOK: query: drop table src_stat_part
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table src_stat_part
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table src_stat_part(key string, value string) partitioned by (partitionId int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_stat_part
+POSTHOOK: query: create table src_stat_part(key string, value string) partitioned by (partitionId int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_stat_part
+PREHOOK: query: insert into table src_stat_part partition (partitionId=1)
+select * from src1 limit 5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src1
+PREHOOK: Output: default@src_stat_part@partitionid=1
+POSTHOOK: query: insert into table src_stat_part partition (partitionId=1)
+select * from src1 limit 5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src1
+POSTHOOK: Output: default@src_stat_part@partitionid=1
+POSTHOOK: Lineage: src_stat_part PARTITION(partitionid=1).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_stat_part PARTITION(partitionid=1).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: describe formatted src_stat_part PARTITION(partitionId=1)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@src_stat_part
+POSTHOOK: query: describe formatted src_stat_part PARTITION(partitionId=1)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@src_stat_part
+# col_name data_type comment
+
+key string
+value string
+
+# Partition Information
+# col_name data_type comment
+
+partitionid int
+
+# Detailed Partition Information
+Partition Value: [1]
+Database: default
+Table: src_stat_part
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"}
+ numFiles 1
+ numRows 5
+ rawDataSize 38
+ totalSize 43
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: insert into table src_stat_part partition (partitionId=2)
+select * from src1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src1
+PREHOOK: Output: default@src_stat_part@partitionid=2
+POSTHOOK: query: insert into table src_stat_part partition (partitionId=2)
+select * from src1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src1
+POSTHOOK: Output: default@src_stat_part@partitionid=2
+POSTHOOK: Lineage: src_stat_part PARTITION(partitionid=2).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_stat_part PARTITION(partitionid=2).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: describe formatted src_stat_part PARTITION(partitionId=2)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@src_stat_part
+POSTHOOK: query: describe formatted src_stat_part PARTITION(partitionId=2)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@src_stat_part
+# col_name data_type comment
+
+key string
+value string
+
+# Partition Information
+# col_name data_type comment
+
+partitionid int
+
+# Detailed Partition Information
+Partition Value: [2]
+Database: default
+Table: src_stat_part
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"}
+ numFiles 1
+ numRows 25
+ rawDataSize 191
+ totalSize 216
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
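Each partition of src_stat_part carries its own auto-gathered stats: 5 rows for partitionId=1 (the LIMIT 5 insert) and 25 rows for partitionId=2 (all of src1). In Hive versions that support the column form of DESCRIBE, the per-column statistics behind COLUMN_STATS_ACCURATE can be inspected directly (illustrative, not part of the captured output):

  -- show the min/max/ndv/null counts gathered for one column of one partition
  describe formatted src_stat_part partition (partitionId=1) key;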
+PREHOOK: query: drop table srcbucket_mapjoin
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table srcbucket_mapjoin
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@srcbucket_mapjoin
+POSTHOOK: query: CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@srcbucket_mapjoin
+PREHOOK: query: drop table tab_part
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table tab_part
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tab_part
+POSTHOOK: query: CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tab_part
+PREHOOK: query: drop table srcbucket_mapjoin_part
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table srcbucket_mapjoin_part
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@srcbucket_mapjoin_part
+POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@srcbucket_mapjoin_part
+PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin
+POSTHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part
+POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: query: insert into table tab_part partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcbucket_mapjoin_part
+PREHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: Output: default@tab_part@ds=2008-04-08
+POSTHOOK: query: insert into table tab_part partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcbucket_mapjoin_part
+POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: Output: default@tab_part@ds=2008-04-08
+POSTHOOK: Lineage: tab_part PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin_part)srcbucket_mapjoin_part.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: tab_part PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin_part)srcbucket_mapjoin_part.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: describe formatted tab_part partition (ds='2008-04-08')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@tab_part
+POSTHOOK: query: describe formatted tab_part partition (ds='2008-04-08')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@tab_part
+# col_name data_type comment
+
+key int
+value string
+
+# Partition Information
+# col_name data_type comment
+
+ds string
+
+# Detailed Partition Information
+Partition Value: [2008-04-08]
+Database: default
+Table: tab_part
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"}
+ numFiles 4
+ numRows 500
+ rawDataSize 5312
+ totalSize 5812
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: 4
+Bucket Columns: [key]
+Sort Columns: [Order(col:key, order:1)]
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tab
+POSTHOOK: query: CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tab
+PREHOOK: query: insert into table tab partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcbucket_mapjoin
+PREHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08
+PREHOOK: Output: default@tab@ds=2008-04-08
+POSTHOOK: query: insert into table tab partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcbucket_mapjoin
+POSTHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08
+POSTHOOK: Output: default@tab@ds=2008-04-08
+POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: describe formatted tab partition (ds='2008-04-08')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@tab
+POSTHOOK: query: describe formatted tab partition (ds='2008-04-08')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@tab
+# col_name data_type comment
+
+key int
+value string
+
+# Partition Information
+# col_name data_type comment
+
+ds string
+
+# Detailed Partition Information
+Partition Value: [2008-04-08]
+Database: default
+Table: tab
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"}
+ numFiles 2
+ numRows 242
+ rawDataSize 2566
+ totalSize 2808
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: 2
+Bucket Columns: [key]
+Sort Columns: [Order(col:key, order:1)]
+Storage Desc Params:
+ serialization.format 1
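For the bucketed targets the auto-gathered figures line up with the declared layout and the loaded data: tab_part shows numFiles 4 (its 4 buckets) and numRows 500 (the four srcbucket2*.txt files), while tab shows numFiles 2 and numRows 242 (srcbucket20.txt plus srcbucket22.txt). Quick sanity checks against those counts (illustrative queries, not part of the captured output):

  select count(*) from tab_part where ds = '2008-04-08';  -- expect 500
  select count(*) from tab      where ds = '2008-04-08';  -- expect 242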
+PREHOOK: query: drop table nzhang_part14
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@nzhang_part14
+PREHOOK: Output: default@nzhang_part14
+POSTHOOK: query: drop table nzhang_part14
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@nzhang_part14
+POSTHOOK: Output: default@nzhang_part14
+PREHOOK: query: create table if not exists nzhang_part14 (key string, value string)
+ partitioned by (ds string, hr string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@nzhang_part14
+POSTHOOK: query: create table if not exists nzhang_part14 (key string, value string)
+ partitioned by (ds string, hr string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@nzhang_part14
+PREHOOK: query: describe formatted nzhang_part14
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@nzhang_part14
+POSTHOOK: query: describe formatted nzhang_part14
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@nzhang_part14
+# col_name data_type comment
+
+key string
+value string
+
+# Partition Information
+# col_name data_type comment
+
+ds string
+hr string
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: insert into table nzhang_part14 partition(ds, hr)
+select key, value, ds, hr from (
+ select * from (select 'k1' as key, cast(null as string) as value, '1' as ds, '2' as hr from src limit 2)a
+ union all
+ select * from (select 'k2' as key, '' as value, '1' as ds, '3' as hr from src limit 2)b
+ union all
+ select * from (select 'k3' as key, ' ' as value, '2' as ds, '1' as hr from src limit 2)c
+) T
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@nzhang_part14
+POSTHOOK: query: insert into table nzhang_part14 partition(ds, hr)
+select key, value, ds, hr from (
+ select * from (select 'k1' as key, cast(null as string) as value, '1' as ds, '2' as hr from src limit 2)a
+ union all
+ select * from (select 'k2' as key, '' as value, '1' as ds, '3' as hr from src limit 2)b
+ union all
+ select * from (select 'k3' as key, ' ' as value, '2' as ds, '1' as hr from src limit 2)c
+) T
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@nzhang_part14@ds=1/hr=2
+POSTHOOK: Output: default@nzhang_part14@ds=1/hr=3
+POSTHOOK: Output: default@nzhang_part14@ds=2/hr=1
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=1,hr=2).key EXPRESSION []
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=1,hr=2).value EXPRESSION []
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=1,hr=3).key EXPRESSION []
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=1,hr=3).value EXPRESSION []
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2,hr=1).key EXPRESSION []
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2,hr=1).value EXPRESSION []
+PREHOOK: query: desc formatted nzhang_part14 partition(ds='1', hr='3')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@nzhang_part14
+POSTHOOK: query: desc formatted nzhang_part14 partition(ds='1', hr='3')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@nzhang_part14
+# col_name data_type comment
+
+key string
+value string
+
+# Partition Information
+# col_name data_type comment
+
+ds string
+hr string
+
+# Detailed Partition Information
+Partition Value: [1, 3]
+Database: default
+Table: nzhang_part14
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"}
+ numFiles 1
+ numRows 2
+ rawDataSize 6
+ totalSize 8
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
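The fully dynamic insert above created three partitions in one statement, and each received its own stats entry; ds=1/hr=3 shows numRows 2, matching its LIMIT 2 branch of the UNION ALL. A condensed sketch of the pattern, assuming the test runs with nonstrict dynamic-partition mode (the setting is not shown in this output):

  set hive.exec.dynamic.partition.mode=nonstrict;
  -- ds and hr both come from the select list; each partition created
  -- this way gets its own numRows and column stats
  insert into table nzhang_part14 partition (ds, hr)
  select key, value, '1' as ds, '3' as hr from src limit 2;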
+PREHOOK: query: INSERT into TABLE nzhang_part14 PARTITION (ds='2010-03-03', hr)
+SELECT key, value, hr FROM srcpart WHERE ds is not null and hr>10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+PREHOOK: Output: default@nzhang_part14@ds=2010-03-03
+POSTHOOK: query: INSERT into TABLE nzhang_part14 PARTITION (ds='2010-03-03', hr)
+SELECT key, value, hr FROM srcpart WHERE ds is not null and hr>10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+POSTHOOK: Output: default@nzhang_part14@ds=2010-03-03/hr=11
+POSTHOOK: Output: default@nzhang_part14@ds=2010-03-03/hr=12
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2010-03-03,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2010-03-03,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2010-03-03,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2010-03-03,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: desc formatted nzhang_part14 PARTITION(ds='2010-03-03', hr='12')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@nzhang_part14
+POSTHOOK: query: desc formatted nzhang_part14 PARTITION(ds='2010-03-03', hr='12')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@nzhang_part14
+# col_name data_type comment
+
+key string
+value string
+
+# Partition Information
+# col_name data_type comment
+
+ds string
+hr string
+
+# Detailed Partition Information
+Partition Value: [2010-03-03, 12]
+Database: default
+Table: nzhang_part14
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"}
+ numFiles 1
+ numRows 1000
+ rawDataSize 10624
+ totalSize 11624
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
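With the mixed spec (static ds='2010-03-03', dynamic hr) the 1000-row figure follows from srcpart's layout: 4 partitions of 500 rows each pass the hr>10 filter and split evenly between hr=11 and hr=12. A quick verification of that arithmetic (illustrative only):

  select hr, count(*) from srcpart where hr > 10 group by hr;  -- 1000 per hr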
+PREHOOK: query: drop table nzhang_part14
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@nzhang_part14
+PREHOOK: Output: default@nzhang_part14
+POSTHOOK: query: drop table nzhang_part14
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@nzhang_part14
+POSTHOOK: Output: default@nzhang_part14
+PREHOOK: query: create table if not exists nzhang_part14 (key string, value string)
+partitioned by (ds string, hr string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@nzhang_part14
+POSTHOOK: query: create table if not exists nzhang_part14 (key string, value string)
+partitioned by (ds string, hr string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@nzhang_part14
+PREHOOK: query: INSERT into TABLE nzhang_part14 PARTITION (ds='2010-03-03', hr)
+SELECT key, value, hr FROM srcpart WHERE ds is not null and hr>10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+PREHOOK: Output: default@nzhang_part14@ds=2010-03-03
+POSTHOOK: query: INSERT into TABLE nzhang_part14 PARTITION (ds='2010-03-03', hr)
+SELECT key, value, hr FROM srcpart WHERE ds is not null and hr>10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+POSTHOOK: Output: default@nzhang_part14@ds=2010-03-03/hr=11
+POSTHOOK: Output: default@nzhang_part14@ds=2010-03-03/hr=12
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2010-03-03,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2010-03-03,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2010-03-03,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2010-03-03,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: desc formatted nzhang_part14 PARTITION(ds='2010-03-03', hr='12')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@nzhang_part14
+POSTHOOK: query: desc formatted nzhang_part14 PARTITION(ds='2010-03-03', hr='12')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@nzhang_part14
+# col_name data_type comment
+
+key string
+value string
+
+# Partition Information
+# col_name data_type comment
+
+ds string
+hr string
+
+# Detailed Partition Information
+Partition Value: [2010-03-03, 12]
+Database: default
+Table: nzhang_part14
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"}
+ numFiles 1
+ numRows 1000
+ rawDataSize 10624
+ totalSize 11624
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: drop table a
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@a
+PREHOOK: Output: default@a
+POSTHOOK: query: drop table a
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@a
+POSTHOOK: Output: default@a
+PREHOOK: query: create table a (key string, value string)
+partitioned by (ds string, hr string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@a
+POSTHOOK: query: create table a (key string, value string)
+partitioned by (ds string, hr string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@a
+PREHOOK: query: drop table b
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@b
+PREHOOK: Output: default@b
+POSTHOOK: query: drop table b
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@b
+POSTHOOK: Output: default@b
+PREHOOK: query: create table b (key string, value string)
+partitioned by (ds string, hr string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@b
+POSTHOOK: query: create table b (key string, value string)
+partitioned by (ds string, hr string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@b
+PREHOOK: query: drop table c
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table c
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table c (key string, value string)
+partitioned by (ds string, hr string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@c
+POSTHOOK: query: create table c (key string, value string)
+partitioned by (ds string, hr string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@c
+PREHOOK: query: FROM srcpart
+INSERT into TABLE a PARTITION (ds='2010-03-11', hr) SELECT key, value, hr WHERE ds is not null and hr>10
+INSERT into TABLE b PARTITION (ds='2010-04-11', hr) SELECT key, value, hr WHERE ds is not null and hr>11
+INSERT into TABLE c PARTITION (ds='2010-05-11', hr) SELECT key, value, hr WHERE hr>0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+PREHOOK: Output: default@a@ds=2010-03-11
+PREHOOK: Output: default@b@ds=2010-04-11
+PREHOOK: Output: default@c@ds=2010-05-11
+POSTHOOK: query: FROM srcpart
+INSERT into TABLE a PARTITION (ds='2010-03-11', hr) SELECT key, value, hr WHERE ds is not null and hr>10
+INSERT into TABLE b PARTITION (ds='2010-04-11', hr) SELECT key, value, hr WHERE ds is not null and hr>11
+INSERT into TABLE c PARTITION (ds='2010-05-11', hr) SELECT key, value, hr WHERE hr>0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+POSTHOOK: Output: default@a@ds=2010-03-11/hr=11
+POSTHOOK: Output: default@a@ds=2010-03-11/hr=12
+POSTHOOK: Output: default@b@ds=2010-04-11/hr=12
+POSTHOOK: Output: default@c@ds=2010-05-11/hr=11
+POSTHOOK: Output: default@c@ds=2010-05-11/hr=12
+POSTHOOK: Lineage: a PARTITION(ds=2010-03-11,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: a PARTITION(ds=2010-03-11,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: a PARTITION(ds=2010-03-11,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: a PARTITION(ds=2010-03-11,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: b PARTITION(ds=2010-04-11,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: b PARTITION(ds=2010-04-11,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: c PARTITION(ds=2010-05-11,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: c PARTITION(ds=2010-05-11,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: c PARTITION(ds=2010-05-11,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: c PARTITION(ds=2010-05-11,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: explain select key from a
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select key from a
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+ ListSink
+
+PREHOOK: query: explain select value from b
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select value from b
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 1000 Data size: 91000 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: value (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 1000 Data size: 91000 Basic stats: COMPLETE Column stats: COMPLETE
+ ListSink
+
+PREHOOK: query: explain select key from b
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select key from b
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: COMPLETE
+ ListSink
+
+PREHOOK: query: explain select value from c
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select value from c
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ TableScan
+ alias: c
+ Statistics: Num rows: 2000 Data size: 182000 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: value (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 2000 Data size: 182000 Basic stats: COMPLETE Column stats: COMPLETE
+ ListSink
+
+PREHOOK: query: explain select key from c
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select key from c
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ TableScan
+ alias: c
+ Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+ ListSink
+
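The closing multi-insert writes three tables from one scan of srcpart (4 partitions x 500 rows), and the plan statistics above follow from the filters: a takes hr>10 (2000 rows), b takes hr>11 (1000 rows, hr=12 only), and c takes hr>0 (2000 rows). All three plans report "Column stats: COMPLETE" although no ANALYZE was ever run, which is the behavior this golden file pins down. One of the counts, checked by hand (illustrative):

  select count(*) from srcpart where hr > 11;  -- 1000, matching table b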