Posted to commits@hive.apache.org by jc...@apache.org on 2015/09/02 08:36:15 UTC
hive git commit: HIVE-11698: Add additional test for PointLookupOptimizer (Jesus Camacho Rodriguez, reviewed by Hari Sankar Sivarama Subramaniyan)
Repository: hive
Updated Branches:
refs/heads/master f2056a13e -> dbdd6116b
HIVE-11698: Add additional test for PointLookupOptimizer (Jesus Camacho Rodriguez, reviewed by Hari Sankar Sivarama Subramaniyan)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/dbdd6116
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/dbdd6116
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/dbdd6116
Branch: refs/heads/master
Commit: dbdd6116bd9e25bdb5112d21fd40ec09d7f39adc
Parents: f2056a1
Author: Jesus Camacho Rodriguez <jc...@apache.org>
Authored: Wed Sep 2 08:35:27 2015 +0200
Committer: Jesus Camacho Rodriguez <jc...@apache.org>
Committed: Wed Sep 2 08:35:37 2015 +0200
----------------------------------------------------------------------
.../test/queries/clientpositive/pointlookup3.q | 41 +
.../results/clientpositive/pointlookup3.q.out | 1394 ++++++++++++++++++
2 files changed, 1435 insertions(+)
----------------------------------------------------------------------
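For context: PointLookupOptimizer rewrites a disjunction of equality
predicates into a single IN clause over structs once the number of
disjuncts reaches hive.optimize.point.lookup.min; with
hive.optimize.point.lookup.extract=true it additionally extracts
per-column IN conditions (e.g. ds1 IN ('2000-04-08','2000-04-09'))
so that partition pruning can use them. A minimal sketch of the
rewrite, using the first query of the new test (the struct() form is
taken from the expected plan below, not from any other source):

    -- predicate as written in pointlookup3.q
    where (ds1='2000-04-08' and key=1) or (ds1='2000-04-09' and key=2)

    -- equivalent form after the optimizer runs, as shown in the plan
    where (struct(ds1,key)) in (const struct('2000-04-08',1),
                                const struct('2000-04-09',2))
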
http://git-wip-us.apache.org/repos/asf/hive/blob/dbdd6116/ql/src/test/queries/clientpositive/pointlookup3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/pointlookup3.q b/ql/src/test/queries/clientpositive/pointlookup3.q
new file mode 100644
index 0000000..3daa94b
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/pointlookup3.q
@@ -0,0 +1,41 @@
+drop table pcr_t1;
+
+create table pcr_t1 (key int, value string) partitioned by (ds1 string, ds2 string);
+insert overwrite table pcr_t1 partition (ds1='2000-04-08', ds2='2001-04-08') select * from src where key < 20 order by key;
+insert overwrite table pcr_t1 partition (ds1='2000-04-09', ds2='2001-04-09') select * from src where key < 20 order by key;
+insert overwrite table pcr_t1 partition (ds1='2000-04-10', ds2='2001-04-10') select * from src where key < 20 order by key;
+
+set hive.optimize.point.lookup.min=2;
+set hive.optimize.point.lookup.extract=true;
+
+explain extended
+select key, value, ds1, ds2
+from pcr_t1
+where (ds1='2000-04-08' and key=1) or (ds1='2000-04-09' and key=2)
+order by key, value, ds1, ds2;
+
+explain extended
+select key, value, ds1, ds2
+from pcr_t1
+where (ds1='2000-04-08' and ds2='2001-04-08' and key=1) or (ds1='2000-04-09' and ds2='2001-04-08' and key=2)
+order by key, value, ds1, ds2;
+
+explain extended
+select *
+from pcr_t1 t1 join pcr_t1 t2
+on t1.key=t2.key and t1.ds1='2000-04-08' and t2.ds2='2001-04-08'
+order by t1.key;
+
+explain extended
+select *
+from pcr_t1 t1 join pcr_t1 t2
+on t1.key=t2.key and t1.ds1='2000-04-08' and t2.ds1='2000-04-09'
+order by t1.key;
+
+explain extended
+select *
+from pcr_t1 t1 join pcr_t1 t2
+where (t1.ds1='2000-04-08' and t2.key=1) or (t1.ds1='2000-04-09' and t2.key=2)
+order by t2.key, t2.value, t1.ds1;
+
+drop table pcr_t1;
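
A note on what the expected plans below verify: in the first query the
struct IN predicate prunes the scan to the two matching ds1 partitions;
in the second, each disjunct fixes both partition columns, so only the
ds1=2000-04-08/ds2=2001-04-08 partition survives (the 2000-04-09
disjunct matches no existing partition) and the residual filter
collapses to a bare key comparison, as shown in the plan:

    predicate: (key = 1) (type: boolean)

The two join queries are pruned on each side by the partition
predicates in the ON clause, and the final WHERE-over-join query keeps
both the extracted ds1 IN (...) condition and the struct IN filter,
applied after the cross-product join.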
http://git-wip-us.apache.org/repos/asf/hive/blob/dbdd6116/ql/src/test/results/clientpositive/pointlookup3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/pointlookup3.q.out b/ql/src/test/results/clientpositive/pointlookup3.q.out
new file mode 100644
index 0000000..4cfb97e
--- /dev/null
+++ b/ql/src/test/results/clientpositive/pointlookup3.q.out
@@ -0,0 +1,1394 @@
+PREHOOK: query: drop table pcr_t1
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table pcr_t1
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table pcr_t1 (key int, value string) partitioned by (ds1 string, ds2 string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@pcr_t1
+POSTHOOK: query: create table pcr_t1 (key int, value string) partitioned by (ds1 string, ds2 string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@pcr_t1
+PREHOOK: query: insert overwrite table pcr_t1 partition (ds1='2000-04-08', ds2='2001-04-08') select * from src where key < 20 order by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@pcr_t1@ds1=2000-04-08/ds2=2001-04-08
+POSTHOOK: query: insert overwrite table pcr_t1 partition (ds1='2000-04-08', ds2='2001-04-08') select * from src where key < 20 order by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@pcr_t1@ds1=2000-04-08/ds2=2001-04-08
+POSTHOOK: Lineage: pcr_t1 PARTITION(ds1=2000-04-08,ds2=2001-04-08).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: pcr_t1 PARTITION(ds1=2000-04-08,ds2=2001-04-08).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert overwrite table pcr_t1 partition (ds1='2000-04-09', ds2='2001-04-09') select * from src where key < 20 order by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@pcr_t1@ds1=2000-04-09/ds2=2001-04-09
+POSTHOOK: query: insert overwrite table pcr_t1 partition (ds1='2000-04-09', ds2='2001-04-09') select * from src where key < 20 order by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@pcr_t1@ds1=2000-04-09/ds2=2001-04-09
+POSTHOOK: Lineage: pcr_t1 PARTITION(ds1=2000-04-09,ds2=2001-04-09).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: pcr_t1 PARTITION(ds1=2000-04-09,ds2=2001-04-09).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert overwrite table pcr_t1 partition (ds1='2000-04-10', ds2='2001-04-10') select * from src where key < 20 order by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@pcr_t1@ds1=2000-04-10/ds2=2001-04-10
+POSTHOOK: query: insert overwrite table pcr_t1 partition (ds1='2000-04-10', ds2='2001-04-10') select * from src where key < 20 order by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@pcr_t1@ds1=2000-04-10/ds2=2001-04-10
+POSTHOOK: Lineage: pcr_t1 PARTITION(ds1=2000-04-10,ds2=2001-04-10).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: pcr_t1 PARTITION(ds1=2000-04-10,ds2=2001-04-10).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: explain extended
+select key, value, ds1, ds2
+from pcr_t1
+where (ds1='2000-04-08' and key=1) or (ds1='2000-04-09' and key=2)
+order by key, value, ds1, ds2
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended
+select key, value, ds1, ds2
+from pcr_t1
+where (ds1='2000-04-08' and key=1) or (ds1='2000-04-09' and key=2)
+order by key, value, ds1, ds2
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+
+TOK_QUERY
+ TOK_FROM
+ TOK_TABREF
+ TOK_TABNAME
+ pcr_t1
+ TOK_INSERT
+ TOK_DESTINATION
+ TOK_DIR
+ TOK_TMP_FILE
+ TOK_SELECT
+ TOK_SELEXPR
+ TOK_TABLE_OR_COL
+ key
+ TOK_SELEXPR
+ TOK_TABLE_OR_COL
+ value
+ TOK_SELEXPR
+ TOK_TABLE_OR_COL
+ ds1
+ TOK_SELEXPR
+ TOK_TABLE_OR_COL
+ ds2
+ TOK_WHERE
+ or
+ and
+ =
+ TOK_TABLE_OR_COL
+ ds1
+ '2000-04-08'
+ =
+ TOK_TABLE_OR_COL
+ key
+ 1
+ and
+ =
+ TOK_TABLE_OR_COL
+ ds1
+ '2000-04-09'
+ =
+ TOK_TABLE_OR_COL
+ key
+ 2
+ TOK_ORDERBY
+ TOK_TABSORTCOLNAMEASC
+ TOK_TABLE_OR_COL
+ key
+ TOK_TABSORTCOLNAMEASC
+ TOK_TABLE_OR_COL
+ value
+ TOK_TABSORTCOLNAMEASC
+ TOK_TABLE_OR_COL
+ ds1
+ TOK_TABSORTCOLNAMEASC
+ TOK_TABLE_OR_COL
+ ds2
+
+
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: pcr_t1
+ Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: (struct(ds1,key)) IN (const struct('2000-04-08',1), const struct('2000-04-09',2)) (type: boolean)
+ Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: int), value (type: string), ds1 (type: string), ds2 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string)
+ sort order: ++++
+ Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
+ tag: -1
+ auto parallelism: false
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: ds2=2001-04-08
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds1 2000-04-08
+ ds2 2001-04-08
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count -1
+ columns key,value
+ columns.comments
+ columns.types int:string
+#### A masked pattern was here ####
+ name default.pcr_t1
+ numFiles 1
+ numRows 20
+ partition_columns ds1/ds2
+ partition_columns.types string:string
+ rawDataSize 160
+ serialization.ddl struct pcr_t1 { i32 key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 180
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ bucket_count -1
+ columns key,value
+ columns.comments
+ columns.types int:string
+#### A masked pattern was here ####
+ name default.pcr_t1
+ partition_columns ds1/ds2
+ partition_columns.types string:string
+ serialization.ddl struct pcr_t1 { i32 key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.pcr_t1
+ name: default.pcr_t1
+#### A masked pattern was here ####
+ Partition
+ base file name: ds2=2001-04-09
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds1 2000-04-09
+ ds2 2001-04-09
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count -1
+ columns key,value
+ columns.comments
+ columns.types int:string
+#### A masked pattern was here ####
+ name default.pcr_t1
+ numFiles 1
+ numRows 20
+ partition_columns ds1/ds2
+ partition_columns.types string:string
+ rawDataSize 160
+ serialization.ddl struct pcr_t1 { i32 key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 180
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ bucket_count -1
+ columns key,value
+ columns.comments
+ columns.types int:string
+#### A masked pattern was here ####
+ name default.pcr_t1
+ partition_columns ds1/ds2
+ partition_columns.types string:string
+ serialization.ddl struct pcr_t1 { i32 key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.pcr_t1
+ name: default.pcr_t1
+ Truncated Path -> Alias:
+ /pcr_t1/ds1=2000-04-08/ds2=2001-04-08 [pcr_t1]
+ /pcr_t1/ds1=2000-04-09/ds2=2001-04-09 [pcr_t1]
+ Needs Tagging: false
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string), KEY.reducesinkkey3 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ GlobalTableId: 0
+#### A masked pattern was here ####
+ NumFilesPerFileSink: 1
+ Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ columns _col0,_col1,_col2,_col3
+ columns.types int:string:string:string
+ escape.delim \
+ hive.serialization.extend.additional.nesting.levels true
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ TotalFiles: 1
+ GatherStats: false
+ MultiFileSpray: false
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: explain extended
+select key, value, ds1, ds2
+from pcr_t1
+where (ds1='2000-04-08' and ds2='2001-04-08' and key=1) or (ds1='2000-04-09' and ds2='2001-04-08' and key=2)
+order by key, value, ds1, ds2
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended
+select key, value, ds1, ds2
+from pcr_t1
+where (ds1='2000-04-08' and ds2='2001-04-08' and key=1) or (ds1='2000-04-09' and ds2='2001-04-08' and key=2)
+order by key, value, ds1, ds2
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+
+TOK_QUERY
+ TOK_FROM
+ TOK_TABREF
+ TOK_TABNAME
+ pcr_t1
+ TOK_INSERT
+ TOK_DESTINATION
+ TOK_DIR
+ TOK_TMP_FILE
+ TOK_SELECT
+ TOK_SELEXPR
+ TOK_TABLE_OR_COL
+ key
+ TOK_SELEXPR
+ TOK_TABLE_OR_COL
+ value
+ TOK_SELEXPR
+ TOK_TABLE_OR_COL
+ ds1
+ TOK_SELEXPR
+ TOK_TABLE_OR_COL
+ ds2
+ TOK_WHERE
+ or
+ and
+ and
+ =
+ TOK_TABLE_OR_COL
+ ds1
+ '2000-04-08'
+ =
+ TOK_TABLE_OR_COL
+ ds2
+ '2001-04-08'
+ =
+ TOK_TABLE_OR_COL
+ key
+ 1
+ and
+ and
+ =
+ TOK_TABLE_OR_COL
+ ds1
+ '2000-04-09'
+ =
+ TOK_TABLE_OR_COL
+ ds2
+ '2001-04-08'
+ =
+ TOK_TABLE_OR_COL
+ key
+ 2
+ TOK_ORDERBY
+ TOK_TABSORTCOLNAMEASC
+ TOK_TABLE_OR_COL
+ key
+ TOK_TABSORTCOLNAMEASC
+ TOK_TABLE_OR_COL
+ value
+ TOK_TABSORTCOLNAMEASC
+ TOK_TABLE_OR_COL
+ ds1
+ TOK_TABSORTCOLNAMEASC
+ TOK_TABLE_OR_COL
+ ds2
+
+
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: pcr_t1
+ Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: (key = 1) (type: boolean)
+ Statistics: Num rows: 10 Data size: 80 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: value (type: string), ds1 (type: string)
+ outputColumnNames: _col1, _col2
+ Statistics: Num rows: 10 Data size: 80 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: 1 (type: int), _col1 (type: string), _col2 (type: string), '2001-04-08' (type: string)
+ sort order: ++++
+ Statistics: Num rows: 10 Data size: 80 Basic stats: COMPLETE Column stats: NONE
+ tag: -1
+ auto parallelism: false
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: ds2=2001-04-08
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds1 2000-04-08
+ ds2 2001-04-08
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count -1
+ columns key,value
+ columns.comments
+ columns.types int:string
+#### A masked pattern was here ####
+ name default.pcr_t1
+ numFiles 1
+ numRows 20
+ partition_columns ds1/ds2
+ partition_columns.types string:string
+ rawDataSize 160
+ serialization.ddl struct pcr_t1 { i32 key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 180
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ bucket_count -1
+ columns key,value
+ columns.comments
+ columns.types int:string
+#### A masked pattern was here ####
+ name default.pcr_t1
+ partition_columns ds1/ds2
+ partition_columns.types string:string
+ serialization.ddl struct pcr_t1 { i32 key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.pcr_t1
+ name: default.pcr_t1
+ Truncated Path -> Alias:
+ /pcr_t1/ds1=2000-04-08/ds2=2001-04-08 [pcr_t1]
+ Needs Tagging: false
+ Reduce Operator Tree:
+ Select Operator
+ expressions: 1 (type: int), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string), '2001-04-08' (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 10 Data size: 80 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ GlobalTableId: 0
+#### A masked pattern was here ####
+ NumFilesPerFileSink: 1
+ Statistics: Num rows: 10 Data size: 80 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ columns _col0,_col1,_col2,_col3
+ columns.types int:string:string:string
+ escape.delim \
+ hive.serialization.extend.additional.nesting.levels true
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ TotalFiles: 1
+ GatherStats: false
+ MultiFileSpray: false
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: explain extended
+select *
+from pcr_t1 t1 join pcr_t1 t2
+on t1.key=t2.key and t1.ds1='2000-04-08' and t2.ds2='2001-04-08'
+order by t1.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended
+select *
+from pcr_t1 t1 join pcr_t1 t2
+on t1.key=t2.key and t1.ds1='2000-04-08' and t2.ds2='2001-04-08'
+order by t1.key
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+
+TOK_QUERY
+ TOK_FROM
+ TOK_JOIN
+ TOK_TABREF
+ TOK_TABNAME
+ pcr_t1
+ t1
+ TOK_TABREF
+ TOK_TABNAME
+ pcr_t1
+ t2
+ and
+ and
+ =
+ .
+ TOK_TABLE_OR_COL
+ t1
+ key
+ .
+ TOK_TABLE_OR_COL
+ t2
+ key
+ =
+ .
+ TOK_TABLE_OR_COL
+ t1
+ ds1
+ '2000-04-08'
+ =
+ .
+ TOK_TABLE_OR_COL
+ t2
+ ds2
+ '2001-04-08'
+ TOK_INSERT
+ TOK_DESTINATION
+ TOK_DIR
+ TOK_TMP_FILE
+ TOK_SELECT
+ TOK_SELEXPR
+ TOK_ALLCOLREF
+ TOK_ORDERBY
+ TOK_TABSORTCOLNAMEASC
+ .
+ TOK_TABLE_OR_COL
+ t1
+ key
+
+
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: t1
+ Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 10 Data size: 80 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: int)
+ sort order: +
+ Map-reduce partition columns: key (type: int)
+ Statistics: Num rows: 10 Data size: 80 Basic stats: COMPLETE Column stats: NONE
+ tag: 0
+ value expressions: value (type: string), ds2 (type: string)
+ auto parallelism: false
+ TableScan
+ alias: t2
+ Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 10 Data size: 80 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: int)
+ sort order: +
+ Map-reduce partition columns: key (type: int)
+ Statistics: Num rows: 10 Data size: 80 Basic stats: COMPLETE Column stats: NONE
+ tag: 1
+ value expressions: value (type: string), ds1 (type: string)
+ auto parallelism: false
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: ds2=2001-04-08
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds1 2000-04-08
+ ds2 2001-04-08
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count -1
+ columns key,value
+ columns.comments
+ columns.types int:string
+#### A masked pattern was here ####
+ name default.pcr_t1
+ numFiles 1
+ numRows 20
+ partition_columns ds1/ds2
+ partition_columns.types string:string
+ rawDataSize 160
+ serialization.ddl struct pcr_t1 { i32 key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 180
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ bucket_count -1
+ columns key,value
+ columns.comments
+ columns.types int:string
+#### A masked pattern was here ####
+ name default.pcr_t1
+ partition_columns ds1/ds2
+ partition_columns.types string:string
+ serialization.ddl struct pcr_t1 { i32 key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.pcr_t1
+ name: default.pcr_t1
+ Truncated Path -> Alias:
+ /pcr_t1/ds1=2000-04-08/ds2=2001-04-08 [t1, t2]
+ Needs Tagging: true
+ Reduce Operator Tree:
+ Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 key (type: int)
+ 1 key (type: int)
+ outputColumnNames: _col0, _col1, _col3, _col7, _col8, _col9
+ Statistics: Num rows: 11 Data size: 88 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col0 (type: int), _col1 (type: string), _col3 (type: string), _col7 (type: int), _col8 (type: string), _col9 (type: string)
+ outputColumnNames: _col0, _col1, _col3, _col4, _col5, _col6
+ Statistics: Num rows: 11 Data size: 88 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ GlobalTableId: 0
+#### A masked pattern was here ####
+ NumFilesPerFileSink: 1
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ properties:
+ columns _col0,_col1,_col3,_col4,_col5,_col6
+ columns.types int,string,string,int,string,string
+ escape.delim \
+ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+ serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+ TotalFiles: 1
+ GatherStats: false
+ MultiFileSpray: false
+
+ Stage: Stage-2
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ GatherStats: false
+ Reduce Output Operator
+ key expressions: _col0 (type: int)
+ sort order: +
+ Statistics: Num rows: 11 Data size: 88 Basic stats: COMPLETE Column stats: NONE
+ tag: -1
+ value expressions: _col1 (type: string), _col3 (type: string), _col4 (type: int), _col5 (type: string), _col6 (type: string)
+ auto parallelism: false
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: -mr-10003
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ properties:
+ columns _col0,_col1,_col3,_col4,_col5,_col6
+ columns.types int,string,string,int,string,string
+ escape.delim \
+ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+ serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ properties:
+ columns _col0,_col1,_col3,_col4,_col5,_col6
+ columns.types int,string,string,int,string,string
+ escape.delim \
+ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+ serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+ Truncated Path -> Alias:
+#### A masked pattern was here ####
+ Needs Tagging: false
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string), '2000-04-08' (type: string), VALUE._col2 (type: string), VALUE._col3 (type: int), VALUE._col4 (type: string), VALUE._col5 (type: string), '2001-04-08' (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7
+ Statistics: Num rows: 11 Data size: 88 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ GlobalTableId: 0
+#### A masked pattern was here ####
+ NumFilesPerFileSink: 1
+ Statistics: Num rows: 11 Data size: 88 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7
+ columns.types int:string:string:string:int:string:string:string
+ escape.delim \
+ hive.serialization.extend.additional.nesting.levels true
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ TotalFiles: 1
+ GatherStats: false
+ MultiFileSpray: false
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: explain extended
+select *
+from pcr_t1 t1 join pcr_t1 t2
+on t1.key=t2.key and t1.ds1='2000-04-08' and t2.ds1='2000-04-09'
+order by t1.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended
+select *
+from pcr_t1 t1 join pcr_t1 t2
+on t1.key=t2.key and t1.ds1='2000-04-08' and t2.ds1='2000-04-09'
+order by t1.key
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+
+TOK_QUERY
+ TOK_FROM
+ TOK_JOIN
+ TOK_TABREF
+ TOK_TABNAME
+ pcr_t1
+ t1
+ TOK_TABREF
+ TOK_TABNAME
+ pcr_t1
+ t2
+ and
+ and
+ =
+ .
+ TOK_TABLE_OR_COL
+ t1
+ key
+ .
+ TOK_TABLE_OR_COL
+ t2
+ key
+ =
+ .
+ TOK_TABLE_OR_COL
+ t1
+ ds1
+ '2000-04-08'
+ =
+ .
+ TOK_TABLE_OR_COL
+ t2
+ ds1
+ '2000-04-09'
+ TOK_INSERT
+ TOK_DESTINATION
+ TOK_DIR
+ TOK_TMP_FILE
+ TOK_SELECT
+ TOK_SELEXPR
+ TOK_ALLCOLREF
+ TOK_ORDERBY
+ TOK_TABSORTCOLNAMEASC
+ .
+ TOK_TABLE_OR_COL
+ t1
+ key
+
+
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: t1
+ Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 10 Data size: 80 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: int)
+ sort order: +
+ Map-reduce partition columns: key (type: int)
+ Statistics: Num rows: 10 Data size: 80 Basic stats: COMPLETE Column stats: NONE
+ tag: 0
+ value expressions: value (type: string), ds2 (type: string)
+ auto parallelism: false
+ TableScan
+ alias: t2
+ Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 10 Data size: 80 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: int)
+ sort order: +
+ Map-reduce partition columns: key (type: int)
+ Statistics: Num rows: 10 Data size: 80 Basic stats: COMPLETE Column stats: NONE
+ tag: 1
+ value expressions: value (type: string), ds2 (type: string)
+ auto parallelism: false
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: ds2=2001-04-08
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds1 2000-04-08
+ ds2 2001-04-08
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count -1
+ columns key,value
+ columns.comments
+ columns.types int:string
+#### A masked pattern was here ####
+ name default.pcr_t1
+ numFiles 1
+ numRows 20
+ partition_columns ds1/ds2
+ partition_columns.types string:string
+ rawDataSize 160
+ serialization.ddl struct pcr_t1 { i32 key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 180
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ bucket_count -1
+ columns key,value
+ columns.comments
+ columns.types int:string
+#### A masked pattern was here ####
+ name default.pcr_t1
+ partition_columns ds1/ds2
+ partition_columns.types string:string
+ serialization.ddl struct pcr_t1 { i32 key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.pcr_t1
+ name: default.pcr_t1
+#### A masked pattern was here ####
+ Partition
+ base file name: ds2=2001-04-09
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds1 2000-04-09
+ ds2 2001-04-09
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count -1
+ columns key,value
+ columns.comments
+ columns.types int:string
+#### A masked pattern was here ####
+ name default.pcr_t1
+ numFiles 1
+ numRows 20
+ partition_columns ds1/ds2
+ partition_columns.types string:string
+ rawDataSize 160
+ serialization.ddl struct pcr_t1 { i32 key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 180
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ bucket_count -1
+ columns key,value
+ columns.comments
+ columns.types int:string
+#### A masked pattern was here ####
+ name default.pcr_t1
+ partition_columns ds1/ds2
+ partition_columns.types string:string
+ serialization.ddl struct pcr_t1 { i32 key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.pcr_t1
+ name: default.pcr_t1
+ Truncated Path -> Alias:
+ /pcr_t1/ds1=2000-04-08/ds2=2001-04-08 [t1]
+ /pcr_t1/ds1=2000-04-09/ds2=2001-04-09 [t2]
+ Needs Tagging: true
+ Reduce Operator Tree:
+ Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 key (type: int)
+ 1 key (type: int)
+ outputColumnNames: _col0, _col1, _col3, _col7, _col8, _col10
+ Statistics: Num rows: 11 Data size: 88 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col0 (type: int), _col1 (type: string), _col3 (type: string), _col7 (type: int), _col8 (type: string), _col10 (type: string)
+ outputColumnNames: _col0, _col1, _col3, _col4, _col5, _col7
+ Statistics: Num rows: 11 Data size: 88 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ GlobalTableId: 0
+#### A masked pattern was here ####
+ NumFilesPerFileSink: 1
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ properties:
+ columns _col0,_col1,_col3,_col4,_col5,_col7
+ columns.types int,string,string,int,string,string
+ escape.delim \
+ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+ serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+ TotalFiles: 1
+ GatherStats: false
+ MultiFileSpray: false
+
+ Stage: Stage-2
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ GatherStats: false
+ Reduce Output Operator
+ key expressions: _col0 (type: int)
+ sort order: +
+ Statistics: Num rows: 11 Data size: 88 Basic stats: COMPLETE Column stats: NONE
+ tag: -1
+ value expressions: _col1 (type: string), _col3 (type: string), _col4 (type: int), _col5 (type: string), _col7 (type: string)
+ auto parallelism: false
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: -mr-10003
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ properties:
+ columns _col0,_col1,_col3,_col4,_col5,_col7
+ columns.types int,string,string,int,string,string
+ escape.delim \
+ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+ serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ properties:
+ columns _col0,_col1,_col3,_col4,_col5,_col7
+ columns.types int,string,string,int,string,string
+ escape.delim \
+ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+ serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+ Truncated Path -> Alias:
+#### A masked pattern was here ####
+ Needs Tagging: false
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string), '2000-04-08' (type: string), VALUE._col2 (type: string), VALUE._col3 (type: int), VALUE._col4 (type: string), '2000-04-09' (type: string), VALUE._col6 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7
+ Statistics: Num rows: 11 Data size: 88 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ GlobalTableId: 0
+#### A masked pattern was here ####
+ NumFilesPerFileSink: 1
+ Statistics: Num rows: 11 Data size: 88 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7
+ columns.types int:string:string:string:int:string:string:string
+ escape.delim \
+ hive.serialization.extend.additional.nesting.levels true
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ TotalFiles: 1
+ GatherStats: false
+ MultiFileSpray: false
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+Warning: Shuffle Join JOIN[4][tables = [t1, t2]] in Stage 'Stage-1:MAPRED' is a cross product
+PREHOOK: query: explain extended
+select *
+from pcr_t1 t1 join pcr_t1 t2
+where (t1.ds1='2000-04-08' and t2.key=1) or (t1.ds1='2000-04-09' and t2.key=2)
+order by t2.key, t2.value, t1.ds1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended
+select *
+from pcr_t1 t1 join pcr_t1 t2
+where (t1.ds1='2000-04-08' and t2.key=1) or (t1.ds1='2000-04-09' and t2.key=2)
+order by t2.key, t2.value, t1.ds1
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+
+TOK_QUERY
+ TOK_FROM
+ TOK_JOIN
+ TOK_TABREF
+ TOK_TABNAME
+ pcr_t1
+ t1
+ TOK_TABREF
+ TOK_TABNAME
+ pcr_t1
+ t2
+ TOK_INSERT
+ TOK_DESTINATION
+ TOK_DIR
+ TOK_TMP_FILE
+ TOK_SELECT
+ TOK_SELEXPR
+ TOK_ALLCOLREF
+ TOK_WHERE
+ or
+ and
+ =
+ .
+ TOK_TABLE_OR_COL
+ t1
+ ds1
+ '2000-04-08'
+ =
+ .
+ TOK_TABLE_OR_COL
+ t2
+ key
+ 1
+ and
+ =
+ .
+ TOK_TABLE_OR_COL
+ t1
+ ds1
+ '2000-04-09'
+ =
+ .
+ TOK_TABLE_OR_COL
+ t2
+ key
+ 2
+ TOK_ORDERBY
+ TOK_TABSORTCOLNAMEASC
+ .
+ TOK_TABLE_OR_COL
+ t2
+ key
+ TOK_TABSORTCOLNAMEASC
+ .
+ TOK_TABLE_OR_COL
+ t2
+ value
+ TOK_TABSORTCOLNAMEASC
+ .
+ TOK_TABLE_OR_COL
+ t1
+ ds1
+
+
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: t1
+ Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE
+ tag: 0
+ value expressions: key (type: int), value (type: string), ds1 (type: string), ds2 (type: string)
+ auto parallelism: false
+ TableScan
+ alias: t2
+ Statistics: Num rows: 60 Data size: 480 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 60 Data size: 480 Basic stats: COMPLETE Column stats: NONE
+ tag: 1
+ value expressions: key (type: int), value (type: string), ds1 (type: string), ds2 (type: string)
+ auto parallelism: false
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: ds2=2001-04-08
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds1 2000-04-08
+ ds2 2001-04-08
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count -1
+ columns key,value
+ columns.comments
+ columns.types int:string
+#### A masked pattern was here ####
+ name default.pcr_t1
+ numFiles 1
+ numRows 20
+ partition_columns ds1/ds2
+ partition_columns.types string:string
+ rawDataSize 160
+ serialization.ddl struct pcr_t1 { i32 key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 180
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ bucket_count -1
+ columns key,value
+ columns.comments
+ columns.types int:string
+#### A masked pattern was here ####
+ name default.pcr_t1
+ partition_columns ds1/ds2
+ partition_columns.types string:string
+ serialization.ddl struct pcr_t1 { i32 key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.pcr_t1
+ name: default.pcr_t1
+#### A masked pattern was here ####
+ Partition
+ base file name: ds2=2001-04-09
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds1 2000-04-09
+ ds2 2001-04-09
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count -1
+ columns key,value
+ columns.comments
+ columns.types int:string
+#### A masked pattern was here ####
+ name default.pcr_t1
+ numFiles 1
+ numRows 20
+ partition_columns ds1/ds2
+ partition_columns.types string:string
+ rawDataSize 160
+ serialization.ddl struct pcr_t1 { i32 key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 180
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ bucket_count -1
+ columns key,value
+ columns.comments
+ columns.types int:string
+#### A masked pattern was here ####
+ name default.pcr_t1
+ partition_columns ds1/ds2
+ partition_columns.types string:string
+ serialization.ddl struct pcr_t1 { i32 key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.pcr_t1
+ name: default.pcr_t1
+#### A masked pattern was here ####
+ Partition
+ base file name: ds2=2001-04-10
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds1 2000-04-10
+ ds2 2001-04-10
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count -1
+ columns key,value
+ columns.comments
+ columns.types int:string
+#### A masked pattern was here ####
+ name default.pcr_t1
+ numFiles 1
+ numRows 20
+ partition_columns ds1/ds2
+ partition_columns.types string:string
+ rawDataSize 160
+ serialization.ddl struct pcr_t1 { i32 key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 180
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ bucket_count -1
+ columns key,value
+ columns.comments
+ columns.types int:string
+#### A masked pattern was here ####
+ name default.pcr_t1
+ partition_columns ds1/ds2
+ partition_columns.types string:string
+ serialization.ddl struct pcr_t1 { i32 key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.pcr_t1
+ name: default.pcr_t1
+ Truncated Path -> Alias:
+ /pcr_t1/ds1=2000-04-08/ds2=2001-04-08 [t1, t2]
+ /pcr_t1/ds1=2000-04-09/ds2=2001-04-09 [t1, t2]
+ /pcr_t1/ds1=2000-04-10/ds2=2001-04-10 [t2]
+ Needs Tagging: true
+ Reduce Operator Tree:
+ Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0
+ 1
+ outputColumnNames: _col0, _col1, _col2, _col3, _col7, _col8, _col9, _col10
+ Statistics: Num rows: 66 Data size: 528 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ isSamplingPred: false
+ predicate: ((_col2) IN ('2000-04-08', '2000-04-09') and (struct(_col7,_col2)) IN (const struct(1,'2000-04-08'), const struct(2,'2000-04-09'))) (type: boolean)
+ Statistics: Num rows: 16 Data size: 128 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col7 (type: int), _col8 (type: string), _col9 (type: string), _col10 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7
+ Statistics: Num rows: 16 Data size: 128 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ GlobalTableId: 0
+#### A masked pattern was here ####
+ NumFilesPerFileSink: 1
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ properties:
+ columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7
+ columns.types int,string,string,string,int,string,string,string
+ escape.delim \
+ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+ serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+ TotalFiles: 1
+ GatherStats: false
+ MultiFileSpray: false
+
+ Stage: Stage-2
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ GatherStats: false
+ Reduce Output Operator
+ key expressions: _col4 (type: int), _col5 (type: string), _col2 (type: string)
+ sort order: +++
+ Statistics: Num rows: 16 Data size: 128 Basic stats: COMPLETE Column stats: NONE
+ tag: -1
+ value expressions: _col0 (type: int), _col1 (type: string), _col3 (type: string), _col6 (type: string), _col7 (type: string)
+ auto parallelism: false
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: -mr-10003
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ properties:
+ columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7
+ columns.types int,string,string,string,int,string,string,string
+ escape.delim \
+ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+ serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ properties:
+ columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7
+ columns.types int,string,string,string,int,string,string,string
+ escape.delim \
+ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+ serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+ Truncated Path -> Alias:
+#### A masked pattern was here ####
+ Needs Tagging: false
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), KEY.reducesinkkey2 (type: string), VALUE._col2 (type: string), KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string), VALUE._col3 (type: string), VALUE._col4 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7
+ Statistics: Num rows: 16 Data size: 128 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ GlobalTableId: 0
+#### A masked pattern was here ####
+ NumFilesPerFileSink: 1
+ Statistics: Num rows: 16 Data size: 128 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7
+ columns.types int:string:string:string:int:string:string:string
+ escape.delim \
+ hive.serialization.extend.additional.nesting.levels true
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ TotalFiles: 1
+ GatherStats: false
+ MultiFileSpray: false
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: drop table pcr_t1
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@pcr_t1
+PREHOOK: Output: default@pcr_t1
+POSTHOOK: query: drop table pcr_t1
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@pcr_t1
+POSTHOOK: Output: default@pcr_t1
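
For anyone reproducing this locally, a qfile test like this one is
normally run (and its .q.out regenerated) through the CLI driver; a
minimal sketch, assuming the standard itests layout of this era:

    # run the new test and diff against the checked-in .q.out
    cd itests/qtest
    mvn test -Dtest=TestCliDriver -Dqfile=pointlookup3.q

    # regenerate the expected output instead of comparing
    mvn test -Dtest=TestCliDriver -Dqfile=pointlookup3.q -Dtest.output.overwrite=true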