You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hive.apache.org by cw...@apache.org on 2011/12/09 00:38:53 UTC
svn commit: r1212184 - in /hive/branches/branch-0.8-r2: RELEASE_NOTES.txt
ql/src/test/queries/clientpositive/union25.q
ql/src/test/results/clientpositive/union25.q.out
Author: cws
Date: Thu Dec 8 23:38:53 2011
New Revision: 1212184
URL: http://svn.apache.org/viewvc?rev=1212184&view=rev
Log:
HIVE-2634. revert HIVE-2566 including missing sections (Namit Jain via cws)
Added:
hive/branches/branch-0.8-r2/ql/src/test/queries/clientpositive/union25.q
hive/branches/branch-0.8-r2/ql/src/test/results/clientpositive/union25.q.out
Modified:
hive/branches/branch-0.8-r2/RELEASE_NOTES.txt
Modified: hive/branches/branch-0.8-r2/RELEASE_NOTES.txt
URL: http://svn.apache.org/viewvc/hive/branches/branch-0.8-r2/RELEASE_NOTES.txt?rev=1212184&r1=1212183&r2=1212184&view=diff
==============================================================================
--- hive/branches/branch-0.8-r2/RELEASE_NOTES.txt (original)
+++ hive/branches/branch-0.8-r2/RELEASE_NOTES.txt Thu Dec 8 23:38:53 2011
@@ -219,6 +219,7 @@ Release Notes - Hive - Version 0.8.0
* [HIVE-2624] - Fix eclipse classpath template broken in HIVE-2523
* [HIVE-2625] - Fix maven-build Ant target
* [HIVE-2630] - TestHiveServer doesn't produce a JUnit report file
+ * [HIVE-2634] - revert HIVE-2566
** Improvement
* [HIVE-1078] - CREATE VIEW followup: CREATE OR REPLACE
Added: hive/branches/branch-0.8-r2/ql/src/test/queries/clientpositive/union25.q
URL: http://svn.apache.org/viewvc/hive/branches/branch-0.8-r2/ql/src/test/queries/clientpositive/union25.q?rev=1212184&view=auto
==============================================================================
--- hive/branches/branch-0.8-r2/ql/src/test/queries/clientpositive/union25.q (added)
+++ hive/branches/branch-0.8-r2/ql/src/test/queries/clientpositive/union25.q Thu Dec 8 23:38:53 2011
@@ -0,0 +1,23 @@
+create table tmp_srcpart like srcpart;
+
+insert overwrite table tmp_srcpart partition (ds='2008-04-08', hr='11')
+select key, value from srcpart where ds='2008-04-08' and hr='11';
+
+explain
+create table tmp_unionall as
+SELECT count(1) as counts, key, value
+FROM
+(
+ SELECT key, value FROM srcpart a WHERE a.ds='2008-04-08' and a.hr='11'
+
+ UNION ALL
+
+ SELECT key, key as value FROM (
+ SELECT distinct key FROM (
+ SELECT key, value FROM tmp_srcpart a WHERE a.ds='2008-04-08' and a.hr='11'
+ UNION ALL
+ SELECT key, value FROM tmp_srcpart b WHERE b.ds='2008-04-08' and b.hr='11'
+ )t
+ ) master_table
+) a GROUP BY key, value
+;
Added: hive/branches/branch-0.8-r2/ql/src/test/results/clientpositive/union25.q.out
URL: http://svn.apache.org/viewvc/hive/branches/branch-0.8-r2/ql/src/test/results/clientpositive/union25.q.out?rev=1212184&view=auto
==============================================================================
--- hive/branches/branch-0.8-r2/ql/src/test/results/clientpositive/union25.q.out (added)
+++ hive/branches/branch-0.8-r2/ql/src/test/results/clientpositive/union25.q.out Thu Dec 8 23:38:53 2011
@@ -0,0 +1,330 @@
+PREHOOK: query: create table tmp_srcpart like srcpart
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: create table tmp_srcpart like srcpart
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@tmp_srcpart
+PREHOOK: query: insert overwrite table tmp_srcpart partition (ds='2008-04-08', hr='11')
+select key, value from srcpart where ds='2008-04-08' and hr='11'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Output: default@tmp_srcpart@ds=2008-04-08/hr=11
+POSTHOOK: query: insert overwrite table tmp_srcpart partition (ds='2008-04-08', hr='11')
+select key, value from srcpart where ds='2008-04-08' and hr='11'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Output: default@tmp_srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Lineage: tmp_srcpart PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tmp_srcpart PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: explain
+create table tmp_unionall as
+SELECT count(1) as counts, key, value
+FROM
+(
+ SELECT key, value FROM srcpart a WHERE a.ds='2008-04-08' and a.hr='11'
+
+ UNION ALL
+
+ SELECT key, key as value FROM (
+ SELECT distinct key FROM (
+ SELECT key, value FROM tmp_srcpart a WHERE a.ds='2008-04-08' and a.hr='11'
+ UNION ALL
+ SELECT key, value FROM tmp_srcpart b WHERE b.ds='2008-04-08' and b.hr='11'
+ )t
+ ) master_table
+) a GROUP BY key, value
+PREHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: query: explain
+create table tmp_unionall as
+SELECT count(1) as counts, key, value
+FROM
+(
+ SELECT key, value FROM srcpart a WHERE a.ds='2008-04-08' and a.hr='11'
+
+ UNION ALL
+
+ SELECT key, key as value FROM (
+ SELECT distinct key FROM (
+ SELECT key, value FROM tmp_srcpart a WHERE a.ds='2008-04-08' and a.hr='11'
+ UNION ALL
+ SELECT key, value FROM tmp_srcpart b WHERE b.ds='2008-04-08' and b.hr='11'
+ )t
+ ) master_table
+) a GROUP BY key, value
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Lineage: tmp_srcpart PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tmp_srcpart PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+ABSTRACT SYNTAX TREE:
+ (TOK_CREATETABLE (TOK_TABNAME tmp_unionall) TOK_LIKETABLE (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_UNION (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME srcpart) a)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL value))) (TOK_WHERE (and (= (. (TOK_TABLE_OR_COL a) ds) '2008-04-08') (= (. (TOK_TABLE_OR_COL a) hr) '11'))))) (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_UNION (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME tmp_srcpart) a)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL value))) (TOK_WHERE (and (= (. (TOK_TABLE_OR_COL a) ds) '2008-04-08') (= (. (TOK_TABLE_OR_COL a) hr) '11'))))) (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME tmp_srcpart) b)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL value))) (TOK_WHERE (and (= (. (TOK_TABLE_OR_COL b) ds) '2008-04-08') (= (. (TOK_TABLE_OR_COL b) hr) '11')))))) t)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECTDI (TOK_SELEXPR (TOK_TABLE_OR_COL key))))) master_table)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL key) value))))) a)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION count 1) counts) (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL value))) (TOK_GROUPBY (TOK_TABLE_OR_COL key) (TOK_TABLE_OR_COL value)))))
+
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1, Stage-5
+ Stage-3 depends on stages: Stage-2, Stage-6
+ Stage-0 depends on stages: Stage-3
+ Stage-7 depends on stages: Stage-0
+ Stage-4 depends on stages: Stage-7
+ Stage-5 is a root stage
+ Stage-6 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Alias -> Map Operator Tree:
+ null-subquery2:a-subquery2:master_table-subquery1:t-subquery1:a
+ TableScan
+ alias: a
+ Select Operator
+ expressions:
+ expr: key
+ type: string
+ expr: value
+ type: string
+ outputColumnNames: _col0, _col1
+ File Output Operator
+ compressed: false
+ GlobalTableId: 0
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+
+ Stage: Stage-2
+ Map Reduce
+ Alias -> Map Operator Tree:
+ file:/tmp/njain/hive_2011-12-07_21-55-17_940_6667012806736485678/-mr-10002
+ Union
+ Select Operator
+ expressions:
+ expr: _col0
+ type: string
+ outputColumnNames: _col0
+ Group By Operator
+ bucketGroup: false
+ keys:
+ expr: _col0
+ type: string
+ mode: hash
+ outputColumnNames: _col0
+ Reduce Output Operator
+ key expressions:
+ expr: _col0
+ type: string
+ sort order: +
+ Map-reduce partition columns:
+ expr: _col0
+ type: string
+ tag: -1
+ file:/tmp/njain/hive_2011-12-07_21-55-17_940_6667012806736485678/-mr-10004
+ Union
+ Select Operator
+ expressions:
+ expr: _col0
+ type: string
+ outputColumnNames: _col0
+ Group By Operator
+ bucketGroup: false
+ keys:
+ expr: _col0
+ type: string
+ mode: hash
+ outputColumnNames: _col0
+ Reduce Output Operator
+ key expressions:
+ expr: _col0
+ type: string
+ sort order: +
+ Map-reduce partition columns:
+ expr: _col0
+ type: string
+ tag: -1
+ Reduce Operator Tree:
+ Group By Operator
+ bucketGroup: false
+ keys:
+ expr: KEY._col0
+ type: string
+ mode: mergepartial
+ outputColumnNames: _col0
+ Select Operator
+ expressions:
+ expr: _col0
+ type: string
+ outputColumnNames: _col0
+ Select Operator
+ expressions:
+ expr: _col0
+ type: string
+ expr: _col0
+ type: string
+ outputColumnNames: _col0, _col1
+ File Output Operator
+ compressed: false
+ GlobalTableId: 0
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+
+ Stage: Stage-3
+ Map Reduce
+ Alias -> Map Operator Tree:
+ file:/tmp/njain/hive_2011-12-07_21-55-17_940_6667012806736485678/-mr-10003
+ Union
+ Select Operator
+ expressions:
+ expr: _col0
+ type: string
+ expr: _col1
+ type: string
+ outputColumnNames: _col0, _col1
+ Group By Operator
+ aggregations:
+ expr: count(1)
+ bucketGroup: false
+ keys:
+ expr: _col0
+ type: string
+ expr: _col1
+ type: string
+ mode: hash
+ outputColumnNames: _col0, _col1, _col2
+ Reduce Output Operator
+ key expressions:
+ expr: _col0
+ type: string
+ expr: _col1
+ type: string
+ sort order: ++
+ Map-reduce partition columns:
+ expr: _col0
+ type: string
+ expr: _col1
+ type: string
+ tag: -1
+ value expressions:
+ expr: _col2
+ type: bigint
+ file:/tmp/njain/hive_2011-12-07_21-55-17_940_6667012806736485678/-mr-10005
+ Union
+ Select Operator
+ expressions:
+ expr: _col0
+ type: string
+ expr: _col1
+ type: string
+ outputColumnNames: _col0, _col1
+ Group By Operator
+ aggregations:
+ expr: count(1)
+ bucketGroup: false
+ keys:
+ expr: _col0
+ type: string
+ expr: _col1
+ type: string
+ mode: hash
+ outputColumnNames: _col0, _col1, _col2
+ Reduce Output Operator
+ key expressions:
+ expr: _col0
+ type: string
+ expr: _col1
+ type: string
+ sort order: ++
+ Map-reduce partition columns:
+ expr: _col0
+ type: string
+ expr: _col1
+ type: string
+ tag: -1
+ value expressions:
+ expr: _col2
+ type: bigint
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations:
+ expr: count(VALUE._col0)
+ bucketGroup: false
+ keys:
+ expr: KEY._col0
+ type: string
+ expr: KEY._col1
+ type: string
+ mode: mergepartial
+ outputColumnNames: _col0, _col1, _col2
+ Select Operator
+ expressions:
+ expr: _col2
+ type: bigint
+ expr: _col0
+ type: string
+ expr: _col1
+ type: string
+ outputColumnNames: _col0, _col1, _col2
+ File Output Operator
+ compressed: false
+ GlobalTableId: 1
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ name: default.tmp_unionall
+
+ Stage: Stage-0
+ Move Operator
+ files:
+ hdfs directory: true
+ destination: pfile:/data/users/njain/hive_commit2/build/ql/test/data/warehouse/tmp_unionall
+
+ Stage: Stage-7
+ Create Table Operator:
+ Create Table
+ columns: counts bigint, key string, value string
+ if not exists: false
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ # buckets: -1
+ output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
+ name: tmp_unionall
+ isExternal: false
+
+ Stage: Stage-4
+ Stats-Aggr Operator
+
+ Stage: Stage-5
+ Map Reduce
+ Alias -> Map Operator Tree:
+ null-subquery2:a-subquery2:master_table-subquery2:t-subquery2:b
+ TableScan
+ alias: b
+ Select Operator
+ expressions:
+ expr: key
+ type: string
+ expr: value
+ type: string
+ outputColumnNames: _col0, _col1
+ File Output Operator
+ compressed: false
+ GlobalTableId: 0
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+
+ Stage: Stage-6
+ Map Reduce
+ Alias -> Map Operator Tree:
+ null-subquery1:a-subquery1:a
+ TableScan
+ alias: a
+ Select Operator
+ expressions:
+ expr: key
+ type: string
+ expr: value
+ type: string
+ outputColumnNames: _col0, _col1
+ File Output Operator
+ compressed: false
+ GlobalTableId: 0
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+
+