You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hive.apache.org by ha...@apache.org on 2013/04/10 02:07:09 UTC
svn commit: r1466300 [3/13] - in /hive/trunk:
cli/src/java/org/apache/hadoop/hive/cli/
ql/src/test/queries/clientpositive/ ql/src/test/results/clientpositive/
Modified: hive/trunk/ql/src/test/results/clientpositive/binary_output_format.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/binary_output_format.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/binary_output_format.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/binary_output_format.q.out Wed Apr 10 00:06:55 2013
@@ -1,4 +1,5 @@
-PREHOOK: query: CREATE TABLE dest1(mydata STRING)
+PREHOOK: query: -- Create a table with binary output format
+CREATE TABLE dest1(mydata STRING)
ROW FORMAT SERDE
'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
WITH SERDEPROPERTIES (
@@ -8,7 +9,8 @@ STORED AS
INPUTFORMAT 'org.apache.hadoop.mapred.TextInputFormat'
OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.HiveBinaryOutputFormat'
PREHOOK: type: CREATETABLE
-POSTHOOK: query: CREATE TABLE dest1(mydata STRING)
+POSTHOOK: query: -- Create a table with binary output format
+CREATE TABLE dest1(mydata STRING)
ROW FORMAT SERDE
'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
WITH SERDEPROPERTIES (
@@ -19,7 +21,8 @@ STORED AS
OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.HiveBinaryOutputFormat'
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: default@dest1
-PREHOOK: query: EXPLAIN EXTENDED
+PREHOOK: query: -- Insert into that table using transform
+EXPLAIN EXTENDED
INSERT OVERWRITE TABLE dest1
SELECT TRANSFORM(*)
USING 'cat'
@@ -32,7 +35,8 @@ SELECT TRANSFORM(*)
RECORDREADER 'org.apache.hadoop.hive.ql.exec.BinaryRecordReader'
FROM src
PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN EXTENDED
+POSTHOOK: query: -- Insert into that table using transform
+EXPLAIN EXTENDED
INSERT OVERWRITE TABLE dest1
SELECT TRANSFORM(*)
USING 'cat'
@@ -368,11 +372,13 @@ POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
POSTHOOK: Output: default@dest1
POSTHOOK: Lineage: dest1.mydata SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: SELECT * FROM dest1
+PREHOOK: query: -- Test the result
+SELECT * FROM dest1
PREHOOK: type: QUERY
PREHOOK: Input: default@dest1
#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM dest1
+POSTHOOK: query: -- Test the result
+SELECT * FROM dest1
POSTHOOK: type: QUERY
POSTHOOK: Input: default@dest1
#### A masked pattern was here ####
Modified: hive/trunk/ql/src/test/results/clientpositive/binary_table_bincolserde.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/binary_table_bincolserde.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/binary_table_bincolserde.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/binary_table_bincolserde.q.out Wed Apr 10 00:06:55 2013
@@ -2,9 +2,13 @@ PREHOOK: query: drop table ba_test
PREHOOK: type: DROPTABLE
POSTHOOK: query: drop table ba_test
POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table ba_test (ba_key binary, ba_val binary) stored as rcfile
+PREHOOK: query: -- Tests everything in binary_table_colserde.q + uses LazyBinaryColumnarSerde
+
+create table ba_test (ba_key binary, ba_val binary) stored as rcfile
PREHOOK: type: CREATETABLE
-POSTHOOK: query: create table ba_test (ba_key binary, ba_val binary) stored as rcfile
+POSTHOOK: query: -- Tests everything in binary_table_colserde.q + uses LazyBinaryColumnarSerde
+
+create table ba_test (ba_key binary, ba_val binary) stored as rcfile
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: default@ba_test
PREHOOK: query: alter table ba_test set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe'
Modified: hive/trunk/ql/src/test/results/clientpositive/binary_table_colserde.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/binary_table_colserde.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/binary_table_colserde.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/binary_table_colserde.q.out Wed Apr 10 00:06:55 2013
@@ -2,9 +2,13 @@ PREHOOK: query: drop table ba_test
PREHOOK: type: DROPTABLE
POSTHOOK: query: drop table ba_test
POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table ba_test (ba_key binary, ba_val binary) stored as rcfile
+PREHOOK: query: -- Everything in ba_table1.q + columnar serde in RCFILE.
+
+create table ba_test (ba_key binary, ba_val binary) stored as rcfile
PREHOOK: type: CREATETABLE
-POSTHOOK: query: create table ba_test (ba_key binary, ba_val binary) stored as rcfile
+POSTHOOK: query: -- Everything in ba_table1.q + columnar serde in RCFILE.
+
+create table ba_test (ba_key binary, ba_val binary) stored as rcfile
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: default@ba_test
PREHOOK: query: describe extended ba_test
Modified: hive/trunk/ql/src/test/results/clientpositive/bucket_groupby.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/bucket_groupby.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/bucket_groupby.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/bucket_groupby.q.out Wed Apr 10 00:06:55 2013
@@ -1180,11 +1180,13 @@ STAGE PLANS:
limit: -1
-PREHOOK: query: drop table clustergroupby
+PREHOOK: query: -- number of buckets cannot be changed, so drop the table
+drop table clustergroupby
PREHOOK: type: DROPTABLE
PREHOOK: Input: default@clustergroupby
PREHOOK: Output: default@clustergroupby
-POSTHOOK: query: drop table clustergroupby
+POSTHOOK: query: -- number of buckets cannot be changed, so drop the table
+drop table clustergroupby
POSTHOOK: type: DROPTABLE
POSTHOOK: Input: default@clustergroupby
POSTHOOK: Output: default@clustergroupby
@@ -1583,11 +1585,13 @@ POSTHOOK: Lineage: clustergroupby PARTIT
111 1
113 2
114 1
-PREHOOK: query: drop table clustergroupby
+PREHOOK: query: -- number of buckets cannot be changed, so drop the table
+drop table clustergroupby
PREHOOK: type: DROPTABLE
PREHOOK: Input: default@clustergroupby
PREHOOK: Output: default@clustergroupby
-POSTHOOK: query: drop table clustergroupby
+POSTHOOK: query: -- number of buckets cannot be changed, so drop the table
+drop table clustergroupby
POSTHOOK: type: DROPTABLE
POSTHOOK: Input: default@clustergroupby
POSTHOOK: Output: default@clustergroupby
Modified: hive/trunk/ql/src/test/results/clientpositive/bucket_map_join_1.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/bucket_map_join_1.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/bucket_map_join_1.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/bucket_map_join_1.q.out Wed Apr 10 00:06:55 2013
@@ -32,10 +32,18 @@ PREHOOK: Output: default@table2
POSTHOOK: query: load data local inpath '../data/files/SortCol2Col1.txt' overwrite into table table2
POSTHOOK: type: LOAD
POSTHOOK: Output: default@table2
-PREHOOK: query: explain extended
+PREHOOK: query: -- The tables are bucketed in same columns in different order,
+-- but sorted in different column orders
+-- Neither bucketed map-join, nor sort-merge join should be performed
+
+explain extended
select /*+ mapjoin(b) */ count(*) from table1 a join table2 b on a.key=b.key and a.value=b.value
PREHOOK: type: QUERY
-POSTHOOK: query: explain extended
+POSTHOOK: query: -- The tables are bucketed in same columns in different order,
+-- but sorted in different column orders
+-- Neither bucketed map-join, nor sort-merge join should be performed
+
+explain extended
select /*+ mapjoin(b) */ count(*) from table1 a join table2 b on a.key=b.key and a.value=b.value
POSTHOOK: type: QUERY
ABSTRACT SYNTAX TREE:
Modified: hive/trunk/ql/src/test/results/clientpositive/bucket_map_join_2.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/bucket_map_join_2.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/bucket_map_join_2.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/bucket_map_join_2.q.out Wed Apr 10 00:06:55 2013
@@ -32,10 +32,18 @@ PREHOOK: Output: default@table2
POSTHOOK: query: load data local inpath '../data/files/SortCol2Col1.txt' overwrite into table table2
POSTHOOK: type: LOAD
POSTHOOK: Output: default@table2
-PREHOOK: query: explain extended
+PREHOOK: query: -- The tables are bucketed in same columns in different order,
+-- but sorted in different column orders
+-- Neither bucketed map-join, nor sort-merge join should be performed
+
+explain extended
select /*+ mapjoin(b) */ count(*) from table1 a join table2 b on a.key=b.key and a.value=b.value
PREHOOK: type: QUERY
-POSTHOOK: query: explain extended
+POSTHOOK: query: -- The tables are bucketed in same columns in different order,
+-- but sorted in different column orders
+-- Neither bucketed map-join, nor sort-merge join should be performed
+
+explain extended
select /*+ mapjoin(b) */ count(*) from table1 a join table2 b on a.key=b.key and a.value=b.value
POSTHOOK: type: QUERY
ABSTRACT SYNTAX TREE:
Modified: hive/trunk/ql/src/test/results/clientpositive/bucketcontext_1.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/bucketcontext_1.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/bucketcontext_1.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/bucketcontext_1.q.out Wed Apr 10 00:06:55 2013
@@ -1,6 +1,8 @@
-PREHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: query: -- small 1 part, 2 bucket & big 2 part, 4 bucket
+CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
-POSTHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+POSTHOOK: query: -- small 1 part, 2 bucket & big 2 part, 4 bucket
+CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: default@bucket_small
PREHOOK: query: load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
Modified: hive/trunk/ql/src/test/results/clientpositive/bucketcontext_2.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/bucketcontext_2.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/bucketcontext_2.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/bucketcontext_2.q.out Wed Apr 10 00:06:55 2013
@@ -1,6 +1,8 @@
-PREHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+PREHOOK: query: -- small 1 part, 4 bucket & big 2 part, 2 bucket
+CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
-POSTHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+POSTHOOK: query: -- small 1 part, 4 bucket & big 2 part, 2 bucket
+CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: default@bucket_small
PREHOOK: query: load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
Modified: hive/trunk/ql/src/test/results/clientpositive/bucketcontext_3.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/bucketcontext_3.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/bucketcontext_3.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/bucketcontext_3.q.out Wed Apr 10 00:06:55 2013
@@ -1,6 +1,8 @@
-PREHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: query: -- small 2 part, 2 bucket & big 1 part, 4 bucket
+CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
-POSTHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+POSTHOOK: query: -- small 2 part, 2 bucket & big 1 part, 4 bucket
+CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: default@bucket_small
PREHOOK: query: load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
Modified: hive/trunk/ql/src/test/results/clientpositive/bucketcontext_4.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/bucketcontext_4.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/bucketcontext_4.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/bucketcontext_4.q.out Wed Apr 10 00:06:55 2013
@@ -1,6 +1,8 @@
-PREHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+PREHOOK: query: -- small 2 part, 4 bucket & big 1 part, 2 bucket
+CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
-POSTHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+POSTHOOK: query: -- small 2 part, 4 bucket & big 1 part, 2 bucket
+CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: default@bucket_small
PREHOOK: query: load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
Modified: hive/trunk/ql/src/test/results/clientpositive/bucketcontext_5.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/bucketcontext_5.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/bucketcontext_5.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/bucketcontext_5.q.out Wed Apr 10 00:06:55 2013
@@ -1,6 +1,8 @@
-PREHOOK: query: CREATE TABLE bucket_small (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+PREHOOK: query: -- small no part, 4 bucket & big no part, 2 bucket
+CREATE TABLE bucket_small (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
-POSTHOOK: query: CREATE TABLE bucket_small (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+POSTHOOK: query: -- small no part, 4 bucket & big no part, 2 bucket
+CREATE TABLE bucket_small (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: default@bucket_small
PREHOOK: query: load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_small
Modified: hive/trunk/ql/src/test/results/clientpositive/bucketcontext_6.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/bucketcontext_6.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/bucketcontext_6.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/bucketcontext_6.q.out Wed Apr 10 00:06:55 2013
@@ -1,6 +1,8 @@
-PREHOOK: query: CREATE TABLE bucket_small (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+PREHOOK: query: -- small no part, 4 bucket & big 2 part, 2 bucket
+CREATE TABLE bucket_small (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
-POSTHOOK: query: CREATE TABLE bucket_small (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+POSTHOOK: query: -- small no part, 4 bucket & big 2 part, 2 bucket
+CREATE TABLE bucket_small (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: default@bucket_small
PREHOOK: query: load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_small
Modified: hive/trunk/ql/src/test/results/clientpositive/bucketcontext_7.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/bucketcontext_7.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/bucketcontext_7.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/bucketcontext_7.q.out Wed Apr 10 00:06:55 2013
@@ -1,6 +1,8 @@
-PREHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+PREHOOK: query: -- small 2 part, 4 bucket & big 2 part, 2 bucket
+CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
-POSTHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+POSTHOOK: query: -- small 2 part, 4 bucket & big 2 part, 2 bucket
+CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: default@bucket_small
PREHOOK: query: load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
Modified: hive/trunk/ql/src/test/results/clientpositive/bucketcontext_8.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/bucketcontext_8.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/bucketcontext_8.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/bucketcontext_8.q.out Wed Apr 10 00:06:55 2013
@@ -1,6 +1,8 @@
-PREHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: query: -- small 2 part, 2 bucket & big 2 part, 4 bucket
+CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
-POSTHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+POSTHOOK: query: -- small 2 part, 2 bucket & big 2 part, 4 bucket
+CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: default@bucket_small
PREHOOK: query: load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
Modified: hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin1.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin1.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin1.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin1.q.out Wed Apr 10 00:06:55 2013
@@ -13,12 +13,14 @@ PREHOOK: type: CREATETABLE
POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: default@srcbucket_mapjoin_part_2
-PREHOOK: query: explain extended
+PREHOOK: query: -- empty partitions (HIVE-3205)
+explain extended
select /*+mapjoin(b)*/ a.key, a.value, b.value
from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
on a.key=b.key where b.ds="2008-04-08"
PREHOOK: type: QUERY
-POSTHOOK: query: explain extended
+POSTHOOK: query: -- empty partitions (HIVE-3205)
+explain extended
select /*+mapjoin(b)*/ a.key, a.value, b.value
from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
on a.key=b.key where b.ds="2008-04-08"
Modified: hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin10.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin10.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin10.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin10.q.out Wed Apr 10 00:06:55 2013
@@ -100,12 +100,16 @@ POSTHOOK: query: ALTER TABLE srcbucket_m
POSTHOOK: type: ALTERTABLE_CLUSTER_SORT
POSTHOOK: Input: default@srcbucket_mapjoin_part_2
POSTHOOK: Output: default@srcbucket_mapjoin_part_2
-PREHOOK: query: EXPLAIN EXTENDED
+PREHOOK: query: -- The table bucketing metadata matches but the partition metadata does not, bucket map join should not be used
+
+EXPLAIN EXTENDED
SELECT /*+ MAPJOIN(b) */ count(*)
FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
ON a.key = b.key AND a.part IS NOT NULL AND b.part IS NOT NULL
PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN EXTENDED
+POSTHOOK: query: -- The table bucketing metadata matches but the partition metadata does not, bucket map join should not be used
+
+EXPLAIN EXTENDED
SELECT /*+ MAPJOIN(b) */ count(*)
FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
ON a.key = b.key AND a.part IS NOT NULL AND b.part IS NOT NULL
Modified: hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin11.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin11.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin11.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin11.q.out Wed Apr 10 00:06:55 2013
@@ -104,12 +104,18 @@ PREHOOK: Output: default@srcbucket_mapjo
POSTHOOK: query: LOAD DATA LOCAL INPATH '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='2')
POSTHOOK: type: LOAD
POSTHOOK: Output: default@srcbucket_mapjoin_part_2@part=2
-PREHOOK: query: EXPLAIN EXTENDED
+PREHOOK: query: -- The table and partition bucketing metadata doesn't match but the bucket numbers of all partitions is
+-- a power of 2 and the bucketing columns match so bucket map join should be used
+
+EXPLAIN EXTENDED
SELECT /*+ MAPJOIN(b) */ count(*)
FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
ON a.key = b.key AND a.part IS NOT NULL AND b.part IS NOT NULL
PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN EXTENDED
+POSTHOOK: query: -- The table and partition bucketing metadata doesn't match but the bucket numbers of all partitions is
+-- a power of 2 and the bucketing columns match so bucket map join should be used
+
+EXPLAIN EXTENDED
SELECT /*+ MAPJOIN(b) */ count(*)
FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
ON a.key = b.key AND a.part IS NOT NULL AND b.part IS NOT NULL
Modified: hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin12.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin12.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin12.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin12.q.out Wed Apr 10 00:06:55 2013
@@ -74,12 +74,16 @@ POSTHOOK: query: ALTER TABLE srcbucket_m
POSTHOOK: type: ALTERTABLE_CLUSTER_SORT
POSTHOOK: Input: default@srcbucket_mapjoin_part_3
POSTHOOK: Output: default@srcbucket_mapjoin_part_3
-PREHOOK: query: EXPLAIN EXTENDED
+PREHOOK: query: -- The partition bucketing metadata match but one table is not bucketed, bucket map join should still be used
+
+EXPLAIN EXTENDED
SELECT /*+ MAPJOIN(b) */ count(*)
FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
ON a.key = b.key AND a.part = '1' and b.part = '1'
PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN EXTENDED
+POSTHOOK: query: -- The partition bucketing metadata match but one table is not bucketed, bucket map join should still be used
+
+EXPLAIN EXTENDED
SELECT /*+ MAPJOIN(b) */ count(*)
FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
ON a.key = b.key AND a.part = '1' and b.part = '1'
@@ -262,12 +266,16 @@ POSTHOOK: Input: default@srcbucket_mapjo
POSTHOOK: Input: default@srcbucket_mapjoin_part_2@part=1
#### A masked pattern was here ####
464
-PREHOOK: query: EXPLAIN EXTENDED
+PREHOOK: query: -- The table bucketing metadata match but one partition is not bucketed, bucket map join should not be used
+
+EXPLAIN EXTENDED
SELECT /*+ MAPJOIN(b) */ count(*)
FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_3 b
ON a.key = b.key AND a.part = '1' and b.part = '1'
PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN EXTENDED
+POSTHOOK: query: -- The table bucketing metadata match but one partition is not bucketed, bucket map join should not be used
+
+EXPLAIN EXTENDED
SELECT /*+ MAPJOIN(b) */ count(*)
FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_3 b
ON a.key = b.key AND a.part = '1' and b.part = '1'
Modified: hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin13.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin13.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin13.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin13.q.out Wed Apr 10 00:06:55 2013
@@ -5,12 +5,14 @@ POSTHOOK: query: CREATE TABLE srcbucket_
CLUSTERED BY (value) INTO 2 BUCKETS
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: default@srcbucket_mapjoin_part_1
-PREHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_1 PARTITION (part='1')
+PREHOOK: query: -- part=1 partition for srcbucket_mapjoin_part_1 is bucketed by 'value'
+INSERT OVERWRITE TABLE srcbucket_mapjoin_part_1 PARTITION (part='1')
SELECT * FROM src
PREHOOK: type: QUERY
PREHOOK: Input: default@src
PREHOOK: Output: default@srcbucket_mapjoin_part_1@part=1
-POSTHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_1 PARTITION (part='1')
+POSTHOOK: query: -- part=1 partition for srcbucket_mapjoin_part_1 is bucketed by 'value'
+INSERT OVERWRITE TABLE srcbucket_mapjoin_part_1 PARTITION (part='1')
SELECT * FROM src
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
@@ -27,12 +29,14 @@ POSTHOOK: Input: default@srcbucket_mapjo
POSTHOOK: Output: default@srcbucket_mapjoin_part_1
POSTHOOK: Lineage: srcbucket_mapjoin_part_1 PARTITION(part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: srcbucket_mapjoin_part_1 PARTITION(part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_1 PARTITION (part='2')
+PREHOOK: query: -- part=2 partition for srcbucket_mapjoin_part_1 is bucketed by 'key'
+INSERT OVERWRITE TABLE srcbucket_mapjoin_part_1 PARTITION (part='2')
SELECT * FROM src
PREHOOK: type: QUERY
PREHOOK: Input: default@src
PREHOOK: Output: default@srcbucket_mapjoin_part_1@part=2
-POSTHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_1 PARTITION (part='2')
+POSTHOOK: query: -- part=2 partition for srcbucket_mapjoin_part_1 is bucketed by 'key'
+INSERT OVERWRITE TABLE srcbucket_mapjoin_part_1 PARTITION (part='2')
SELECT * FROM src
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
@@ -52,12 +56,14 @@ POSTHOOK: Lineage: srcbucket_mapjoin_par
POSTHOOK: Lineage: srcbucket_mapjoin_part_1 PARTITION(part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
POSTHOOK: Lineage: srcbucket_mapjoin_part_1 PARTITION(part=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: srcbucket_mapjoin_part_1 PARTITION(part=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_2 PARTITION (part='1')
+PREHOOK: query: -- part=1 partition for srcbucket_mapjoin_part_2 is bucketed by 'key'
+INSERT OVERWRITE TABLE srcbucket_mapjoin_part_2 PARTITION (part='1')
SELECT * FROM src
PREHOOK: type: QUERY
PREHOOK: Input: default@src
PREHOOK: Output: default@srcbucket_mapjoin_part_2@part=1
-POSTHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_2 PARTITION (part='1')
+POSTHOOK: query: -- part=1 partition for srcbucket_mapjoin_part_2 is bucketed by 'key'
+INSERT OVERWRITE TABLE srcbucket_mapjoin_part_2 PARTITION (part='1')
SELECT * FROM src
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
@@ -68,12 +74,16 @@ POSTHOOK: Lineage: srcbucket_mapjoin_par
POSTHOOK: Lineage: srcbucket_mapjoin_part_1 PARTITION(part=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
POSTHOOK: Lineage: srcbucket_mapjoin_part_2 PARTITION(part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: srcbucket_mapjoin_part_2 PARTITION(part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: EXPLAIN EXTENDED
+PREHOOK: query: -- part=1 partition for srcbucket_mapjoin_part_1 is bucketed by 'value'
+-- and it is also being joined. So, bucketed map-join cannot be performed
+EXPLAIN EXTENDED
SELECT /*+ MAPJOIN(b) */ count(*)
FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
ON a.key = b.key
PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN EXTENDED
+POSTHOOK: query: -- part=1 partition for srcbucket_mapjoin_part_1 is bucketed by 'value'
+-- and it is also being joined. So, bucketed map-join cannot be performed
+EXPLAIN EXTENDED
SELECT /*+ MAPJOIN(b) */ count(*)
FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
ON a.key = b.key
@@ -310,12 +320,16 @@ POSTHOOK: Lineage: srcbucket_mapjoin_par
POSTHOOK: Lineage: srcbucket_mapjoin_part_2 PARTITION(part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: srcbucket_mapjoin_part_2 PARTITION(part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
2056
-PREHOOK: query: EXPLAIN EXTENDED
+PREHOOK: query: -- part=2 partition for srcbucket_mapjoin_part_1 is bucketed by 'key'
+-- and it is being joined. So, bucketed map-join can be performed
+EXPLAIN EXTENDED
SELECT /*+ MAPJOIN(b) */ count(*)
FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
ON a.key = b.key and a.part = '2'
PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN EXTENDED
+POSTHOOK: query: -- part=2 partition for srcbucket_mapjoin_part_1 is bucketed by 'key'
+-- and it is being joined. So, bucketed map-join can be performed
+EXPLAIN EXTENDED
SELECT /*+ MAPJOIN(b) */ count(*)
FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
ON a.key = b.key and a.part = '2'
@@ -524,12 +538,16 @@ POSTHOOK: Lineage: srcbucket_mapjoin_par
POSTHOOK: Lineage: srcbucket_mapjoin_part_1 PARTITION(part=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
POSTHOOK: Lineage: srcbucket_mapjoin_part_2 PARTITION(part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: srcbucket_mapjoin_part_2 PARTITION(part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: EXPLAIN EXTENDED
+PREHOOK: query: -- part=2 partition for srcbucket_mapjoin_part_1 is bucketed by 'key'
+-- and it is being joined. So, bucketed map-join can be performed
+EXPLAIN EXTENDED
SELECT /*+ MAPJOIN(b) */ count(*)
FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
ON a.key = b.key
PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN EXTENDED
+POSTHOOK: query: -- part=2 partition for srcbucket_mapjoin_part_1 is bucketed by 'key'
+-- and it is being joined. So, bucketed map-join can be performed
+EXPLAIN EXTENDED
SELECT /*+ MAPJOIN(b) */ count(*)
FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
ON a.key = b.key
@@ -738,12 +756,18 @@ POSTHOOK: Lineage: srcbucket_mapjoin_par
POSTHOOK: Lineage: srcbucket_mapjoin_part_1 PARTITION(part=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
POSTHOOK: Lineage: srcbucket_mapjoin_part_2 PARTITION(part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: srcbucket_mapjoin_part_2 PARTITION(part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: EXPLAIN EXTENDED
+PREHOOK: query: -- part=2 partition for srcbucket_mapjoin_part_1 is bucketed by 'key'
+-- and it is being joined. So, bucketed map-join can be performed
+-- The fact that the table is being bucketed by 'value' does not matter
+EXPLAIN EXTENDED
SELECT /*+ MAPJOIN(b) */ count(*)
FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
ON a.key = b.key
PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN EXTENDED
+POSTHOOK: query: -- part=2 partition for srcbucket_mapjoin_part_1 is bucketed by 'key'
+-- and it is being joined. So, bucketed map-join can be performed
+-- The fact that the table is being bucketed by 'value' does not matter
+EXPLAIN EXTENDED
SELECT /*+ MAPJOIN(b) */ count(*)
FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
ON a.key = b.key
Modified: hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin2.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin2.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin2.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin2.q.out Wed Apr 10 00:06:55 2013
@@ -1167,10 +1167,12 @@ POSTHOOK: Lineage: bucketmapjoin_tmp_res
POSTHOOK: Lineage: bucketmapjoin_tmp_result.value2 SIMPLE [(srcbucket_mapjoin_part_2)b.FieldSchema(name:value, type:string, comment:null), ]
POSTHOOK: Lineage: bucketmapjoin_tmp_result.value2 SIMPLE [(srcbucket_mapjoin_part_2)b.FieldSchema(name:value, type:string, comment:null), ]
0 0 0
-PREHOOK: query: load data local inpath '../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-09')
+PREHOOK: query: -- HIVE-3210
+load data local inpath '../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-09')
PREHOOK: type: LOAD
PREHOOK: Output: default@srcbucket_mapjoin_part_2
-POSTHOOK: query: load data local inpath '../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-09')
+POSTHOOK: query: -- HIVE-3210
+load data local inpath '../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-09')
POSTHOOK: type: LOAD
POSTHOOK: Output: default@srcbucket_mapjoin_part_2
POSTHOOK: Output: default@srcbucket_mapjoin_part_2@ds=2008-04-09
Modified: hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin8.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin8.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin8.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin8.q.out Wed Apr 10 00:06:55 2013
@@ -46,12 +46,16 @@ POSTHOOK: query: ALTER TABLE srcbucket_m
POSTHOOK: type: ALTERTABLE_CLUSTER_SORT
POSTHOOK: Input: default@srcbucket_mapjoin_part_2
POSTHOOK: Output: default@srcbucket_mapjoin_part_2
-PREHOOK: query: EXPLAIN EXTENDED
+PREHOOK: query: -- The partition bucketing metadata match but the tables have different numbers of buckets, bucket map join should still be used
+
+EXPLAIN EXTENDED
SELECT /*+ MAPJOIN(b) */ count(*)
FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
ON a.key = b.key AND a.part = '1' and b.part = '1'
PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN EXTENDED
+POSTHOOK: query: -- The partition bucketing metadata match but the tables have different numbers of buckets, bucket map join should still be used
+
+EXPLAIN EXTENDED
SELECT /*+ MAPJOIN(b) */ count(*)
FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
ON a.key = b.key AND a.part = '1' and b.part = '1'
@@ -242,12 +246,16 @@ POSTHOOK: query: ALTER TABLE srcbucket_m
POSTHOOK: type: ALTERTABLE_CLUSTER_SORT
POSTHOOK: Input: default@srcbucket_mapjoin_part_2
POSTHOOK: Output: default@srcbucket_mapjoin_part_2
-PREHOOK: query: EXPLAIN EXTENDED
+PREHOOK: query: -- The partition bucketing metadata match but the tables are bucketed on different columns, bucket map join should still be used
+
+EXPLAIN EXTENDED
SELECT /*+ MAPJOIN(b) */ count(*)
FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
ON a.key = b.key AND a.part = '1' and b.part = '1'
PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN EXTENDED
+POSTHOOK: query: -- The partition bucketing metadata match but the tables are bucketed on different columns, bucket map join should still be used
+
+EXPLAIN EXTENDED
SELECT /*+ MAPJOIN(b) */ count(*)
FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
ON a.key = b.key AND a.part = '1' and b.part = '1'
Modified: hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin9.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin9.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin9.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin9.q.out Wed Apr 10 00:06:55 2013
@@ -52,12 +52,16 @@ POSTHOOK: query: ALTER TABLE srcbucket_m
POSTHOOK: type: ALTERTABLE_CLUSTER_SORT
POSTHOOK: Input: default@srcbucket_mapjoin_part_2
POSTHOOK: Output: default@srcbucket_mapjoin_part_2
-PREHOOK: query: EXPLAIN EXTENDED
+PREHOOK: query: -- The table bucketing metadata matches but the partitions have different numbers of buckets, bucket map join should not be used
+
+EXPLAIN EXTENDED
SELECT /*+ MAPJOIN(b) */ count(*)
FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
ON a.key = b.key AND a.part = '1' and b.part = '1'
PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN EXTENDED
+POSTHOOK: query: -- The table bucketing metadata matches but the partitions have different numbers of buckets, bucket map join should not be used
+
+EXPLAIN EXTENDED
SELECT /*+ MAPJOIN(b) */ count(*)
FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
ON a.key = b.key AND a.part = '1' and b.part = '1'
@@ -269,12 +273,16 @@ POSTHOOK: query: ALTER TABLE srcbucket_m
POSTHOOK: type: ALTERTABLE_CLUSTER_SORT
POSTHOOK: Input: default@srcbucket_mapjoin_part_2
POSTHOOK: Output: default@srcbucket_mapjoin_part_2
-PREHOOK: query: EXPLAIN EXTENDED
+PREHOOK: query: -- The table bucketing metadata matches but the partitions are bucketed on different columns, bucket map join should not be used
+
+EXPLAIN EXTENDED
SELECT /*+ MAPJOIN(b) */ count(*)
FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
ON a.key = b.key AND a.part = '1' AND b.part = '1'
PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN EXTENDED
+POSTHOOK: query: -- The table bucketing metadata matches but the partitions are bucketed on different columns, bucket map join should not be used
+
+EXPLAIN EXTENDED
SELECT /*+ MAPJOIN(b) */ count(*)
FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
ON a.key = b.key AND a.part = '1' AND b.part = '1'
Modified: hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin_negative3.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin_negative3.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin_negative3.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin_negative3.q.out Wed Apr 10 00:06:55 2013
@@ -106,9 +106,11 @@ PREHOOK: Output: default@test4
POSTHOOK: query: load data local inpath '../data/files/srcbucket22.txt' INTO TABLE test4
POSTHOOK: type: LOAD
POSTHOOK: Output: default@test4
-PREHOOK: query: explain extended select /* + MAPJOIN(R) */ * from test1 L join test1 R on L.key=R.key AND L.value=R.value
+PREHOOK: query: -- should be allowed
+explain extended select /* + MAPJOIN(R) */ * from test1 L join test1 R on L.key=R.key AND L.value=R.value
PREHOOK: type: QUERY
-POSTHOOK: query: explain extended select /* + MAPJOIN(R) */ * from test1 L join test1 R on L.key=R.key AND L.value=R.value
+POSTHOOK: query: -- should be allowed
+explain extended select /* + MAPJOIN(R) */ * from test1 L join test1 R on L.key=R.key AND L.value=R.value
POSTHOOK: type: QUERY
ABSTRACT SYNTAX TREE:
(TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME test1) L) (TOK_TABREF (TOK_TABNAME test1) R) (AND (= (. (TOK_TABLE_OR_COL L) key) (. (TOK_TABLE_OR_COL R) key)) (= (. (TOK_TABLE_OR_COL L) value) (. (TOK_TABLE_OR_COL R) value))))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST R))) (TOK_SELEXPR TOK_ALLCOLREF))))
@@ -404,9 +406,11 @@ STAGE PLANS:
limit: -1
-PREHOOK: query: explain extended select /* + MAPJOIN(R) */ * from test1 L join test1 R on L.key+L.key=R.key
+PREHOOK: query: -- should not apply bucket mapjoin
+explain extended select /* + MAPJOIN(R) */ * from test1 L join test1 R on L.key+L.key=R.key
PREHOOK: type: QUERY
-POSTHOOK: query: explain extended select /* + MAPJOIN(R) */ * from test1 L join test1 R on L.key+L.key=R.key
+POSTHOOK: query: -- should not apply bucket mapjoin
+explain extended select /* + MAPJOIN(R) */ * from test1 L join test1 R on L.key+L.key=R.key
POSTHOOK: type: QUERY
ABSTRACT SYNTAX TREE:
(TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME test1) L) (TOK_TABREF (TOK_TABNAME test1) R) (= (+ (. (TOK_TABLE_OR_COL L) key) (. (TOK_TABLE_OR_COL L) key)) (. (TOK_TABLE_OR_COL R) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST R))) (TOK_SELEXPR TOK_ALLCOLREF))))
Modified: hive/trunk/ql/src/test/results/clientpositive/column_access_stats.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/column_access_stats.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/column_access_stats.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/column_access_stats.q.out Wed Apr 10 00:06:55 2013
@@ -1,4 +1,6 @@
-PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE
+PREHOOK: query: -- This test is used for testing the ColumnAccessAnalyzer
+
+CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
PREHOOK: query: LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1
PREHOOK: type: LOAD
@@ -7,7 +9,8 @@ PREHOOK: query: CREATE TABLE T2(key STRI
PREHOOK: type: CREATETABLE
PREHOOK: query: CREATE TABLE T3(key STRING, val STRING) STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
-PREHOOK: query: SELECT key FROM T1 ORDER BY key
+PREHOOK: query: -- Simple select queries
+SELECT key FROM T1 ORDER BY key
PREHOOK: type: QUERY
PREHOOK: Input: default@t1
#### A masked pattern was here ####
@@ -43,7 +46,8 @@ PREHOOK: Input: default@t1
1
1
1
-PREHOOK: query: EXPLAIN SELECT key FROM (SELECT key, val FROM T1) subq1 ORDER BY key
+PREHOOK: query: -- More complicated select queries
+EXPLAIN SELECT key FROM (SELECT key, val FROM T1) subq1 ORDER BY key
PREHOOK: type: QUERY
ABSTRACT SYNTAX TREE:
(TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME T1))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL val))))) subq1)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key))) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key)))))
@@ -183,7 +187,8 @@ Columns:key,val
24.0
26.0
36.0
-PREHOOK: query: EXPLAIN
+PREHOOK: query: -- Work with union
+EXPLAIN
SELECT * FROM (
SELECT key as c FROM T1
UNION ALL
@@ -385,7 +390,8 @@ Columns:key
8
8
8
-PREHOOK: query: FROM T1
+PREHOOK: query: -- Work with insert overwrite
+FROM T1
INSERT OVERWRITE TABLE T2 SELECT key, count(1) GROUP BY key
INSERT OVERWRITE TABLE T3 SELECT key, sum(val) GROUP BY key
PREHOOK: type: QUERY
@@ -395,7 +401,8 @@ PREHOOK: Output: default@t3
Table:default@t1
Columns:key,val
-PREHOOK: query: SELECT *
+PREHOOK: query: -- Simple joins
+SELECT *
FROM T1 JOIN T2
ON T1.key = T2.key
ORDER BY T1.key, T1.val, T2.key, T2.val
@@ -542,7 +549,8 @@ Columns:key,val
Table:default@t1
Columns:key,val
-PREHOOK: query: SELECT /*+ MAPJOIN(a) */ *
+PREHOOK: query: -- Map join
+SELECT /*+ MAPJOIN(a) */ *
FROM T1 a JOIN T2 b
ON a.key = b.key
ORDER BY a.key, a.val, b.key, b.val
@@ -562,7 +570,8 @@ Columns:key,val
7 17 7 1
8 18 8 2
8 28 8 2
-PREHOOK: query: EXPLAIN
+PREHOOK: query: -- More joins
+EXPLAIN
SELECT *
FROM T1 JOIN T2
ON T1.key = T2.key AND T1.val = 3 and T2.val = 3
@@ -837,7 +846,8 @@ Columns:key,val
Table:default@t1
Columns:key,val
-PREHOOK: query: EXPLAIN
+PREHOOK: query: -- Join followed by join
+EXPLAIN
SELECT *
FROM
(
Modified: hive/trunk/ql/src/test/results/clientpositive/combine2_hadoop20.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/combine2_hadoop20.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/combine2_hadoop20.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/combine2_hadoop20.q.out Wed Apr 10 00:06:55 2013
@@ -2,12 +2,28 @@ PREHOOK: query: USE default
PREHOOK: type: SWITCHDATABASE
POSTHOOK: query: USE default
POSTHOOK: type: SWITCHDATABASE
-PREHOOK: query: create table combine2(key string) partitioned by (value string)
+PREHOOK: query: -- EXCLUDE_OS_WINDOWS
+-- excluded on windows because of difference in file name encoding logic
+
+
+create table combine2(key string) partitioned by (value string)
PREHOOK: type: CREATETABLE
-POSTHOOK: query: create table combine2(key string) partitioned by (value string)
+POSTHOOK: query: -- EXCLUDE_OS_WINDOWS
+-- excluded on windows because of difference in file name encoding logic
+
+
+create table combine2(key string) partitioned by (value string)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: default@combine2
-PREHOOK: query: insert overwrite table combine2 partition(value)
+PREHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20, 0.20S)
+-- This test sets mapred.max.split.size=256 and hive.merge.smallfiles.avgsize=0
+-- in an attempt to force the generation of multiple splits and multiple output files.
+-- However, Hadoop 0.20 is incapable of generating splits smaller than the block size
+-- when using CombineFileInputFormat, so only one split is generated. This has a
+-- significant impact on the results of this test.
+-- This issue was fixed in MAPREDUCE-2046 which is included in 0.22.
+
+insert overwrite table combine2 partition(value)
select * from (
select key, value from src where key < 10
union all
@@ -17,7 +33,15 @@ select * from (
PREHOOK: type: QUERY
PREHOOK: Input: default@src
PREHOOK: Output: default@combine2
-POSTHOOK: query: insert overwrite table combine2 partition(value)
+POSTHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20, 0.20S)
+-- This test sets mapred.max.split.size=256 and hive.merge.smallfiles.avgsize=0
+-- in an attempt to force the generation of multiple splits and multiple output files.
+-- However, Hadoop 0.20 is incapable of generating splits smaller than the block size
+-- when using CombineFileInputFormat, so only one split is generated. This has a
+-- significant impact on the results of this test.
+-- This issue was fixed in MAPREDUCE-2046 which is included in 0.22.
+
+insert overwrite table combine2 partition(value)
select * from (
select key, value from src where key < 10
union all
Modified: hive/trunk/ql/src/test/results/clientpositive/compute_stats_binary.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/compute_stats_binary.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/compute_stats_binary.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/compute_stats_binary.q.out Wed Apr 10 00:06:55 2013
@@ -3,10 +3,12 @@ PREHOOK: type: CREATETABLE
POSTHOOK: query: create table tab_binary(a binary)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: default@tab_binary
-PREHOOK: query: LOAD DATA LOCAL INPATH "../data/files/binary.txt" INTO TABLE tab_binary
+PREHOOK: query: -- insert some data
+LOAD DATA LOCAL INPATH "../data/files/binary.txt" INTO TABLE tab_binary
PREHOOK: type: LOAD
PREHOOK: Output: default@tab_binary
-POSTHOOK: query: LOAD DATA LOCAL INPATH "../data/files/binary.txt" INTO TABLE tab_binary
+POSTHOOK: query: -- insert some data
+LOAD DATA LOCAL INPATH "../data/files/binary.txt" INTO TABLE tab_binary
POSTHOOK: type: LOAD
POSTHOOK: Output: default@tab_binary
PREHOOK: query: select count(*) from tab_binary
@@ -18,11 +20,13 @@ POSTHOOK: type: QUERY
POSTHOOK: Input: default@tab_binary
#### A masked pattern was here ####
10
-PREHOOK: query: select compute_stats(a, 16) from tab_binary
+PREHOOK: query: -- compute statistical summary of data
+select compute_stats(a, 16) from tab_binary
PREHOOK: type: QUERY
PREHOOK: Input: default@tab_binary
#### A masked pattern was here ####
-POSTHOOK: query: select compute_stats(a, 16) from tab_binary
+POSTHOOK: query: -- compute statistical summary of data
+select compute_stats(a, 16) from tab_binary
POSTHOOK: type: QUERY
POSTHOOK: Input: default@tab_binary
#### A masked pattern was here ####
Modified: hive/trunk/ql/src/test/results/clientpositive/compute_stats_boolean.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/compute_stats_boolean.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/compute_stats_boolean.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/compute_stats_boolean.q.out Wed Apr 10 00:06:55 2013
@@ -3,10 +3,12 @@ PREHOOK: type: CREATETABLE
POSTHOOK: query: create table tab_bool(a boolean)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: default@tab_bool
-PREHOOK: query: LOAD DATA LOCAL INPATH "../data/files/bool.txt" INTO TABLE tab_bool
+PREHOOK: query: -- insert some data
+LOAD DATA LOCAL INPATH "../data/files/bool.txt" INTO TABLE tab_bool
PREHOOK: type: LOAD
PREHOOK: Output: default@tab_bool
-POSTHOOK: query: LOAD DATA LOCAL INPATH "../data/files/bool.txt" INTO TABLE tab_bool
+POSTHOOK: query: -- insert some data
+LOAD DATA LOCAL INPATH "../data/files/bool.txt" INTO TABLE tab_bool
POSTHOOK: type: LOAD
POSTHOOK: Output: default@tab_bool
PREHOOK: query: select count(*) from tab_bool
@@ -18,11 +20,13 @@ POSTHOOK: type: QUERY
POSTHOOK: Input: default@tab_bool
#### A masked pattern was here ####
33
-PREHOOK: query: select compute_stats(a, 16) from tab_bool
+PREHOOK: query: -- compute statistical summary of data
+select compute_stats(a, 16) from tab_bool
PREHOOK: type: QUERY
PREHOOK: Input: default@tab_bool
#### A masked pattern was here ####
-POSTHOOK: query: select compute_stats(a, 16) from tab_bool
+POSTHOOK: query: -- compute statistical summary of data
+select compute_stats(a, 16) from tab_bool
POSTHOOK: type: QUERY
POSTHOOK: Input: default@tab_bool
#### A masked pattern was here ####
Modified: hive/trunk/ql/src/test/results/clientpositive/compute_stats_double.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/compute_stats_double.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/compute_stats_double.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/compute_stats_double.q.out Wed Apr 10 00:06:55 2013
@@ -3,10 +3,12 @@ PREHOOK: type: CREATETABLE
POSTHOOK: query: create table tab_double(a double)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: default@tab_double
-PREHOOK: query: LOAD DATA LOCAL INPATH "../data/files/double.txt" INTO TABLE tab_double
+PREHOOK: query: -- insert some data
+LOAD DATA LOCAL INPATH "../data/files/double.txt" INTO TABLE tab_double
PREHOOK: type: LOAD
PREHOOK: Output: default@tab_double
-POSTHOOK: query: LOAD DATA LOCAL INPATH "../data/files/double.txt" INTO TABLE tab_double
+POSTHOOK: query: -- insert some data
+LOAD DATA LOCAL INPATH "../data/files/double.txt" INTO TABLE tab_double
POSTHOOK: type: LOAD
POSTHOOK: Output: default@tab_double
PREHOOK: query: select count(*) from tab_double
@@ -18,11 +20,13 @@ POSTHOOK: type: QUERY
POSTHOOK: Input: default@tab_double
#### A masked pattern was here ####
16
-PREHOOK: query: select compute_stats(a, 16) from tab_double
+PREHOOK: query: -- compute statistical summary of data
+select compute_stats(a, 16) from tab_double
PREHOOK: type: QUERY
PREHOOK: Input: default@tab_double
#### A masked pattern was here ####
-POSTHOOK: query: select compute_stats(a, 16) from tab_double
+POSTHOOK: query: -- compute statistical summary of data
+select compute_stats(a, 16) from tab_double
POSTHOOK: type: QUERY
POSTHOOK: Input: default@tab_double
#### A masked pattern was here ####
Modified: hive/trunk/ql/src/test/results/clientpositive/compute_stats_empty_table.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/compute_stats_empty_table.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/compute_stats_empty_table.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/compute_stats_empty_table.q.out Wed Apr 10 00:06:55 2013
@@ -12,11 +12,13 @@ POSTHOOK: type: QUERY
POSTHOOK: Input: default@tab_empty
#### A masked pattern was here ####
0
-PREHOOK: query: select compute_stats(a, 16) from tab_empty
+PREHOOK: query: -- compute statistical summary of data
+select compute_stats(a, 16) from tab_empty
PREHOOK: type: QUERY
PREHOOK: Input: default@tab_empty
#### A masked pattern was here ####
-POSTHOOK: query: select compute_stats(a, 16) from tab_empty
+POSTHOOK: query: -- compute statistical summary of data
+select compute_stats(a, 16) from tab_empty
POSTHOOK: type: QUERY
POSTHOOK: Input: default@tab_empty
#### A masked pattern was here ####
Modified: hive/trunk/ql/src/test/results/clientpositive/compute_stats_long.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/compute_stats_long.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/compute_stats_long.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/compute_stats_long.q.out Wed Apr 10 00:06:55 2013
@@ -3,10 +3,12 @@ PREHOOK: type: CREATETABLE
POSTHOOK: query: create table tab_int(a int)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: default@tab_int
-PREHOOK: query: LOAD DATA LOCAL INPATH "../data/files/int.txt" INTO TABLE tab_int
+PREHOOK: query: -- insert some data
+LOAD DATA LOCAL INPATH "../data/files/int.txt" INTO TABLE tab_int
PREHOOK: type: LOAD
PREHOOK: Output: default@tab_int
-POSTHOOK: query: LOAD DATA LOCAL INPATH "../data/files/int.txt" INTO TABLE tab_int
+POSTHOOK: query: -- insert some data
+LOAD DATA LOCAL INPATH "../data/files/int.txt" INTO TABLE tab_int
POSTHOOK: type: LOAD
POSTHOOK: Output: default@tab_int
PREHOOK: query: select count(*) from tab_int
@@ -18,11 +20,13 @@ POSTHOOK: type: QUERY
POSTHOOK: Input: default@tab_int
#### A masked pattern was here ####
12
-PREHOOK: query: select compute_stats(a, 16) from tab_int
+PREHOOK: query: -- compute statistical summary of data
+select compute_stats(a, 16) from tab_int
PREHOOK: type: QUERY
PREHOOK: Input: default@tab_int
#### A masked pattern was here ####
-POSTHOOK: query: select compute_stats(a, 16) from tab_int
+POSTHOOK: query: -- compute statistical summary of data
+select compute_stats(a, 16) from tab_int
POSTHOOK: type: QUERY
POSTHOOK: Input: default@tab_int
#### A masked pattern was here ####
Modified: hive/trunk/ql/src/test/results/clientpositive/compute_stats_string.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/compute_stats_string.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/compute_stats_string.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/compute_stats_string.q.out Wed Apr 10 00:06:55 2013
@@ -3,10 +3,12 @@ PREHOOK: type: CREATETABLE
POSTHOOK: query: create table tab_string(a string)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: default@tab_string
-PREHOOK: query: LOAD DATA LOCAL INPATH "../data/files/string.txt" INTO TABLE tab_string
+PREHOOK: query: -- insert some data
+LOAD DATA LOCAL INPATH "../data/files/string.txt" INTO TABLE tab_string
PREHOOK: type: LOAD
PREHOOK: Output: default@tab_string
-POSTHOOK: query: LOAD DATA LOCAL INPATH "../data/files/string.txt" INTO TABLE tab_string
+POSTHOOK: query: -- insert some data
+LOAD DATA LOCAL INPATH "../data/files/string.txt" INTO TABLE tab_string
POSTHOOK: type: LOAD
POSTHOOK: Output: default@tab_string
PREHOOK: query: select count(*) from tab_string
@@ -18,11 +20,13 @@ POSTHOOK: type: QUERY
POSTHOOK: Input: default@tab_string
#### A masked pattern was here ####
10
-PREHOOK: query: select compute_stats(a, 16) from tab_string
+PREHOOK: query: -- compute statistical summary of data
+select compute_stats(a, 16) from tab_string
PREHOOK: type: QUERY
PREHOOK: Input: default@tab_string
#### A masked pattern was here ####
-POSTHOOK: query: select compute_stats(a, 16) from tab_string
+POSTHOOK: query: -- compute statistical summary of data
+select compute_stats(a, 16) from tab_string
POSTHOOK: type: QUERY
POSTHOOK: Input: default@tab_string
#### A masked pattern was here ####
Modified: hive/trunk/ql/src/test/results/clientpositive/convert_enum_to_string.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/convert_enum_to_string.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/convert_enum_to_string.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/convert_enum_to_string.q.out Wed Apr 10 00:06:55 2013
@@ -1,11 +1,15 @@
-PREHOOK: query: create table convert_enum_to_string
+PREHOOK: query: -- Ensure Enum fields are converted to strings (instead of struct<value:int>)
+
+create table convert_enum_to_string
partitioned by (b string)
row format serde "org.apache.hadoop.hive.serde2.thrift.ThriftDeserializer"
with serdeproperties (
"serialization.class"="org.apache.hadoop.hive.serde2.thrift.test.MegaStruct",
"serialization.format"="org.apache.thrift.protocol.TBinaryProtocol")
PREHOOK: type: CREATETABLE
-POSTHOOK: query: create table convert_enum_to_string
+POSTHOOK: query: -- Ensure Enum fields are converted to strings (instead of struct<value:int>)
+
+create table convert_enum_to_string
partitioned by (b string)
row format serde "org.apache.hadoop.hive.serde2.thrift.ThriftDeserializer"
with serdeproperties (
Modified: hive/trunk/ql/src/test/results/clientpositive/create_alter_list_bucketing_table1.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/create_alter_list_bucketing_table1.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/create_alter_list_bucketing_table1.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/create_alter_list_bucketing_table1.q.out Wed Apr 10 00:06:55 2013
@@ -1,7 +1,15 @@
-PREHOOK: query: CREATE TABLE if not exists stored_as_dirs_multiple (col1 STRING, col2 int, col3 STRING)
+PREHOOK: query: -- Test stored as directories
+-- it covers a few cases
+
+-- 1. create a table with stored as directories
+CREATE TABLE if not exists stored_as_dirs_multiple (col1 STRING, col2 int, col3 STRING)
SKEWED BY (col1, col2) ON (('s1',1), ('s3',3), ('s13',13), ('s78',78)) stored as DIRECTORIES
PREHOOK: type: CREATETABLE
-POSTHOOK: query: CREATE TABLE if not exists stored_as_dirs_multiple (col1 STRING, col2 int, col3 STRING)
+POSTHOOK: query: -- Test stored as directories
+-- it covers a few cases
+
+-- 1. create a table with stored as directories
+CREATE TABLE if not exists stored_as_dirs_multiple (col1 STRING, col2 int, col3 STRING)
SKEWED BY (col1, col2) ON (('s1',1), ('s3',3), ('s13',13), ('s78',78)) stored as DIRECTORIES
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: default@stored_as_dirs_multiple
@@ -38,11 +46,13 @@ Skewed Columns: [col1, col2]
Skewed Values: [[s1, 1], [s3, 3], [s13, 13], [s78, 78]]
Storage Desc Params:
serialization.format 1
-PREHOOK: query: alter table stored_as_dirs_multiple not stored as DIRECTORIES
+PREHOOK: query: -- 2. turn off stored as directories but table is still a skewed table
+alter table stored_as_dirs_multiple not stored as DIRECTORIES
PREHOOK: type: ALTERTABLE_SKEWED
PREHOOK: Input: default@stored_as_dirs_multiple
PREHOOK: Output: default@stored_as_dirs_multiple
-POSTHOOK: query: alter table stored_as_dirs_multiple not stored as DIRECTORIES
+POSTHOOK: query: -- 2. turn off stored as directories but table is still a skewed table
+alter table stored_as_dirs_multiple not stored as DIRECTORIES
POSTHOOK: type: ALTERTABLE_SKEWED
POSTHOOK: Input: default@stored_as_dirs_multiple
POSTHOOK: Output: default@stored_as_dirs_multiple
@@ -78,11 +88,13 @@ Skewed Columns: [col1, col2]
Skewed Values: [[s1, 1], [s3, 3], [s13, 13], [s78, 78]]
Storage Desc Params:
serialization.format 1
-PREHOOK: query: alter table stored_as_dirs_multiple not skewed
+PREHOOK: query: -- 3. turn off skewed
+alter table stored_as_dirs_multiple not skewed
PREHOOK: type: ALTERTABLE_SKEWED
PREHOOK: Input: default@stored_as_dirs_multiple
PREHOOK: Output: default@stored_as_dirs_multiple
-POSTHOOK: query: alter table stored_as_dirs_multiple not skewed
+POSTHOOK: query: -- 3. turn off skewed
+alter table stored_as_dirs_multiple not skewed
POSTHOOK: type: ALTERTABLE_SKEWED
POSTHOOK: Input: default@stored_as_dirs_multiple
POSTHOOK: Output: default@stored_as_dirs_multiple
@@ -116,9 +128,11 @@ Bucket Columns: []
Sort Columns: []
Storage Desc Params:
serialization.format 1
-PREHOOK: query: CREATE TABLE stored_as_dirs_single (key STRING, value STRING)
+PREHOOK: query: -- 4. alter a table to stored as directories
+CREATE TABLE stored_as_dirs_single (key STRING, value STRING)
PREHOOK: type: CREATETABLE
-POSTHOOK: query: CREATE TABLE stored_as_dirs_single (key STRING, value STRING)
+POSTHOOK: query: -- 4. alter a table to stored as directories
+CREATE TABLE stored_as_dirs_single (key STRING, value STRING)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: default@stored_as_dirs_single
PREHOOK: query: alter table stored_as_dirs_single SKEWED BY (key) ON ('1','5','6')
@@ -163,11 +177,13 @@ Skewed Columns: [key]
Skewed Values: [[1], [5], [6]]
Storage Desc Params:
serialization.format 1
-PREHOOK: query: alter table stored_as_dirs_single not skewed
+PREHOOK: query: -- 5. turn off skewed should turn off stored as directories too
+alter table stored_as_dirs_single not skewed
PREHOOK: type: ALTERTABLE_SKEWED
PREHOOK: Input: default@stored_as_dirs_single
PREHOOK: Output: default@stored_as_dirs_single
-POSTHOOK: query: alter table stored_as_dirs_single not skewed
+POSTHOOK: query: -- 5. turn off skewed should turn off stored as directories too
+alter table stored_as_dirs_single not skewed
POSTHOOK: type: ALTERTABLE_SKEWED
POSTHOOK: Input: default@stored_as_dirs_single
POSTHOOK: Output: default@stored_as_dirs_single
@@ -200,12 +216,14 @@ Bucket Columns: []
Sort Columns: []
Storage Desc Params:
serialization.format 1
-PREHOOK: query: alter table stored_as_dirs_single SKEWED BY (key) ON ('1','5','6')
+PREHOOK: query: -- 6. turn on stored as directories again
+alter table stored_as_dirs_single SKEWED BY (key) ON ('1','5','6')
stored as DIRECTORIES
PREHOOK: type: ALTERTABLE_SKEWED
PREHOOK: Input: default@stored_as_dirs_single
PREHOOK: Output: default@stored_as_dirs_single
-POSTHOOK: query: alter table stored_as_dirs_single SKEWED BY (key) ON ('1','5','6')
+POSTHOOK: query: -- 6. turn on stored as directories again
+alter table stored_as_dirs_single SKEWED BY (key) ON ('1','5','6')
stored as DIRECTORIES
POSTHOOK: type: ALTERTABLE_SKEWED
POSTHOOK: Input: default@stored_as_dirs_single
@@ -242,9 +260,11 @@ Skewed Columns: [key]
Skewed Values: [[1], [5], [6]]
Storage Desc Params:
serialization.format 1
-PREHOOK: query: create table stored_as_dirs_single_like like stored_as_dirs_single
+PREHOOK: query: -- 7. create table like
+create table stored_as_dirs_single_like like stored_as_dirs_single
PREHOOK: type: CREATETABLE
-POSTHOOK: query: create table stored_as_dirs_single_like like stored_as_dirs_single
+POSTHOOK: query: -- 7. create table like
+create table stored_as_dirs_single_like like stored_as_dirs_single
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: default@stored_as_dirs_single_like
PREHOOK: query: describe formatted stored_as_dirs_single_like
@@ -279,11 +299,13 @@ Skewed Columns: [key]
Skewed Values: [[1], [5], [6]]
Storage Desc Params:
serialization.format 1
-PREHOOK: query: drop table stored_as_dirs_single
+PREHOOK: query: -- cleanup
+drop table stored_as_dirs_single
PREHOOK: type: DROPTABLE
PREHOOK: Input: default@stored_as_dirs_single
PREHOOK: Output: default@stored_as_dirs_single
-POSTHOOK: query: drop table stored_as_dirs_single
+POSTHOOK: query: -- cleanup
+drop table stored_as_dirs_single
POSTHOOK: type: DROPTABLE
POSTHOOK: Input: default@stored_as_dirs_single
POSTHOOK: Output: default@stored_as_dirs_single
Modified: hive/trunk/ql/src/test/results/clientpositive/create_big_view.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/create_big_view.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/create_big_view.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/create_big_view.q.out Wed Apr 10 00:06:55 2013
@@ -2,7 +2,9 @@ PREHOOK: query: DROP VIEW big_view
PREHOOK: type: DROPVIEW
POSTHOOK: query: DROP VIEW big_view
POSTHOOK: type: DROPVIEW
-PREHOOK: query: CREATE VIEW big_view AS SELECT
+PREHOOK: query: -- Define a view with long SQL text to test metastore and other limits.
+
+CREATE VIEW big_view AS SELECT
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' AS a,
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
@@ -240,7 +242,9 @@ PREHOOK: query: CREATE VIEW big_view AS
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'
FROM src
PREHOOK: type: CREATEVIEW
-POSTHOOK: query: CREATE VIEW big_view AS SELECT
+POSTHOOK: query: -- Define a view with long SQL text to test metastore and other limits.
+
+CREATE VIEW big_view AS SELECT
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' AS a,
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
Modified: hive/trunk/ql/src/test/results/clientpositive/create_like2.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/create_like2.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/create_like2.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/create_like2.q.out Wed Apr 10 00:06:55 2013
@@ -1,6 +1,12 @@
-PREHOOK: query: CREATE TABLE table1(a INT, b STRING)
+PREHOOK: query: -- Tests the copying over of Table Parameters according to a HiveConf setting
+-- when doing a CREATE TABLE LIKE.
+
+CREATE TABLE table1(a INT, b STRING)
PREHOOK: type: CREATETABLE
-POSTHOOK: query: CREATE TABLE table1(a INT, b STRING)
+POSTHOOK: query: -- Tests the copying over of Table Parameters according to a HiveConf setting
+-- when doing a CREATE TABLE LIKE.
+
+CREATE TABLE table1(a INT, b STRING)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: default@table1
PREHOOK: query: ALTER TABLE table1 SET TBLPROPERTIES ('a'='1', 'b'='2', 'c'='3', 'd' = '4')
Modified: hive/trunk/ql/src/test/results/clientpositive/create_like_tbl_props.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/create_like_tbl_props.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/create_like_tbl_props.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/create_like_tbl_props.q.out Wed Apr 10 00:06:55 2013
@@ -1,6 +1,10 @@
-PREHOOK: query: CREATE TABLE test_table LIKE src TBLPROPERTIES('key'='value')
+PREHOOK: query: -- Test that CREATE TABLE LIKE commands can take explicit table properties
+
+CREATE TABLE test_table LIKE src TBLPROPERTIES('key'='value')
PREHOOK: type: CREATETABLE
-POSTHOOK: query: CREATE TABLE test_table LIKE src TBLPROPERTIES('key'='value')
+POSTHOOK: query: -- Test that CREATE TABLE LIKE commands can take explicit table properties
+
+CREATE TABLE test_table LIKE src TBLPROPERTIES('key'='value')
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: default@test_table
PREHOOK: query: DESC FORMATTED test_table
@@ -72,9 +76,13 @@ Bucket Columns: []
Sort Columns: []
Storage Desc Params:
serialization.format 1
-PREHOOK: query: CREATE TABLE test_table2 LIKE src TBLPROPERTIES('key2' = 'value2')
+PREHOOK: query: -- Test that CREATE TABLE LIKE commands can take default and explicit table properties
+
+CREATE TABLE test_table2 LIKE src TBLPROPERTIES('key2' = 'value2')
PREHOOK: type: CREATETABLE
-POSTHOOK: query: CREATE TABLE test_table2 LIKE src TBLPROPERTIES('key2' = 'value2')
+POSTHOOK: query: -- Test that CREATE TABLE LIKE commands can take default and explicit table properties
+
+CREATE TABLE test_table2 LIKE src TBLPROPERTIES('key2' = 'value2')
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: default@test_table2
PREHOOK: query: DESC FORMATTED test_table2
@@ -108,9 +116,13 @@ Bucket Columns: []
Sort Columns: []
Storage Desc Params:
serialization.format 1
-PREHOOK: query: CREATE TABLE test_table3 LIKE test_table2 TBLPROPERTIES('key2' = 'value3')
+PREHOOK: query: -- Test that properties inherited are overwritten by explicitly set ones
+
+CREATE TABLE test_table3 LIKE test_table2 TBLPROPERTIES('key2' = 'value3')
PREHOOK: type: CREATETABLE
-POSTHOOK: query: CREATE TABLE test_table3 LIKE test_table2 TBLPROPERTIES('key2' = 'value3')
+POSTHOOK: query: -- Test that properties inherited are overwritten by explicitly set ones
+
+CREATE TABLE test_table3 LIKE test_table2 TBLPROPERTIES('key2' = 'value3')
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: default@test_table3
PREHOOK: query: DESC FORMATTED test_table3
Modified: hive/trunk/ql/src/test/results/clientpositive/create_like_view.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/create_like_view.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/create_like_view.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/create_like_view.q.out Wed Apr 10 00:06:55 2013
@@ -239,9 +239,11 @@ POSTHOOK: Lineage: table1.a SIMPLE [(src
POSTHOOK: Lineage: table1.b SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
POSTHOOK: Lineage: table2.a SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: table2.b SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: create view view1 partitioned on (ds, hr) as select * from srcpart
+PREHOOK: query: -- check partitions
+create view view1 partitioned on (ds, hr) as select * from srcpart
PREHOOK: type: CREATEVIEW
-POSTHOOK: query: create view view1 partitioned on (ds, hr) as select * from srcpart
+POSTHOOK: query: -- check partitions
+create view view1 partitioned on (ds, hr) as select * from srcpart
POSTHOOK: type: CREATEVIEW
POSTHOOK: Output: default@view1
POSTHOOK: Lineage: table1.a SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
Modified: hive/trunk/ql/src/test/results/clientpositive/create_or_replace_view.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/create_or_replace_view.q.out?rev=1466300&r1=1466299&r2=1466300&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/create_or_replace_view.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/create_or_replace_view.q.out Wed Apr 10 00:06:55 2013
@@ -39,9 +39,11 @@ Sort Columns: []
# View Information
View Original Text: select * from srcpart
View Expanded Text: select `srcpart`.`key`, `srcpart`.`value`, `srcpart`.`ds`, `srcpart`.`hr` from `default`.`srcpart`
-PREHOOK: query: create or replace view v partitioned on (ds, hr) as select * from srcpart
+PREHOOK: query: -- modifying definition of unpartitioned view
+create or replace view v partitioned on (ds, hr) as select * from srcpart
PREHOOK: type: CREATEVIEW
-POSTHOOK: query: create or replace view v partitioned on (ds, hr) as select * from srcpart
+POSTHOOK: query: -- modifying definition of unpartitioned view
+create or replace view v partitioned on (ds, hr) as select * from srcpart
POSTHOOK: type: CREATEVIEW
POSTHOOK: Output: default@v
PREHOOK: query: alter view v add partition (ds='2008-04-08',hr='11')
@@ -123,9 +125,11 @@ POSTHOOK: query: show partitions v
POSTHOOK: type: SHOWPARTITIONS
ds=2008-04-08/hr=11
ds=2008-04-08/hr=12
-PREHOOK: query: create or replace view v partitioned on (ds, hr) as select value, ds, hr from srcpart
+PREHOOK: query: -- altering partitioned view 1
+create or replace view v partitioned on (ds, hr) as select value, ds, hr from srcpart
PREHOOK: type: CREATEVIEW
-POSTHOOK: query: create or replace view v partitioned on (ds, hr) as select value, ds, hr from srcpart
+POSTHOOK: query: -- altering partitioned view 1
+create or replace view v partitioned on (ds, hr) as select value, ds, hr from srcpart
POSTHOOK: type: CREATEVIEW
POSTHOOK: Output: default@v
PREHOOK: query: select * from v where value='val_409' and ds='2008-04-08' and hr='11'
@@ -184,9 +188,11 @@ POSTHOOK: query: show partitions v
POSTHOOK: type: SHOWPARTITIONS
ds=2008-04-08/hr=11
ds=2008-04-08/hr=12
-PREHOOK: query: create or replace view v partitioned on (ds, hr) as select key, value, ds, hr from srcpart
+PREHOOK: query: -- altering partitioned view 2
+create or replace view v partitioned on (ds, hr) as select key, value, ds, hr from srcpart
PREHOOK: type: CREATEVIEW
-POSTHOOK: query: create or replace view v partitioned on (ds, hr) as select key, value, ds, hr from srcpart
+POSTHOOK: query: -- altering partitioned view 2
+create or replace view v partitioned on (ds, hr) as select key, value, ds, hr from srcpart
POSTHOOK: type: CREATEVIEW
POSTHOOK: Output: default@v
PREHOOK: query: select * from v where value='val_409' and ds='2008-04-08' and hr='11'
@@ -254,9 +260,11 @@ POSTHOOK: query: drop view v
POSTHOOK: type: DROPVIEW
POSTHOOK: Input: default@v
POSTHOOK: Output: default@v
-PREHOOK: query: create table srcpart_temp like srcpart
+PREHOOK: query: -- updating to fix view with invalid definition
+create table srcpart_temp like srcpart
PREHOOK: type: CREATETABLE
-POSTHOOK: query: create table srcpart_temp like srcpart
+POSTHOOK: query: -- updating to fix view with invalid definition
+create table srcpart_temp like srcpart
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: default@srcpart_temp
PREHOOK: query: create view v partitioned on (ds, hr) as select * from srcpart_temp
@@ -272,9 +280,11 @@ POSTHOOK: query: drop table srcpart_temp
POSTHOOK: type: DROPTABLE
POSTHOOK: Input: default@srcpart_temp
POSTHOOK: Output: default@srcpart_temp
-PREHOOK: query: create or replace view v partitioned on (ds, hr) as select * from srcpart
+PREHOOK: query: -- v is now invalid
+create or replace view v partitioned on (ds, hr) as select * from srcpart
PREHOOK: type: CREATEVIEW
-POSTHOOK: query: create or replace view v partitioned on (ds, hr) as select * from srcpart
+POSTHOOK: query: -- v is now invalid
+create or replace view v partitioned on (ds, hr) as select * from srcpart
POSTHOOK: type: CREATEVIEW
POSTHOOK: Output: default@v
PREHOOK: query: describe formatted v