You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hive.apache.org by xu...@apache.org on 2014/10/27 20:57:03 UTC

svn commit: r1634671 [15/46] - in /hive/branches/spark: itests/src/test/resources/ ql/src/test/results/clientpositive/spark/

Added: hive/branches/spark/ql/src/test/results/clientpositive/spark/filter_join_breaktask2.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/filter_join_breaktask2.q.out?rev=1634671&view=auto
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/filter_join_breaktask2.q.out (added)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/filter_join_breaktask2.q.out Mon Oct 27 19:56:58 2014
@@ -0,0 +1,271 @@
+PREHOOK: query: create table T1(c1 string, c2 string, c3 string, c4 string, c5 string, c6 string, c7 string) 
+partitioned by (ds string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@T1
+POSTHOOK: query: create table T1(c1 string, c2 string, c3 string, c4 string, c5 string, c6 string, c7 string) 
+partitioned by (ds string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@T1
+PREHOOK: query: create table T2(c1 string, c2 string, c3 string, c0 string, c4 string, c5 string, c6 string, c7 string, c8 string, c9 string, c10 string, c11 string, c12 string, c13 string, c14 string, c15 string, c16 string, c17 string, c18 string, c19 string, c20 string, c21 string, c22 string, c23 string, c24 string,  c25 string) partitioned by (ds string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@T2
+POSTHOOK: query: create table T2(c1 string, c2 string, c3 string, c0 string, c4 string, c5 string, c6 string, c7 string, c8 string, c9 string, c10 string, c11 string, c12 string, c13 string, c14 string, c15 string, c16 string, c17 string, c18 string, c19 string, c20 string, c21 string, c22 string, c23 string, c24 string,  c25 string) partitioned by (ds string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@T2
+PREHOOK: query: create table T3 (c0 bigint,  c1 bigint, c2 int) partitioned by (ds string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@T3
+POSTHOOK: query: create table T3 (c0 bigint,  c1 bigint, c2 int) partitioned by (ds string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@T3
+PREHOOK: query: create table T4 (c0 bigint, c1 string, c2 string, c3 string, c4 string, c5 string, c6 string, c7 string, c8 string, c9 string, c10 string, c11 string, c12 string, c13 string, c14 string, c15 string, c16 string, c17 string, c18 string, c19 string, c20 string, c21 string, c22 string, c23 string, c24 string, c25 string, c26 string, c27 string, c28 string, c29 string, c30 string, c31 string, c32 string, c33 string, c34 string, c35 string, c36 string, c37 string, c38 string, c39 string, c40 string, c41 string, c42 string, c43 string, c44 string, c45 string, c46 string, c47 string, c48 string, c49 string, c50 string, c51 string, c52 string, c53 string, c54 string, c55 string, c56 string, c57 string, c58 string, c59 string, c60 string, c61 string, c62 string, c63 string, c64 string, c65 string, c66 string, c67 bigint, c68 string, c69 string, c70 bigint, c71 bigint, c72 bigint, c73 string, c74 string, c75 string, c76 string, c77 string, c78 string, c79 string, c80 string, c81 bigint, c82 bigint, c83 bigint) partitioned by (ds string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@T4
+POSTHOOK: query: create table T4 (c0 bigint, c1 string, c2 string, c3 string, c4 string, c5 string, c6 string, c7 string, c8 string, c9 string, c10 string, c11 string, c12 string, c13 string, c14 string, c15 string, c16 string, c17 string, c18 string, c19 string, c20 string, c21 string, c22 string, c23 string, c24 string, c25 string, c26 string, c27 string, c28 string, c29 string, c30 string, c31 string, c32 string, c33 string, c34 string, c35 string, c36 string, c37 string, c38 string, c39 string, c40 string, c41 string, c42 string, c43 string, c44 string, c45 string, c46 string, c47 string, c48 string, c49 string, c50 string, c51 string, c52 string, c53 string, c54 string, c55 string, c56 string, c57 string, c58 string, c59 string, c60 string, c61 string, c62 string, c63 string, c64 string, c65 string, c66 string, c67 bigint, c68 string, c69 string, c70 bigint, c71 bigint, c72 bigint, c73 string, c74 string, c75 string, c76 string, c77 string, c78 string, c79 string, c80 string, c81 bigint, c82 bigint, c83 bigint) partitioned by (ds string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@T4
+PREHOOK: query: insert overwrite table T1 partition (ds='2010-04-17') select '5', '1', '1', '1',  0, 0,4 from src tablesample (1 rows)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@t1@ds=2010-04-17
+[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
+POSTHOOK: query: insert overwrite table T1 partition (ds='2010-04-17') select '5', '1', '1', '1',  0, 0,4 from src tablesample (1 rows)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@t1@ds=2010-04-17
+POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c1 SIMPLE []
+POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c2 SIMPLE []
+POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c3 SIMPLE []
+POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c4 SIMPLE []
+POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c5 SIMPLE []
+POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c6 SIMPLE []
+POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c7 SIMPLE []
+PREHOOK: query: insert overwrite table T2 partition(ds='2010-04-17') select '5','name', NULL, '2', 'kavin',NULL, '9', 'c', '8', '0', '0', '7', '1','2', '0', '3','2', NULL, '1', NULL, '3','2','0','0','5','10' from src tablesample (1 rows)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@t2@ds=2010-04-17
+[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
+POSTHOOK: query: insert overwrite table T2 partition(ds='2010-04-17') select '5','name', NULL, '2', 'kavin',NULL, '9', 'c', '8', '0', '0', '7', '1','2', '0', '3','2', NULL, '1', NULL, '3','2','0','0','5','10' from src tablesample (1 rows)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@t2@ds=2010-04-17
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c0 SIMPLE []
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c1 SIMPLE []
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c10 SIMPLE []
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c11 SIMPLE []
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c12 SIMPLE []
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c13 SIMPLE []
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c14 SIMPLE []
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c15 SIMPLE []
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c16 SIMPLE []
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c17 SIMPLE []
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c18 SIMPLE []
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c19 SIMPLE []
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c2 SIMPLE []
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c20 SIMPLE []
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c21 SIMPLE []
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c22 SIMPLE []
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c23 SIMPLE []
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c24 SIMPLE []
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c25 SIMPLE []
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c3 SIMPLE []
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c4 SIMPLE []
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c5 SIMPLE []
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c6 SIMPLE []
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c7 SIMPLE []
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c8 SIMPLE []
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c9 SIMPLE []
+PREHOOK: query: insert overwrite table T3 partition (ds='2010-04-17') select 4,5,0 from src tablesample (1 rows)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@t3@ds=2010-04-17
+[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
+POSTHOOK: query: insert overwrite table T3 partition (ds='2010-04-17') select 4,5,0 from src tablesample (1 rows)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@t3@ds=2010-04-17
+POSTHOOK: Lineage: t3 PARTITION(ds=2010-04-17).c0 EXPRESSION []
+POSTHOOK: Lineage: t3 PARTITION(ds=2010-04-17).c1 EXPRESSION []
+POSTHOOK: Lineage: t3 PARTITION(ds=2010-04-17).c2 SIMPLE []
+PREHOOK: query: insert overwrite table T4 partition(ds='2010-04-17') 
+select 4,'1','1','8','4','5','1','0','9','U','2','2', '0','2','1','1','J','C','A','U', '2','s', '2',NULL, NULL, NULL,NULL, NULL, NULL,'1','j', 'S', '6',NULL,'1', '2', 'J', 'g', '1', 'e', '2', '1', '2', 'U', 'P', 'p', '3', '0', '0', '0', '1', '1', '1', '0', '0', '0', '6', '2', 'j',NULL, NULL, NULL,NULL,NULL, NULL, '5',NULL, 'j', 'j', 2, 2, 1, '2', '2', '1', '1', '1', '1', '1', '1', 1, 1, 32,NULL from src limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@t4@ds=2010-04-17
+[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
+POSTHOOK: query: insert overwrite table T4 partition(ds='2010-04-17') 
+select 4,'1','1','8','4','5','1','0','9','U','2','2', '0','2','1','1','J','C','A','U', '2','s', '2',NULL, NULL, NULL,NULL, NULL, NULL,'1','j', 'S', '6',NULL,'1', '2', 'J', 'g', '1', 'e', '2', '1', '2', 'U', 'P', 'p', '3', '0', '0', '0', '1', '1', '1', '0', '0', '0', '6', '2', 'j',NULL, NULL, NULL,NULL,NULL, NULL, '5',NULL, 'j', 'j', 2, 2, 1, '2', '2', '1', '1', '1', '1', '1', '1', 1, 1, 32,NULL from src limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@t4@ds=2010-04-17
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c0 EXPRESSION []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c1 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c10 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c11 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c12 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c13 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c14 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c15 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c16 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c17 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c18 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c19 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c2 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c20 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c21 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c22 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c23 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c24 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c25 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c26 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c27 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c28 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c29 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c3 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c30 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c31 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c32 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c33 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c34 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c35 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c36 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c37 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c38 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c39 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c4 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c40 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c41 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c42 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c43 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c44 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c45 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c46 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c47 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c48 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c49 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c5 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c50 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c51 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c52 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c53 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c54 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c55 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c56 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c57 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c58 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c59 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c6 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c60 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c61 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c62 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c63 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c64 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c65 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c66 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c67 EXPRESSION []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c68 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c69 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c7 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c70 EXPRESSION []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c71 EXPRESSION []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c72 EXPRESSION []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c73 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c74 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c75 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c76 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c77 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c78 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c79 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c8 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c80 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c81 EXPRESSION []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c82 EXPRESSION []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c83 EXPRESSION []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c9 SIMPLE []
+PREHOOK: query: select * from T2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@ds=2010-04-17
+#### A masked pattern was here ####
+POSTHOOK: query: select * from T2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@ds=2010-04-17
+#### A masked pattern was here ####
+5	name	NULL	2	kavin	NULL	9	c	8	0	0	7	1	2	0	3	2	NULL	1	NULL	3	2	0	0	5	10	2010-04-17
+PREHOOK: query: select * from T1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@ds=2010-04-17
+#### A masked pattern was here ####
+POSTHOOK: query: select * from T1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@ds=2010-04-17
+#### A masked pattern was here ####
+5	1	1	1	0	0	4	2010-04-17
+PREHOOK: query: select * from T3
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t3
+PREHOOK: Input: default@t3@ds=2010-04-17
+#### A masked pattern was here ####
+POSTHOOK: query: select * from T3
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t3
+POSTHOOK: Input: default@t3@ds=2010-04-17
+#### A masked pattern was here ####
+4	5	0	2010-04-17
+PREHOOK: query: select * from T4
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t4
+PREHOOK: Input: default@t4@ds=2010-04-17
+#### A masked pattern was here ####
+POSTHOOK: query: select * from T4
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t4
+POSTHOOK: Input: default@t4@ds=2010-04-17
+#### A masked pattern was here ####
+4	1	1	8	4	5	1	0	9	U	2	2	0	2	1	1	J	C	A	U	2	s	2	NULL	NULL	NULL	NULL	NULL	NULL	1	j	S	6	NULL	1	2	J	g	1	e	2	1	2	U	P	p	3	0	0	0	1	1	1	0	0	0	6	2	j	NULL	NULL	NULL	NULL	NULL	NULL	5	NULL	NULL	j	2	2	1	2	2	1	1	1	1	1	1	1	1	32	NULL	2010-04-17
+PREHOOK: query: SELECT a.c1 as a_c1, b.c1 b_c1, d.c0 as d_c0
+FROM T1 a JOIN T2 b 
+       ON (a.c1 = b.c1 AND a.ds='2010-04-17' AND b.ds='2010-04-17')
+     JOIN T3 c 
+       ON (a.c1 = c.c1 AND a.ds='2010-04-17' AND c.ds='2010-04-17')
+     JOIN T4 d 
+       ON (c.c0 = d.c0 AND c.ds='2010-04-17' AND d.ds='2010-04-17')
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@ds=2010-04-17
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@ds=2010-04-17
+PREHOOK: Input: default@t3
+PREHOOK: Input: default@t3@ds=2010-04-17
+PREHOOK: Input: default@t4
+PREHOOK: Input: default@t4@ds=2010-04-17
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT a.c1 as a_c1, b.c1 b_c1, d.c0 as d_c0
+FROM T1 a JOIN T2 b 
+       ON (a.c1 = b.c1 AND a.ds='2010-04-17' AND b.ds='2010-04-17')
+     JOIN T3 c 
+       ON (a.c1 = c.c1 AND a.ds='2010-04-17' AND c.ds='2010-04-17')
+     JOIN T4 d 
+       ON (c.c0 = d.c0 AND c.ds='2010-04-17' AND d.ds='2010-04-17')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@ds=2010-04-17
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@ds=2010-04-17
+POSTHOOK: Input: default@t3
+POSTHOOK: Input: default@t3@ds=2010-04-17
+POSTHOOK: Input: default@t4
+POSTHOOK: Input: default@t4@ds=2010-04-17
+#### A masked pattern was here ####
+5	5	4

Added: hive/branches/spark/ql/src/test/results/clientpositive/spark/index_auto_self_join.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/index_auto_self_join.q.out?rev=1634671&view=auto
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/index_auto_self_join.q.out (added)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/index_auto_self_join.q.out Mon Oct 27 19:56:58 2014
@@ -0,0 +1,215 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+-- try the query without indexing, with manual indexing, and with automatic indexing
+
+EXPLAIN SELECT a.key, b.key FROM src a JOIN src b ON (a.value = b.value) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90
+PREHOOK: type: QUERY
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+-- try the query without indexing, with manual indexing, and with automatic indexing
+
+EXPLAIN SELECT a.key, b.key FROM src a JOIN src b ON (a.value = b.value) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP PARTITION-LEVEL SORT, 1), Map 3 (GROUP PARTITION-LEVEL SORT, 1)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: ((value is not null and (key > 70)) and (key < 90)) (type: boolean)
+                    Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: value (type: string)
+                      sort order: +
+                      Map-reduce partition columns: value (type: string)
+                      Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
+                      value expressions: key (type: string)
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: ((value is not null and (key > 80)) and (key < 100)) (type: boolean)
+                    Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: value (type: string)
+                      sort order: +
+                      Map-reduce partition columns: value (type: string)
+                      Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
+                      value expressions: key (type: string)
+        Reducer 2 
+            Reduce Operator Tree:
+              Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                condition expressions:
+                  0 {VALUE._col0}
+                  1 {VALUE._col0}
+                outputColumnNames: _col0, _col5
+                Statistics: Num rows: 29 Data size: 314 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: string), _col5 (type: string)
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 29 Data size: 314 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 29 Data size: 314 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT a.key, b.key FROM src a JOIN src b ON (a.value = b.value) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT a.key, b.key FROM src a JOIN src b ON (a.value = b.value) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+82	82
+83	83
+83	83
+83	83
+83	83
+84	84
+84	84
+84	84
+84	84
+85	85
+86	86
+87	87
+PREHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD
+PREHOOK: type: CREATEINDEX
+PREHOOK: Input: default@src
+POSTHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD
+POSTHOOK: type: CREATEINDEX
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@default__src_src_index__
+PREHOOK: query: ALTER INDEX src_index ON src REBUILD
+PREHOOK: type: ALTERINDEX_REBUILD
+PREHOOK: Input: default@src
+PREHOOK: Output: default@default__src_src_index__
+POSTHOOK: query: ALTER INDEX src_index ON src REBUILD
+POSTHOOK: type: ALTERINDEX_REBUILD
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@default__src_src_index__
+POSTHOOK: Lineage: default__src_src_index__._bitmaps EXPRESSION [(src)src.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ]
+POSTHOOK: Lineage: default__src_src_index__._bucketname SIMPLE [(src)src.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
+POSTHOOK: Lineage: default__src_src_index__._offset SIMPLE [(src)src.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
+POSTHOOK: Lineage: default__src_src_index__.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: EXPLAIN SELECT a.key, b.key FROM src a JOIN src b ON (a.value = b.value) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT a.key, b.key FROM src a JOIN src b ON (a.value = b.value) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP PARTITION-LEVEL SORT, 1), Map 3 (GROUP PARTITION-LEVEL SORT, 1)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  filterExpr: ((value is not null and (key > 70)) and (key < 90)) (type: boolean)
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: ((value is not null and (key > 70)) and (key < 90)) (type: boolean)
+                    Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: value (type: string)
+                      sort order: +
+                      Map-reduce partition columns: value (type: string)
+                      Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
+                      value expressions: key (type: string)
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  filterExpr: ((value is not null and (key > 80)) and (key < 100)) (type: boolean)
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: ((value is not null and (key > 80)) and (key < 100)) (type: boolean)
+                    Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: value (type: string)
+                      sort order: +
+                      Map-reduce partition columns: value (type: string)
+                      Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
+                      value expressions: key (type: string)
+        Reducer 2 
+            Reduce Operator Tree:
+              Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                condition expressions:
+                  0 {VALUE._col0}
+                  1 {VALUE._col0}
+                outputColumnNames: _col0, _col5
+                Statistics: Num rows: 29 Data size: 314 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: string), _col5 (type: string)
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 29 Data size: 314 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 29 Data size: 314 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT a.key, b.key FROM src a JOIN src b ON (a.value = b.value) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT a.key, b.key FROM src a JOIN src b ON (a.value = b.value) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+82	82
+83	83
+83	83
+83	83
+83	83
+84	84
+84	84
+84	84
+84	84
+85	85
+86	86
+87	87
+PREHOOK: query: DROP INDEX src_index on src
+PREHOOK: type: DROPINDEX
+PREHOOK: Input: default@src
+POSTHOOK: query: DROP INDEX src_index on src
+POSTHOOK: type: DROPINDEX
+POSTHOOK: Input: default@src

Added: hive/branches/spark/ql/src/test/results/clientpositive/spark/infer_bucket_sort_convert_join.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/infer_bucket_sort_convert_join.q.out?rev=1634671&view=auto
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/infer_bucket_sort_convert_join.q.out (added)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/infer_bucket_sort_convert_join.q.out Mon Oct 27 19:56:58 2014
@@ -0,0 +1,132 @@
+PREHOOK: query: -- This tests inferring how data is bucketed/sorted from the operators in the reducer
+-- and populating that information in partitions' metadata.  In particular, those cases
+-- where joins may be auto converted to map joins.
+
+CREATE TABLE test_table (key STRING, value STRING) PARTITIONED BY (part STRING)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@test_table
+POSTHOOK: query: -- This tests inferring how data is bucketed/sorted from the operators in the reducer
+-- and populating that information in partitions' metadata.  In particular, those cases
+-- where joins may be auto converted to map joins.
+
+CREATE TABLE test_table (key STRING, value STRING) PARTITIONED BY (part STRING)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@test_table
+PREHOOK: query: -- Tests a join which is converted to a map join, the output should be neither bucketed nor sorted
+INSERT OVERWRITE TABLE test_table PARTITION (part = '1') 
+SELECT a.key, b.value FROM src a JOIN src b ON a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@test_table@part=1
+[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
+POSTHOOK: query: -- Tests a join which is converted to a map join, the output should be neither bucketed nor sorted
+INSERT OVERWRITE TABLE test_table PARTITION (part = '1') 
+SELECT a.key, b.value FROM src a JOIN src b ON a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@test_table@part=1
+POSTHOOK: Lineage: test_table PARTITION(part=1).key SIMPLE [(src)a.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table PARTITION(part=1).value SIMPLE [(src)b.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '1')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@test_table
+POSTHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '1')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@test_table
+# col_name            	data_type           	comment             
+	 	 
+key                 	string              	                    
+value               	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+	 	 
+part                	string              	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[1]                 	 
+Database:           	default             	 
+Table:              	test_table          	 
+#### A masked pattern was here ####
+Protect Mode:       	None                	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	true                
+	numFiles            	1                   
+	numRows             	-1                  
+	rawDataSize         	-1                  
+	totalSize           	11996               
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: -- This test tests the scenario when the mapper dies. So, create a conditional task for the mapjoin.
+-- Tests a join which is not converted to a map join, the output should be bucketed and sorted.
+
+INSERT OVERWRITE TABLE test_table PARTITION (part = '1') 
+SELECT a.key, b.value FROM src a JOIN src b ON a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@test_table@part=1
+[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
+POSTHOOK: query: -- This test tests the scenario when the mapper dies. So, create a conditional task for the mapjoin.
+-- Tests a join which is not converted to a map join, the output should be bucketed and sorted.
+
+INSERT OVERWRITE TABLE test_table PARTITION (part = '1') 
+SELECT a.key, b.value FROM src a JOIN src b ON a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@test_table@part=1
+POSTHOOK: Lineage: test_table PARTITION(part=1).key SIMPLE [(src)a.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table PARTITION(part=1).value SIMPLE [(src)b.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '1')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@test_table
+POSTHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '1')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@test_table
+# col_name            	data_type           	comment             
+	 	 
+key                 	string              	                    
+value               	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+	 	 
+part                	string              	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[1]                 	 
+Database:           	default             	 
+Table:              	test_table          	 
+#### A masked pattern was here ####
+Protect Mode:       	None                	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	true                
+	numFiles            	1                   
+	numRows             	-1                  
+	rawDataSize         	-1                  
+	totalSize           	11996               
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   

Added: hive/branches/spark/ql/src/test/results/clientpositive/spark/join18_multi_distinct.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/join18_multi_distinct.q.out?rev=1634671&view=auto
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/join18_multi_distinct.q.out (added)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/join18_multi_distinct.q.out Mon Oct 27 19:56:58 2014
@@ -0,0 +1,492 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+EXPLAIN
+ SELECT a.key, a.value, b.key, b.value1,  b.value2
+ FROM 
+  (
+  SELECT src1.key as key, count(src1.value) AS value FROM src src1 group by src1.key
+  ) a
+ FULL OUTER JOIN 
+ (
+  SELECT src2.key as key, count(distinct(src2.value)) AS value1, 
+  count(distinct(src2.key)) AS value2
+  FROM src1 src2 group by src2.key
+ ) b 
+ ON (a.key = b.key)
+PREHOOK: type: QUERY
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+EXPLAIN
+ SELECT a.key, a.value, b.key, b.value1,  b.value2
+ FROM 
+  (
+  SELECT src1.key as key, count(src1.value) AS value FROM src src1 group by src1.key
+  ) a
+ FULL OUTER JOIN 
+ (
+  SELECT src2.key as key, count(distinct(src2.value)) AS value1, 
+  count(distinct(src2.key)) AS value2
+  FROM src1 src2 group by src2.key
+ ) b 
+ ON (a.key = b.key)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP SORT, 1)
+        Reducer 3 <- Reducer 2 (GROUP PARTITION-LEVEL SORT, 1), Reducer 5 (GROUP PARTITION-LEVEL SORT, 1)
+        Reducer 5 <- Map 4 (GROUP, 1)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src2
+                  Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: string), value (type: string)
+                    outputColumnNames: key, value
+                    Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      aggregations: count(DISTINCT value), count(DISTINCT key)
+                      keys: key (type: string), value (type: string)
+                      mode: hash
+                      outputColumnNames: _col0, _col1, _col2, _col3
+                      Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string), _col1 (type: string)
+                        sort order: ++
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: src1
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: string), value (type: string)
+                    outputColumnNames: key, value
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      aggregations: count(value)
+                      keys: key (type: string)
+                      mode: hash
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col1 (type: bigint)
+        Reducer 2 
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(DISTINCT KEY._col1:0._col0), count(DISTINCT KEY._col1:1._col0)
+                keys: KEY._col0 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 12 Data size: 91 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: bigint)
+                  outputColumnNames: _col0, _col1, _col2
+                  Statistics: Num rows: 12 Data size: 91 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: string)
+                    sort order: +
+                    Map-reduce partition columns: _col0 (type: string)
+                    Statistics: Num rows: 12 Data size: 91 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: _col1 (type: bigint), _col2 (type: bigint)
+        Reducer 3 
+            Reduce Operator Tree:
+              Join Operator
+                condition map:
+                     Outer Join 0 to 1
+                condition expressions:
+                  0 {KEY.reducesinkkey0} {VALUE._col0}
+                  1 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1}
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string), _col3 (type: bigint), _col4 (type: bigint)
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                  Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+        Reducer 5 
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                keys: KEY._col0 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: string), _col1 (type: bigint)
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: string)
+                    sort order: +
+                    Map-reduce partition columns: _col0 (type: string)
+                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: _col1 (type: bigint)
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT a.key, a.value, b.key, b.value1,  b.value2
+ FROM 
+  (
+  SELECT src1.key as key, count(src1.value) AS value FROM src src1 group by src1.key
+  ) a
+ FULL OUTER JOIN 
+ (
+  SELECT src2.key as key, count(distinct(src2.value)) AS value1,
+  count(distinct(src2.key)) AS value2
+  FROM src1 src2 group by src2.key
+ ) b 
+ ON (a.key = b.key)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@src1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT a.key, a.value, b.key, b.value1,  b.value2
+ FROM 
+  (
+  SELECT src1.key as key, count(src1.value) AS value FROM src src1 group by src1.key
+  ) a
+ FULL OUTER JOIN 
+ (
+  SELECT src2.key as key, count(distinct(src2.value)) AS value1,
+  count(distinct(src2.key)) AS value2
+  FROM src1 src2 group by src2.key
+ ) b 
+ ON (a.key = b.key)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@src1
+#### A masked pattern was here ####
+0	3	NULL	NULL	NULL
+10	1	NULL	NULL	NULL
+100	2	NULL	NULL	NULL
+103	2	NULL	NULL	NULL
+104	2	NULL	NULL	NULL
+105	1	NULL	NULL	NULL
+11	1	NULL	NULL	NULL
+111	1	NULL	NULL	NULL
+113	2	NULL	NULL	NULL
+114	1	NULL	NULL	NULL
+116	1	NULL	NULL	NULL
+118	2	NULL	NULL	NULL
+119	3	NULL	NULL	NULL
+12	2	NULL	NULL	NULL
+120	2	NULL	NULL	NULL
+125	2	NULL	NULL	NULL
+126	1	NULL	NULL	NULL
+128	3	128	1	1
+129	2	NULL	NULL	NULL
+131	1	NULL	NULL	NULL
+133	1	NULL	NULL	NULL
+134	2	NULL	NULL	NULL
+136	1	NULL	NULL	NULL
+137	2	NULL	NULL	NULL
+138	4	NULL	NULL	NULL
+143	1	NULL	NULL	NULL
+145	1	NULL	NULL	NULL
+146	2	146	1	1
+149	2	NULL	NULL	NULL
+15	2	NULL	NULL	NULL
+150	1	150	1	1
+152	2	NULL	NULL	NULL
+153	1	NULL	NULL	NULL
+155	1	NULL	NULL	NULL
+156	1	NULL	NULL	NULL
+157	1	NULL	NULL	NULL
+158	1	NULL	NULL	NULL
+160	1	NULL	NULL	NULL
+162	1	NULL	NULL	NULL
+163	1	NULL	NULL	NULL
+164	2	NULL	NULL	NULL
+165	2	NULL	NULL	NULL
+166	1	NULL	NULL	NULL
+167	3	NULL	NULL	NULL
+168	1	NULL	NULL	NULL
+169	4	NULL	NULL	NULL
+17	1	NULL	NULL	NULL
+170	1	NULL	NULL	NULL
+172	2	NULL	NULL	NULL
+174	2	NULL	NULL	NULL
+175	2	NULL	NULL	NULL
+176	2	NULL	NULL	NULL
+177	1	NULL	NULL	NULL
+178	1	NULL	NULL	NULL
+179	2	NULL	NULL	NULL
+18	2	NULL	NULL	NULL
+180	1	NULL	NULL	NULL
+181	1	NULL	NULL	NULL
+183	1	NULL	NULL	NULL
+186	1	NULL	NULL	NULL
+187	3	NULL	NULL	NULL
+189	1	NULL	NULL	NULL
+19	1	NULL	NULL	NULL
+190	1	NULL	NULL	NULL
+191	2	NULL	NULL	NULL
+192	1	NULL	NULL	NULL
+193	3	NULL	NULL	NULL
+194	1	NULL	NULL	NULL
+195	2	NULL	NULL	NULL
+196	1	NULL	NULL	NULL
+197	2	NULL	NULL	NULL
+199	3	NULL	NULL	NULL
+2	1	NULL	NULL	NULL
+20	1	NULL	NULL	NULL
+200	2	NULL	NULL	NULL
+201	1	NULL	NULL	NULL
+202	1	NULL	NULL	NULL
+203	2	NULL	NULL	NULL
+205	2	NULL	NULL	NULL
+207	2	NULL	NULL	NULL
+208	3	NULL	NULL	NULL
+209	2	NULL	NULL	NULL
+213	2	213	1	1
+214	1	NULL	NULL	NULL
+216	2	NULL	NULL	NULL
+217	2	NULL	NULL	NULL
+218	1	NULL	NULL	NULL
+219	2	NULL	NULL	NULL
+221	2	NULL	NULL	NULL
+222	1	NULL	NULL	NULL
+223	2	NULL	NULL	NULL
+224	2	224	1	1
+226	1	NULL	NULL	NULL
+228	1	NULL	NULL	NULL
+229	2	NULL	NULL	NULL
+230	5	NULL	NULL	NULL
+233	2	NULL	NULL	NULL
+235	1	NULL	NULL	NULL
+237	2	NULL	NULL	NULL
+238	2	238	1	1
+239	2	NULL	NULL	NULL
+24	2	NULL	NULL	NULL
+241	1	NULL	NULL	NULL
+242	2	NULL	NULL	NULL
+244	1	NULL	NULL	NULL
+247	1	NULL	NULL	NULL
+248	1	NULL	NULL	NULL
+249	1	NULL	NULL	NULL
+252	1	NULL	NULL	NULL
+255	2	255	1	1
+256	2	NULL	NULL	NULL
+257	1	NULL	NULL	NULL
+258	1	NULL	NULL	NULL
+26	2	NULL	NULL	NULL
+260	1	NULL	NULL	NULL
+262	1	NULL	NULL	NULL
+263	1	NULL	NULL	NULL
+265	2	NULL	NULL	NULL
+266	1	NULL	NULL	NULL
+27	1	NULL	NULL	NULL
+272	2	NULL	NULL	NULL
+273	3	273	1	1
+274	1	NULL	NULL	NULL
+275	1	NULL	NULL	NULL
+277	4	NULL	NULL	NULL
+278	2	278	1	1
+28	1	NULL	NULL	NULL
+280	2	NULL	NULL	NULL
+281	2	NULL	NULL	NULL
+282	2	NULL	NULL	NULL
+283	1	NULL	NULL	NULL
+284	1	NULL	NULL	NULL
+285	1	NULL	NULL	NULL
+286	1	NULL	NULL	NULL
+287	1	NULL	NULL	NULL
+288	2	NULL	NULL	NULL
+289	1	NULL	NULL	NULL
+291	1	NULL	NULL	NULL
+292	1	NULL	NULL	NULL
+296	1	NULL	NULL	NULL
+298	3	NULL	NULL	NULL
+30	1	NULL	NULL	NULL
+302	1	NULL	NULL	NULL
+305	1	NULL	NULL	NULL
+306	1	NULL	NULL	NULL
+307	2	NULL	NULL	NULL
+308	1	NULL	NULL	NULL
+309	2	NULL	NULL	NULL
+310	1	NULL	NULL	NULL
+311	3	311	1	1
+315	1	NULL	NULL	NULL
+316	3	NULL	NULL	NULL
+317	2	NULL	NULL	NULL
+318	3	NULL	NULL	NULL
+321	2	NULL	NULL	NULL
+322	2	NULL	NULL	NULL
+323	1	NULL	NULL	NULL
+325	2	NULL	NULL	NULL
+327	3	NULL	NULL	NULL
+33	1	NULL	NULL	NULL
+331	2	NULL	NULL	NULL
+332	1	NULL	NULL	NULL
+333	2	NULL	NULL	NULL
+335	1	NULL	NULL	NULL
+336	1	NULL	NULL	NULL
+338	1	NULL	NULL	NULL
+339	1	NULL	NULL	NULL
+34	1	NULL	NULL	NULL
+341	1	NULL	NULL	NULL
+342	2	NULL	NULL	NULL
+344	2	NULL	NULL	NULL
+345	1	NULL	NULL	NULL
+348	5	NULL	NULL	NULL
+35	3	NULL	NULL	NULL
+351	1	NULL	NULL	NULL
+353	2	NULL	NULL	NULL
+356	1	NULL	NULL	NULL
+360	1	NULL	NULL	NULL
+362	1	NULL	NULL	NULL
+364	1	NULL	NULL	NULL
+365	1	NULL	NULL	NULL
+366	1	NULL	NULL	NULL
+367	2	NULL	NULL	NULL
+368	1	NULL	NULL	NULL
+369	3	369	1	1
+37	2	NULL	NULL	NULL
+373	1	NULL	NULL	NULL
+374	1	NULL	NULL	NULL
+375	1	NULL	NULL	NULL
+377	1	NULL	NULL	NULL
+378	1	NULL	NULL	NULL
+379	1	NULL	NULL	NULL
+382	2	NULL	NULL	NULL
+384	3	NULL	NULL	NULL
+386	1	NULL	NULL	NULL
+389	1	NULL	NULL	NULL
+392	1	NULL	NULL	NULL
+393	1	NULL	NULL	NULL
+394	1	NULL	NULL	NULL
+395	2	NULL	NULL	NULL
+396	3	NULL	NULL	NULL
+397	2	NULL	NULL	NULL
+399	2	NULL	NULL	NULL
+4	1	NULL	NULL	NULL
+400	1	NULL	NULL	NULL
+401	5	401	1	1
+402	1	NULL	NULL	NULL
+403	3	NULL	NULL	NULL
+404	2	NULL	NULL	NULL
+406	4	406	1	1
+407	1	NULL	NULL	NULL
+409	3	NULL	NULL	NULL
+41	1	NULL	NULL	NULL
+411	1	NULL	NULL	NULL
+413	2	NULL	NULL	NULL
+414	2	NULL	NULL	NULL
+417	3	NULL	NULL	NULL
+418	1	NULL	NULL	NULL
+419	1	NULL	NULL	NULL
+42	2	NULL	NULL	NULL
+421	1	NULL	NULL	NULL
+424	2	NULL	NULL	NULL
+427	1	NULL	NULL	NULL
+429	2	NULL	NULL	NULL
+43	1	NULL	NULL	NULL
+430	3	NULL	NULL	NULL
+431	3	NULL	NULL	NULL
+432	1	NULL	NULL	NULL
+435	1	NULL	NULL	NULL
+436	1	NULL	NULL	NULL
+437	1	NULL	NULL	NULL
+438	3	NULL	NULL	NULL
+439	2	NULL	NULL	NULL
+44	1	NULL	NULL	NULL
+443	1	NULL	NULL	NULL
+444	1	NULL	NULL	NULL
+446	1	NULL	NULL	NULL
+448	1	NULL	NULL	NULL
+449	1	NULL	NULL	NULL
+452	1	NULL	NULL	NULL
+453	1	NULL	NULL	NULL
+454	3	NULL	NULL	NULL
+455	1	NULL	NULL	NULL
+457	1	NULL	NULL	NULL
+458	2	NULL	NULL	NULL
+459	2	NULL	NULL	NULL
+460	1	NULL	NULL	NULL
+462	2	NULL	NULL	NULL
+463	2	NULL	NULL	NULL
+466	3	NULL	NULL	NULL
+467	1	NULL	NULL	NULL
+468	4	NULL	NULL	NULL
+469	5	NULL	NULL	NULL
+47	1	NULL	NULL	NULL
+470	1	NULL	NULL	NULL
+472	1	NULL	NULL	NULL
+475	1	NULL	NULL	NULL
+477	1	NULL	NULL	NULL
+478	2	NULL	NULL	NULL
+479	1	NULL	NULL	NULL
+480	3	NULL	NULL	NULL
+481	1	NULL	NULL	NULL
+482	1	NULL	NULL	NULL
+483	1	NULL	NULL	NULL
+484	1	NULL	NULL	NULL
+485	1	NULL	NULL	NULL
+487	1	NULL	NULL	NULL
+489	4	NULL	NULL	NULL
+490	1	NULL	NULL	NULL
+491	1	NULL	NULL	NULL
+492	2	NULL	NULL	NULL
+493	1	NULL	NULL	NULL
+494	1	NULL	NULL	NULL
+495	1	NULL	NULL	NULL
+496	1	NULL	NULL	NULL
+497	1	NULL	NULL	NULL
+498	3	NULL	NULL	NULL
+5	3	NULL	NULL	NULL
+51	2	NULL	NULL	NULL
+53	1	NULL	NULL	NULL
+54	1	NULL	NULL	NULL
+57	1	NULL	NULL	NULL
+58	2	NULL	NULL	NULL
+64	1	NULL	NULL	NULL
+65	1	NULL	NULL	NULL
+66	1	66	1	1
+67	2	NULL	NULL	NULL
+69	1	NULL	NULL	NULL
+70	3	NULL	NULL	NULL
+72	2	NULL	NULL	NULL
+74	1	NULL	NULL	NULL
+76	2	NULL	NULL	NULL
+77	1	NULL	NULL	NULL
+78	1	NULL	NULL	NULL
+8	1	NULL	NULL	NULL
+80	1	NULL	NULL	NULL
+82	1	NULL	NULL	NULL
+83	2	NULL	NULL	NULL
+84	2	NULL	NULL	NULL
+85	1	NULL	NULL	NULL
+86	1	NULL	NULL	NULL
+87	1	NULL	NULL	NULL
+9	1	NULL	NULL	NULL
+90	3	NULL	NULL	NULL
+92	1	NULL	NULL	NULL
+95	2	NULL	NULL	NULL
+96	1	NULL	NULL	NULL
+97	2	NULL	NULL	NULL
+98	2	98	1	1
+NULL	NULL		7	1

Added: hive/branches/spark/ql/src/test/results/clientpositive/spark/join28.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/join28.q.out?rev=1634671&view=auto
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/join28.q.out (added)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/join28.q.out Mon Oct 27 19:56:58 2014
@@ -0,0 +1,286 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+CREATE TABLE dest_j1(key STRING, value STRING) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@dest_j1
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+CREATE TABLE dest_j1(key STRING, value STRING) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@dest_j1
+PREHOOK: query: -- Since the inputs are small, it should be automatically converted to mapjoin
+
+EXPLAIN
+INSERT OVERWRITE TABLE dest_j1 
+SELECT subq.key1, z.value
+FROM
+(SELECT x.key as key1, x.value as value1, y.key as key2, y.value as value2 
+ FROM src1 x JOIN src y ON (x.key = y.key)) subq
+ JOIN srcpart z ON (subq.key1 = z.key and z.ds='2008-04-08' and z.hr=11)
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Since the inputs are small, it should be automatically converted to mapjoin
+
+EXPLAIN
+INSERT OVERWRITE TABLE dest_j1 
+SELECT subq.key1, z.value
+FROM
+(SELECT x.key as key1, x.value as value1, y.key as key2, y.value as value2 
+ FROM src1 x JOIN src y ON (x.key = y.key)) subq
+ JOIN srcpart z ON (subq.key1 = z.key and z.ds='2008-04-08' and z.hr=11)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP PARTITION-LEVEL SORT, 1), Map 4 (GROUP PARTITION-LEVEL SORT, 1)
+        Reducer 3 <- Map 5 (GROUP PARTITION-LEVEL SORT, 1), Reducer 2 (GROUP PARTITION-LEVEL SORT, 1)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: x
+                  Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: string)
+                      sort order: +
+                      Map-reduce partition columns: key (type: string)
+                      Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: y
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: string)
+                      sort order: +
+                      Map-reduce partition columns: key (type: string)
+                      Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+        Map 5 
+            Map Operator Tree:
+                TableScan
+                  alias: z
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: string)
+                      sort order: +
+                      Map-reduce partition columns: key (type: string)
+                      Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                      value expressions: value (type: string)
+        Reducer 2 
+            Reduce Operator Tree:
+              Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                condition expressions:
+                  0 {KEY.reducesinkkey0}
+                  1 
+                outputColumnNames: _col0
+                Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: string)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: string)
+                    sort order: +
+                    Map-reduce partition columns: _col0 (type: string)
+                    Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+        Reducer 3 
+            Reduce Operator Tree:
+              Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                condition expressions:
+                  0 {KEY.reducesinkkey0}
+                  1 {VALUE._col0}
+                outputColumnNames: _col0, _col5
+                Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: string), _col5 (type: string)
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        name: default.dest_j1
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.dest_j1
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+
+PREHOOK: query: INSERT OVERWRITE TABLE dest_j1 
+SELECT subq.key1, z.value
+FROM
+(SELECT x.key as key1, x.value as value1, y.key as key2, y.value as value2 
+ FROM src1 x JOIN src y ON (x.key = y.key)) subq
+ JOIN srcpart z ON (subq.key1 = z.key and z.ds='2008-04-08' and z.hr=11)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@src1
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Output: default@dest_j1
+[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
+POSTHOOK: query: INSERT OVERWRITE TABLE dest_j1 
+SELECT subq.key1, z.value
+FROM
+(SELECT x.key as key1, x.value as value1, y.key as key2, y.value as value2 
+ FROM src1 x JOIN src y ON (x.key = y.key)) subq
+ JOIN srcpart z ON (subq.key1 = z.key and z.ds='2008-04-08' and z.hr=11)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@src1
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Output: default@dest_j1
+POSTHOOK: Lineage: dest_j1.key EXPRESSION [(src1)x.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_j1.value SIMPLE [(srcpart)z.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select * from dest_j1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@dest_j1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from dest_j1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@dest_j1
+#### A masked pattern was here ####
+128	val_128
+128	val_128
+128	val_128
+128	val_128
+128	val_128
+128	val_128
+128	val_128
+128	val_128
+128	val_128
+146	val_146
+146	val_146
+146	val_146
+146	val_146
+150	val_150
+213	val_213
+213	val_213
+213	val_213
+213	val_213
+224	val_224
+224	val_224
+224	val_224
+224	val_224
+238	val_238
+238	val_238
+238	val_238
+238	val_238
+255	val_255
+255	val_255
+255	val_255
+255	val_255
+273	val_273
+273	val_273
+273	val_273
+273	val_273
+273	val_273
+273	val_273
+273	val_273
+273	val_273
+273	val_273
+278	val_278
+278	val_278
+278	val_278
+278	val_278
+311	val_311
+311	val_311
+311	val_311
+311	val_311
+311	val_311
+311	val_311
+311	val_311
+311	val_311
+311	val_311
+369	val_369
+369	val_369
+369	val_369
+369	val_369
+369	val_369
+369	val_369
+369	val_369
+369	val_369
+369	val_369
+401	val_401
+401	val_401
+401	val_401
+401	val_401
+401	val_401
+401	val_401
+401	val_401
+401	val_401
+401	val_401
+401	val_401
+401	val_401
+401	val_401
+401	val_401
+401	val_401
+401	val_401
+401	val_401
+401	val_401
+401	val_401
+401	val_401
+401	val_401
+401	val_401
+401	val_401
+401	val_401
+401	val_401
+401	val_401
+406	val_406
+406	val_406
+406	val_406
+406	val_406
+406	val_406
+406	val_406
+406	val_406
+406	val_406
+406	val_406
+406	val_406
+406	val_406
+406	val_406
+406	val_406
+406	val_406
+406	val_406
+406	val_406
+66	val_66
+98	val_98
+98	val_98
+98	val_98
+98	val_98

Added: hive/branches/spark/ql/src/test/results/clientpositive/spark/join29.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/join29.q.out?rev=1634671&view=auto
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/join29.q.out (added)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/join29.q.out Mon Oct 27 19:56:58 2014
@@ -0,0 +1,209 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+CREATE TABLE dest_j1(key STRING, cnt1 INT, cnt2 INT)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@dest_j1
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+CREATE TABLE dest_j1(key STRING, cnt1 INT, cnt2 INT)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@dest_j1
+PREHOOK: query: -- Since the inputs are small, it should be automatically converted to mapjoin
+
+EXPLAIN
+INSERT OVERWRITE TABLE dest_j1 
+SELECT subq1.key, subq1.cnt, subq2.cnt
+FROM (select x.key, count(1) as cnt from src1 x group by x.key) subq1 JOIN 
+     (select y.key, count(1) as cnt from src y group by y.key) subq2 ON (subq1.key = subq2.key)
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Since the inputs are small, it should be automatically converted to mapjoin
+
+EXPLAIN
+INSERT OVERWRITE TABLE dest_j1 
+SELECT subq1.key, subq1.cnt, subq2.cnt
+FROM (select x.key, count(1) as cnt from src1 x group by x.key) subq1 JOIN 
+     (select y.key, count(1) as cnt from src y group by y.key) subq2 ON (subq1.key = subq2.key)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP, 1)
+        Reducer 3 <- Reducer 2 (GROUP PARTITION-LEVEL SORT, 1), Reducer 5 (GROUP PARTITION-LEVEL SORT, 1)
+        Reducer 5 <- Map 4 (GROUP, 1)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: y
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: string)
+                      outputColumnNames: key
+                      Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        aggregations: count(1)
+                        keys: key (type: string)
+                        mode: hash
+                        outputColumnNames: _col0, _col1
+                        Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: string)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: string)
+                          Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                          value expressions: _col1 (type: bigint)
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: x
+                  Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: string)
+                      outputColumnNames: key
+                      Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        aggregations: count(1)
+                        keys: key (type: string)
+                        mode: hash
+                        outputColumnNames: _col0, _col1
+                        Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: string)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: string)
+                          Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
+                          value expressions: _col1 (type: bigint)
+        Reducer 2 
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                keys: KEY._col0 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: string), _col1 (type: bigint)
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: string)
+                    sort order: +
+                    Map-reduce partition columns: _col0 (type: string)
+                    Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: _col1 (type: bigint)
+        Reducer 3 
+            Reduce Operator Tree:
+              Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                condition expressions:
+                  0 {KEY.reducesinkkey0} {VALUE._col0}
+                  1 {VALUE._col0}
+                outputColumnNames: _col0, _col1, _col3
+                Statistics: Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: string), UDFToInteger(_col1) (type: int), UDFToInteger(_col3) (type: int)
+                  outputColumnNames: _col0, _col1, _col2
+                  Statistics: Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        name: default.dest_j1
+        Reducer 5 
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                keys: KEY._col0 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 6 Data size: 45 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: string), _col1 (type: bigint)
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 6 Data size: 45 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: string)
+                    sort order: +
+                    Map-reduce partition columns: _col0 (type: string)
+                    Statistics: Num rows: 6 Data size: 45 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: _col1 (type: bigint)
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.dest_j1
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+
+PREHOOK: query: INSERT OVERWRITE TABLE dest_j1 
+SELECT subq1.key, subq1.cnt, subq2.cnt
+FROM (select x.key, count(1) as cnt from src1 x group by x.key) subq1 JOIN 
+     (select y.key, count(1) as cnt from src y group by y.key) subq2 ON (subq1.key = subq2.key)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@src1
+PREHOOK: Output: default@dest_j1
+[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
+POSTHOOK: query: INSERT OVERWRITE TABLE dest_j1 
+SELECT subq1.key, subq1.cnt, subq2.cnt
+FROM (select x.key, count(1) as cnt from src1 x group by x.key) subq1 JOIN 
+     (select y.key, count(1) as cnt from src y group by y.key) subq2 ON (subq1.key = subq2.key)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@src1
+POSTHOOK: Output: default@dest_j1
+POSTHOOK: Lineage: dest_j1.cnt1 EXPRESSION [(src1)x.null, ]
+POSTHOOK: Lineage: dest_j1.cnt2 EXPRESSION [(src)y.null, ]
+POSTHOOK: Lineage: dest_j1.key SIMPLE [(src1)x.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: select * from dest_j1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@dest_j1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from dest_j1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@dest_j1
+#### A masked pattern was here ####
+128	1	3
+146	1	2
+150	1	1
+213	1	2
+224	1	2
+238	1	2
+255	1	2
+273	1	3
+278	1	2
+311	1	3
+369	1	3
+401	1	5
+406	1	4
+66	1	1
+98	1	2

Added: hive/branches/spark/ql/src/test/results/clientpositive/spark/join30.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/join30.q.out?rev=1634671&view=auto
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/join30.q.out (added)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/join30.q.out Mon Oct 27 19:56:58 2014
@@ -0,0 +1,161 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+CREATE TABLE dest_j1(key INT, cnt INT)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@dest_j1
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+CREATE TABLE dest_j1(key INT, cnt INT)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@dest_j1
+PREHOOK: query: EXPLAIN
+INSERT OVERWRITE TABLE dest_j1 
+SELECT /*+ MAPJOIN(x) */ x.key, count(1) FROM src1 x JOIN src y ON (x.key = y.key) group by x.key
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+INSERT OVERWRITE TABLE dest_j1 
+SELECT /*+ MAPJOIN(x) */ x.key, count(1) FROM src1 x JOIN src y ON (x.key = y.key) group by x.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP PARTITION-LEVEL SORT, 1), Map 4 (GROUP PARTITION-LEVEL SORT, 1)
+        Reducer 3 <- Reducer 2 (GROUP, 1)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: y
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: string)
+                      sort order: +
+                      Map-reduce partition columns: key (type: string)
+                      Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: x
+                  Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: string)
+                      sort order: +
+                      Map-reduce partition columns: key (type: string)
+                      Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
+        Reducer 2 
+            Reduce Operator Tree:
+              Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                condition expressions:
+                  0 {KEY.reducesinkkey0}
+                  1 
+                outputColumnNames: _col0
+                Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: string)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+                  Group By Operator
+                    aggregations: count(1)
+                    keys: _col0 (type: string)
+                    mode: hash
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: string)
+                      Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+                      value expressions: _col1 (type: bigint)
+        Reducer 3 
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                keys: KEY._col0 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 137 Data size: 1455 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: UDFToInteger(_col0) (type: int), UDFToInteger(_col1) (type: int)
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 137 Data size: 1455 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 137 Data size: 1455 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        name: default.dest_j1
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.dest_j1
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+
+PREHOOK: query: INSERT OVERWRITE TABLE dest_j1 
+SELECT /*+ MAPJOIN(x) */ x.key, count(1) FROM src1 x JOIN src y ON (x.key = y.key) group by x.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@src1
+PREHOOK: Output: default@dest_j1
+[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
+POSTHOOK: query: INSERT OVERWRITE TABLE dest_j1 
+SELECT /*+ MAPJOIN(x) */ x.key, count(1) FROM src1 x JOIN src y ON (x.key = y.key) group by x.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@src1
+POSTHOOK: Output: default@dest_j1
+POSTHOOK: Lineage: dest_j1.cnt EXPRESSION [(src1)x.null, (src)y.null, ]
+POSTHOOK: Lineage: dest_j1.key EXPRESSION [(src1)x.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: select * from dest_j1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@dest_j1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from dest_j1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@dest_j1
+#### A masked pattern was here ####
+128	3
+146	2
+150	1
+213	2
+224	2
+238	2
+255	2
+273	3
+278	2
+311	3
+369	3
+401	5
+406	4
+66	1
+98	2